From 6888798ef682d1a749c812c01c3cf63df11227aa Mon Sep 17 00:00:00 2001 From: Ruud Date: Mon, 28 Feb 2011 15:25:42 +0100 Subject: [PATCH] Remove submodule, just put Dependencies in ./libs --- .gitmodules | 3 - libs | 1 - libs/README.md | 4 + libs/__init__.py | 1 + libs/apscheduler/__init__.py | 0 libs/apscheduler/expressions.py | 176 + libs/apscheduler/fields.py | 92 + libs/apscheduler/scheduler.py | 407 + libs/apscheduler/triggers.py | 171 + libs/apscheduler/util.py | 91 + libs/argparse.py | 2353 +++++ libs/axel/__init__.py | 13 + libs/axel/axel.py | 325 + libs/daemon.py | 181 + libs/decorator.py | 209 + libs/elixir/__init__.py | 119 + libs/elixir/collection.py | 127 + libs/elixir/entity.py | 1172 +++ libs/elixir/events.py | 30 + libs/elixir/ext/__init__.py | 5 + libs/elixir/ext/associable.py | 234 + libs/elixir/ext/encrypted.py | 124 + libs/elixir/ext/list.py | 251 + libs/elixir/ext/perform_ddl.py | 106 + libs/elixir/ext/versioned.py | 288 + libs/elixir/fields.py | 191 + libs/elixir/options.py | 285 + libs/elixir/properties.py | 244 + libs/elixir/py23compat.py | 73 + libs/elixir/relationships.py | 1257 +++ libs/elixir/statements.py | 59 + libs/flask/__init__.py | 34 + libs/flask/app.py | 965 ++ libs/flask/config.py | 157 + libs/flask/ctx.py | 66 + libs/flask/globals.py | 27 + libs/flask/helpers.py | 496 + libs/flask/logging.py | 42 + libs/flask/module.py | 230 + libs/flask/session.py | 43 + libs/flask/signals.py | 50 + libs/flask/templating.py | 100 + libs/flask/testing.py | 67 + libs/flask/wrappers.py | 88 + libs/getmeta.py | 206 + libs/hachoir_core/__init__.py | 2 + libs/hachoir_core/benchmark.py | 210 + libs/hachoir_core/bits.py | 277 + libs/hachoir_core/cmd_line.py | 43 + libs/hachoir_core/compatibility.py | 185 + libs/hachoir_core/config.py | 29 + libs/hachoir_core/dict.py | 183 + libs/hachoir_core/endian.py | 15 + libs/hachoir_core/error.py | 45 + libs/hachoir_core/event_handler.py | 26 + libs/hachoir_core/field/__init__.py | 59 + 
libs/hachoir_core/field/basic_field_set.py | 147 + libs/hachoir_core/field/bit_field.py | 68 + libs/hachoir_core/field/byte_field.py | 73 + libs/hachoir_core/field/character.py | 27 + libs/hachoir_core/field/enum.py | 26 + libs/hachoir_core/field/fake_array.py | 81 + libs/hachoir_core/field/field.py | 262 + libs/hachoir_core/field/field_set.py | 7 + libs/hachoir_core/field/float.py | 99 + libs/hachoir_core/field/generic_field_set.py | 532 + libs/hachoir_core/field/helper.py | 57 + libs/hachoir_core/field/integer.py | 44 + libs/hachoir_core/field/link.py | 109 + libs/hachoir_core/field/new_seekable_field_set.py | 82 + libs/hachoir_core/field/padding.py | 138 + libs/hachoir_core/field/parser.py | 40 + libs/hachoir_core/field/seekable_field_set.py | 182 + libs/hachoir_core/field/static_field_set.py | 54 + libs/hachoir_core/field/string_field.py | 402 + libs/hachoir_core/field/sub_file.py | 72 + libs/hachoir_core/field/timestamp.py | 86 + libs/hachoir_core/field/vector.py | 38 + libs/hachoir_core/i18n.py | 214 + libs/hachoir_core/iso639.py | 558 + libs/hachoir_core/language.py | 23 + libs/hachoir_core/log.py | 144 + libs/hachoir_core/memory.py | 99 + libs/hachoir_core/profiler.py | 31 + libs/hachoir_core/stream/__init__.py | 11 + libs/hachoir_core/stream/input.py | 563 ++ libs/hachoir_core/stream/input_helper.py | 38 + libs/hachoir_core/stream/output.py | 173 + libs/hachoir_core/stream/stream.py | 5 + libs/hachoir_core/text_handler.py | 60 + libs/hachoir_core/timeout.py | 76 + libs/hachoir_core/tools.py | 582 ++ libs/hachoir_core/version.py | 5 + libs/hachoir_metadata/__init__.py | 15 + libs/hachoir_metadata/archive.py | 166 + libs/hachoir_metadata/audio.py | 406 + libs/hachoir_metadata/config.py | 2 + libs/hachoir_metadata/file_system.py | 28 + libs/hachoir_metadata/filter.py | 52 + libs/hachoir_metadata/formatter.py | 25 + libs/hachoir_metadata/image.py | 299 + libs/hachoir_metadata/jpeg.py | 289 + libs/hachoir_metadata/metadata.py | 291 + 
libs/hachoir_metadata/metadata_item.py | 146 + libs/hachoir_metadata/misc.py | 262 + libs/hachoir_metadata/program.py | 100 + libs/hachoir_metadata/qt/__init__.py | 0 libs/hachoir_metadata/qt/dialog.ui | 64 + libs/hachoir_metadata/register.py | 112 + libs/hachoir_metadata/riff.py | 190 + libs/hachoir_metadata/safe.py | 27 + libs/hachoir_metadata/setter.py | 171 + libs/hachoir_metadata/timezone.py | 42 + libs/hachoir_metadata/version.py | 5 + libs/hachoir_metadata/video.py | 412 + libs/hachoir_parser/__init__.py | 7 + libs/hachoir_parser/archive/__init__.py | 12 + libs/hachoir_parser/archive/ace.py | 267 + libs/hachoir_parser/archive/ar.py | 52 + libs/hachoir_parser/archive/bzip2_parser.py | 85 + libs/hachoir_parser/archive/cab.py | 125 + libs/hachoir_parser/archive/gzip_parser.py | 129 + libs/hachoir_parser/archive/mar.py | 67 + libs/hachoir_parser/archive/rar.py | 353 + libs/hachoir_parser/archive/rpm.py | 267 + libs/hachoir_parser/archive/sevenzip.py | 401 + libs/hachoir_parser/archive/tar.py | 124 + libs/hachoir_parser/archive/zip.py | 427 + libs/hachoir_parser/audio/__init__.py | 12 + libs/hachoir_parser/audio/aiff.py | 127 + libs/hachoir_parser/audio/au.py | 88 + libs/hachoir_parser/audio/flac.py | 157 + libs/hachoir_parser/audio/id3.py | 507 + libs/hachoir_parser/audio/itunesdb.py | 433 + libs/hachoir_parser/audio/midi.py | 246 + libs/hachoir_parser/audio/mod.py | 149 + libs/hachoir_parser/audio/modplug.py | 291 + libs/hachoir_parser/audio/mpeg_audio.py | 408 + libs/hachoir_parser/audio/real_audio.py | 90 + libs/hachoir_parser/audio/s3m.py | 668 ++ libs/hachoir_parser/audio/xm.py | 390 + libs/hachoir_parser/common/__init__.py | 0 libs/hachoir_parser/common/deflate.py | 33 + libs/hachoir_parser/common/msdos.py | 62 + libs/hachoir_parser/common/tracker.py | 10 + libs/hachoir_parser/common/win32.py | 154 + libs/hachoir_parser/common/win32_lang_id.py | 136 + libs/hachoir_parser/container/__init__.py | 7 + libs/hachoir_parser/container/action_script.py | 661 ++ 
libs/hachoir_parser/container/asn1.py | 282 + libs/hachoir_parser/container/mkv.py | 598 ++ libs/hachoir_parser/container/ogg.py | 349 + libs/hachoir_parser/container/realmedia.py | 172 + libs/hachoir_parser/container/riff.py | 439 + libs/hachoir_parser/container/swf.py | 433 + libs/hachoir_parser/file_system/__init__.py | 8 + libs/hachoir_parser/file_system/ext2.py | 464 + libs/hachoir_parser/file_system/fat.py | 433 + libs/hachoir_parser/file_system/iso9660.py | 121 + libs/hachoir_parser/file_system/linux_swap.py | 114 + libs/hachoir_parser/file_system/mbr.py | 230 + libs/hachoir_parser/file_system/ntfs.py | 285 + libs/hachoir_parser/file_system/reiser_fs.py | 120 + libs/hachoir_parser/game/__init__.py | 4 + libs/hachoir_parser/game/blp.py | 269 + libs/hachoir_parser/game/laf.py | 94 + libs/hachoir_parser/game/spider_man_video.py | 65 + libs/hachoir_parser/game/zsnes.py | 250 + libs/hachoir_parser/guess.py | 123 + libs/hachoir_parser/image/__init__.py | 12 + libs/hachoir_parser/image/bmp.py | 195 + libs/hachoir_parser/image/common.py | 49 + libs/hachoir_parser/image/exif.py | 361 + libs/hachoir_parser/image/gif.py | 227 + libs/hachoir_parser/image/ico.py | 139 + libs/hachoir_parser/image/iptc.py | 113 + libs/hachoir_parser/image/jpeg.py | 368 + libs/hachoir_parser/image/pcx.py | 73 + libs/hachoir_parser/image/photoshop_metadata.py | 171 + libs/hachoir_parser/image/png.py | 268 + libs/hachoir_parser/image/psd.py | 85 + libs/hachoir_parser/image/tga.py | 85 + libs/hachoir_parser/image/tiff.py | 211 + libs/hachoir_parser/image/wmf.py | 611 ++ libs/hachoir_parser/image/xcf.py | 331 + libs/hachoir_parser/misc/__init__.py | 14 + libs/hachoir_parser/misc/bplist.py | 299 + libs/hachoir_parser/misc/chm.py | 200 + libs/hachoir_parser/misc/common.py | 13 + libs/hachoir_parser/misc/file_3do.py | 214 + libs/hachoir_parser/misc/file_3ds.py | 177 + libs/hachoir_parser/misc/gnome_keyring.py | 200 + libs/hachoir_parser/misc/hlp.py | 76 + libs/hachoir_parser/misc/lnk.py | 613 ++ 
libs/hachoir_parser/misc/msoffice.py | 131 + libs/hachoir_parser/misc/msoffice_summary.py | 377 + libs/hachoir_parser/misc/ole2.py | 367 + libs/hachoir_parser/misc/pcf.py | 170 + libs/hachoir_parser/misc/pdf.py | 442 + libs/hachoir_parser/misc/pifv.py | 241 + libs/hachoir_parser/misc/torrent.py | 163 + libs/hachoir_parser/misc/ttf.py | 277 + libs/hachoir_parser/misc/word_doc.py | 299 + libs/hachoir_parser/network/__init__.py | 2 + libs/hachoir_parser/network/common.py | 118 + libs/hachoir_parser/network/ouid.py | 10110 +++++++++++++++++++ libs/hachoir_parser/network/tcpdump.py | 505 + libs/hachoir_parser/parser.py | 154 + libs/hachoir_parser/parser_list.py | 216 + libs/hachoir_parser/program/__init__.py | 6 + libs/hachoir_parser/program/elf.py | 187 + libs/hachoir_parser/program/exe.py | 224 + libs/hachoir_parser/program/exe_ne.py | 60 + libs/hachoir_parser/program/exe_pe.py | 221 + libs/hachoir_parser/program/exe_res.py | 445 + libs/hachoir_parser/program/java.py | 1097 ++ libs/hachoir_parser/program/prc.py | 82 + libs/hachoir_parser/program/python.py | 334 + libs/hachoir_parser/template.py | 54 + libs/hachoir_parser/version.py | 5 + libs/hachoir_parser/video/__init__.py | 6 + libs/hachoir_parser/video/amf.py | 110 + libs/hachoir_parser/video/asf.py | 356 + libs/hachoir_parser/video/flv.py | 157 + libs/hachoir_parser/video/fourcc.py | 415 + libs/hachoir_parser/video/mov.py | 327 + libs/hachoir_parser/video/mpeg_ts.py | 102 + libs/hachoir_parser/video/mpeg_video.py | 576 ++ libs/imdb/Character.py | 197 + libs/imdb/Company.py | 195 + libs/imdb/Movie.py | 398 + libs/imdb/Person.py | 275 + libs/imdb/__init__.py | 902 ++ libs/imdb/_compat.py | 72 + libs/imdb/_exceptions.py | 46 + libs/imdb/_logging.py | 63 + libs/imdb/articles.py | 142 + libs/imdb/helpers.py | 548 + libs/imdb/locale/__init__.py | 29 + libs/imdb/locale/generatepot.py | 78 + libs/imdb/locale/msgfmt.py | 204 + libs/imdb/locale/rebuildmo.py | 49 + libs/imdb/parser/__init__.py | 28 + 
libs/imdb/parser/http/__init__.py | 771 ++ libs/imdb/parser/http/bsouplxml/__init__.py | 0 libs/imdb/parser/http/bsouplxml/_bsoup.py | 1970 ++++ libs/imdb/parser/http/bsouplxml/bsoupxpath.py | 394 + libs/imdb/parser/http/bsouplxml/etree.py | 75 + libs/imdb/parser/http/bsouplxml/html.py | 31 + libs/imdb/parser/http/characterParser.py | 203 + libs/imdb/parser/http/companyParser.py | 91 + libs/imdb/parser/http/movieParser.py | 1955 ++++ libs/imdb/parser/http/personParser.py | 509 + libs/imdb/parser/http/searchCharacterParser.py | 69 + libs/imdb/parser/http/searchCompanyParser.py | 71 + libs/imdb/parser/http/searchKeywordParser.py | 111 + libs/imdb/parser/http/searchMovieParser.py | 178 + libs/imdb/parser/http/searchPersonParser.py | 92 + libs/imdb/parser/http/topBottomParser.py | 106 + libs/imdb/parser/http/utils.py | 817 ++ libs/imdb/parser/mobile/__init__.py | 811 ++ libs/imdb/utils.py | 1536 +++ libs/jinja2/__init__.py | 73 + libs/jinja2/_debugsupport.c | 78 + libs/jinja2/_markupsafe/__init__.py | 225 + libs/jinja2/_markupsafe/_bundle.py | 49 + libs/jinja2/_markupsafe/_constants.py | 267 + libs/jinja2/_markupsafe/_native.py | 45 + libs/jinja2/_markupsafe/tests.py | 80 + libs/jinja2/_stringdefs.py | 130 + libs/jinja2/bccache.py | 289 + libs/jinja2/compiler.py | 1652 +++ libs/jinja2/constants.py | 32 + libs/jinja2/debug.py | 333 + libs/jinja2/defaults.py | 40 + libs/jinja2/environment.py | 1121 ++ libs/jinja2/exceptions.py | 143 + libs/jinja2/ext.py | 612 ++ libs/jinja2/filters.py | 789 ++ libs/jinja2/lexer.py | 681 ++ libs/jinja2/loaders.py | 451 + libs/jinja2/meta.py | 102 + libs/jinja2/nodes.py | 910 ++ libs/jinja2/optimizer.py | 68 + libs/jinja2/parser.py | 896 ++ libs/jinja2/runtime.py | 550 + libs/jinja2/sandbox.py | 361 + libs/jinja2/tests.py | 146 + libs/jinja2/testsuite/__init__.py | 95 + libs/jinja2/testsuite/api.py | 245 + libs/jinja2/testsuite/core_tags.py | 285 + libs/jinja2/testsuite/debug.py | 60 + libs/jinja2/testsuite/doctests.py | 29 + 
libs/jinja2/testsuite/ext.py | 455 + libs/jinja2/testsuite/filters.py | 349 + libs/jinja2/testsuite/imports.py | 141 + libs/jinja2/testsuite/inheritance.py | 227 + libs/jinja2/testsuite/lexnparse.py | 387 + libs/jinja2/testsuite/loader.py | 190 + libs/jinja2/testsuite/regression.py | 255 + libs/jinja2/testsuite/res/__init__.py | 0 libs/jinja2/testsuite/res/templates/broken.html | 3 + libs/jinja2/testsuite/res/templates/foo/test.html | 1 + .../testsuite/res/templates/syntaxerror.html | 4 + libs/jinja2/testsuite/res/templates/test.html | 1 + libs/jinja2/testsuite/security.py | 165 + libs/jinja2/testsuite/tests.py | 87 + libs/jinja2/testsuite/utils.py | 82 + libs/jinja2/utils.py | 601 ++ libs/jinja2/visitor.py | 87 + libs/migrate/__init__.py | 9 + libs/migrate/changeset/__init__.py | 28 + libs/migrate/changeset/ansisql.py | 358 + libs/migrate/changeset/constraint.py | 202 + libs/migrate/changeset/databases/__init__.py | 10 + libs/migrate/changeset/databases/firebird.py | 99 + libs/migrate/changeset/databases/mysql.py | 94 + libs/migrate/changeset/databases/oracle.py | 111 + libs/migrate/changeset/databases/postgres.py | 46 + libs/migrate/changeset/databases/sqlite.py | 155 + libs/migrate/changeset/databases/visitor.py | 78 + libs/migrate/changeset/schema.py | 651 ++ libs/migrate/exceptions.py | 87 + libs/migrate/versioning/__init__.py | 5 + libs/migrate/versioning/api.py | 383 + libs/migrate/versioning/cfgparse.py | 27 + libs/migrate/versioning/config.py | 14 + libs/migrate/versioning/genmodel.py | 254 + libs/migrate/versioning/migrate_repository.py | 100 + libs/migrate/versioning/pathed.py | 75 + libs/migrate/versioning/repository.py | 231 + libs/migrate/versioning/schema.py | 213 + libs/migrate/versioning/schemadiff.py | 285 + libs/migrate/versioning/script/__init__.py | 6 + libs/migrate/versioning/script/base.py | 57 + libs/migrate/versioning/script/py.py | 160 + libs/migrate/versioning/script/sql.py | 49 + libs/migrate/versioning/shell.py | 214 + 
libs/migrate/versioning/template.py | 94 + libs/migrate/versioning/templates/__init__.py | 0 libs/migrate/versioning/templates/manage.py_tmpl | 5 + .../versioning/templates/manage/default.py_tmpl | 10 + .../versioning/templates/manage/pylons.py_tmpl | 29 + .../versioning/templates/repository/__init__.py | 0 .../versioning/templates/repository/default/README | 4 + .../templates/repository/default/__init__.py | 0 .../templates/repository/default/migrate.cfg | 20 + .../repository/default/versions/__init__.py | 0 .../versioning/templates/repository/pylons/README | 4 + .../templates/repository/pylons/__init__.py | 0 .../templates/repository/pylons/migrate.cfg | 20 + .../repository/pylons/versions/__init__.py | 0 .../versioning/templates/script/__init__.py | 0 .../versioning/templates/script/default.py_tmpl | 11 + .../versioning/templates/script/pylons.py_tmpl | 11 + .../templates/sql_script/default.py_tmpl | 0 .../versioning/templates/sql_script/pylons.py_tmpl | 0 libs/migrate/versioning/util/__init__.py | 179 + libs/migrate/versioning/util/importpath.py | 16 + libs/migrate/versioning/util/keyedinstance.py | 36 + libs/migrate/versioning/version.py | 215 + libs/simplejson/__init__.py | 438 + libs/simplejson/_speedups.c | 2602 +++++ libs/simplejson/decoder.py | 421 + libs/simplejson/encoder.py | 501 + libs/simplejson/ordered_dict.py | 119 + libs/simplejson/scanner.py | 77 + libs/simplejson/tool.py | 39 + libs/sqlalchemy/__init__.py | 120 + libs/sqlalchemy/cextension/processors.c | 417 + libs/sqlalchemy/cextension/resultproxy.c | 602 ++ libs/sqlalchemy/connectors/__init__.py | 10 + libs/sqlalchemy/connectors/mxodbc.py | 153 + libs/sqlalchemy/connectors/pyodbc.py | 125 + libs/sqlalchemy/connectors/zxJDBC.py | 58 + libs/sqlalchemy/databases/__init__.py | 35 + libs/sqlalchemy/dialects/__init__.py | 18 + libs/sqlalchemy/dialects/access/__init__.py | 0 libs/sqlalchemy/dialects/access/base.py | 455 + libs/sqlalchemy/dialects/firebird/__init__.py | 22 + 
libs/sqlalchemy/dialects/firebird/base.py | 688 ++ libs/sqlalchemy/dialects/firebird/kinterbasdb.py | 160 + libs/sqlalchemy/dialects/informix/__init__.py | 9 + libs/sqlalchemy/dialects/informix/base.py | 470 + libs/sqlalchemy/dialects/informix/informixdb.py | 73 + libs/sqlalchemy/dialects/maxdb/__init__.py | 9 + libs/sqlalchemy/dialects/maxdb/base.py | 1063 ++ libs/sqlalchemy/dialects/maxdb/sapdb.py | 23 + libs/sqlalchemy/dialects/mssql/__init__.py | 26 + libs/sqlalchemy/dialects/mssql/adodbapi.py | 68 + libs/sqlalchemy/dialects/mssql/base.py | 1425 +++ .../dialects/mssql/information_schema.py | 96 + libs/sqlalchemy/dialects/mssql/mxodbc.py | 97 + libs/sqlalchemy/dialects/mssql/pymssql.py | 110 + libs/sqlalchemy/dialects/mssql/pyodbc.py | 210 + libs/sqlalchemy/dialects/mssql/zxjdbc.py | 75 + libs/sqlalchemy/dialects/mysql/__init__.py | 27 + libs/sqlalchemy/dialects/mysql/base.py | 2538 +++++ libs/sqlalchemy/dialects/mysql/mysqlconnector.py | 135 + libs/sqlalchemy/dialects/mysql/mysqldb.py | 205 + libs/sqlalchemy/dialects/mysql/oursql.py | 258 + libs/sqlalchemy/dialects/mysql/pyodbc.py | 82 + libs/sqlalchemy/dialects/mysql/zxjdbc.py | 117 + libs/sqlalchemy/dialects/oracle/__init__.py | 23 + libs/sqlalchemy/dialects/oracle/base.py | 1114 ++ libs/sqlalchemy/dialects/oracle/cx_oracle.py | 712 ++ libs/sqlalchemy/dialects/oracle/zxjdbc.py | 215 + libs/sqlalchemy/dialects/postgres.py | 16 + libs/sqlalchemy/dialects/postgresql/__init__.py | 20 + libs/sqlalchemy/dialects/postgresql/base.py | 1386 +++ libs/sqlalchemy/dialects/postgresql/pg8000.py | 116 + libs/sqlalchemy/dialects/postgresql/psycopg2.py | 295 + .../sqlalchemy/dialects/postgresql/pypostgresql.py | 75 + libs/sqlalchemy/dialects/postgresql/zxjdbc.py | 25 + libs/sqlalchemy/dialects/sqlite/__init__.py | 20 + libs/sqlalchemy/dialects/sqlite/base.py | 619 ++ libs/sqlalchemy/dialects/sqlite/pysqlite.py | 243 + libs/sqlalchemy/dialects/sybase/__init__.py | 26 + libs/sqlalchemy/dialects/sybase/base.py | 432 + 
libs/sqlalchemy/dialects/sybase/mxodbc.py | 23 + libs/sqlalchemy/dialects/sybase/pyodbc.py | 83 + libs/sqlalchemy/dialects/sybase/pysybase.py | 100 + .../dialects/type_migration_guidelines.txt | 145 + libs/sqlalchemy/engine/__init__.py | 301 + libs/sqlalchemy/engine/base.py | 2759 +++++ libs/sqlalchemy/engine/ddl.py | 134 + libs/sqlalchemy/engine/default.py | 738 ++ libs/sqlalchemy/engine/reflection.py | 471 + libs/sqlalchemy/engine/strategies.py | 233 + libs/sqlalchemy/engine/threadlocal.py | 130 + libs/sqlalchemy/engine/url.py | 220 + libs/sqlalchemy/exc.py | 200 + libs/sqlalchemy/ext/__init__.py | 6 + libs/sqlalchemy/ext/associationproxy.py | 884 ++ libs/sqlalchemy/ext/compiler.py | 239 + libs/sqlalchemy/ext/declarative.py | 1451 +++ libs/sqlalchemy/ext/horizontal_shard.py | 125 + libs/sqlalchemy/ext/orderinglist.py | 321 + libs/sqlalchemy/ext/serializer.py | 161 + libs/sqlalchemy/ext/sqlsoup.py | 796 ++ libs/sqlalchemy/interfaces.py | 206 + libs/sqlalchemy/log.py | 119 + libs/sqlalchemy/orm/__init__.py | 1295 +++ libs/sqlalchemy/orm/attributes.py | 1820 ++++ libs/sqlalchemy/orm/collections.py | 1461 +++ libs/sqlalchemy/orm/dependency.py | 1095 ++ libs/sqlalchemy/orm/dynamic.py | 306 + libs/sqlalchemy/orm/evaluator.py | 111 + libs/sqlalchemy/orm/exc.py | 116 + libs/sqlalchemy/orm/identity.py | 272 + libs/sqlalchemy/orm/interfaces.py | 1094 ++ libs/sqlalchemy/orm/mapper.py | 2517 +++++ libs/sqlalchemy/orm/properties.py | 1503 +++ libs/sqlalchemy/orm/query.py | 2852 ++++++ libs/sqlalchemy/orm/scoping.py | 215 + libs/sqlalchemy/orm/session.py | 1697 ++++ libs/sqlalchemy/orm/shard.py | 15 + libs/sqlalchemy/orm/state.py | 549 + libs/sqlalchemy/orm/strategies.py | 1340 +++ libs/sqlalchemy/orm/sync.py | 97 + libs/sqlalchemy/orm/unitofwork.py | 558 + libs/sqlalchemy/orm/util.py | 691 ++ libs/sqlalchemy/pool.py | 985 ++ libs/sqlalchemy/processors.py | 109 + libs/sqlalchemy/queue.py | 191 + libs/sqlalchemy/schema.py | 2597 +++++ libs/sqlalchemy/sql/__init__.py | 65 + 
libs/sqlalchemy/sql/compiler.py | 1641 +++ libs/sqlalchemy/sql/expression.py | 4603 +++++++++ libs/sqlalchemy/sql/functions.py | 110 + libs/sqlalchemy/sql/operators.py | 148 + libs/sqlalchemy/sql/util.py | 712 ++ libs/sqlalchemy/sql/visitors.py | 263 + libs/sqlalchemy/test/__init__.py | 33 + libs/sqlalchemy/test/assertsql.py | 300 + libs/sqlalchemy/test/engines.py | 311 + libs/sqlalchemy/test/entities.py | 89 + libs/sqlalchemy/test/orm.py | 117 + libs/sqlalchemy/test/pickleable.py | 81 + libs/sqlalchemy/test/profiling.py | 228 + libs/sqlalchemy/test/requires.py | 333 + libs/sqlalchemy/test/schema.py | 85 + libs/sqlalchemy/test/testing.py | 803 ++ libs/sqlalchemy/test/util.py | 81 + libs/sqlalchemy/topological.py | 83 + libs/sqlalchemy/types.py | 1979 ++++ libs/sqlalchemy/util.py | 1875 ++++ libs/tempita/__init__.py | 1160 +++ libs/tempita/_looper.py | 163 + libs/tempita/compat3.py | 45 + libs/werkzeug/__init__.py | 159 + libs/werkzeug/_internal.py | 398 + libs/werkzeug/contrib/__init__.py | 16 + libs/werkzeug/contrib/atom.py | 343 + libs/werkzeug/contrib/cache.py | 551 + libs/werkzeug/contrib/fixers.py | 208 + libs/werkzeug/contrib/iterio.py | 281 + libs/werkzeug/contrib/jsrouting.py | 258 + libs/werkzeug/contrib/kickstart.py | 288 + libs/werkzeug/contrib/limiter.py | 36 + libs/werkzeug/contrib/lint.py | 333 + libs/werkzeug/contrib/profiler.py | 118 + libs/werkzeug/contrib/securecookie.py | 344 + libs/werkzeug/contrib/sessions.py | 344 + libs/werkzeug/contrib/testtools.py | 71 + libs/werkzeug/contrib/wrappers.py | 275 + libs/werkzeug/datastructures.py | 2373 +++++ libs/werkzeug/debug/__init__.py | 178 + libs/werkzeug/debug/console.py | 200 + libs/werkzeug/debug/render.py | 103 + libs/werkzeug/debug/repr.py | 267 + libs/werkzeug/debug/shared/FONT_LICENSE | 96 + libs/werkzeug/debug/shared/console.png | Bin 0 -> 507 bytes libs/werkzeug/debug/shared/debugger.js | 199 + libs/werkzeug/debug/shared/jquery.js | 167 + libs/werkzeug/debug/shared/less.png | Bin 0 -> 191 bytes 
libs/werkzeug/debug/shared/more.png | Bin 0 -> 200 bytes libs/werkzeug/debug/shared/source.png | Bin 0 -> 818 bytes libs/werkzeug/debug/shared/style.css | 113 + libs/werkzeug/debug/shared/ubuntu.ttf | Bin 0 -> 70220 bytes libs/werkzeug/debug/tbtools.py | 479 + libs/werkzeug/debug/utils.py | 20 + libs/werkzeug/exceptions.py | 460 + libs/werkzeug/formparser.py | 362 + libs/werkzeug/http.py | 607 ++ libs/werkzeug/local.py | 421 + libs/werkzeug/posixemulation.py | 105 + libs/werkzeug/routing.py | 1484 +++ libs/werkzeug/script.py | 309 + libs/werkzeug/security.py | 104 + libs/werkzeug/serving.py | 584 ++ libs/werkzeug/templates.py | 394 + libs/werkzeug/test.py | 818 ++ libs/werkzeug/testapp.py | 226 + libs/werkzeug/urls.py | 465 + libs/werkzeug/useragents.py | 185 + libs/werkzeug/utils.py | 670 ++ libs/werkzeug/wrappers.py | 1532 +++ libs/werkzeug/wsgi.py | 765 ++ libs/xmg/__init__.py | 0 libs/xmg/xmg.py | 206 + 541 files changed, 173394 insertions(+), 4 deletions(-) delete mode 100644 .gitmodules delete mode 160000 libs create mode 100644 libs/README.md create mode 100644 libs/__init__.py create mode 100644 libs/apscheduler/__init__.py create mode 100644 libs/apscheduler/expressions.py create mode 100644 libs/apscheduler/fields.py create mode 100644 libs/apscheduler/scheduler.py create mode 100644 libs/apscheduler/triggers.py create mode 100644 libs/apscheduler/util.py create mode 100644 libs/argparse.py create mode 100644 libs/axel/__init__.py create mode 100644 libs/axel/axel.py create mode 100644 libs/daemon.py create mode 100644 libs/decorator.py create mode 100644 libs/elixir/__init__.py create mode 100644 libs/elixir/collection.py create mode 100644 libs/elixir/entity.py create mode 100644 libs/elixir/events.py create mode 100644 libs/elixir/ext/__init__.py create mode 100644 libs/elixir/ext/associable.py create mode 100644 libs/elixir/ext/encrypted.py create mode 100644 libs/elixir/ext/list.py create mode 100644 libs/elixir/ext/perform_ddl.py create mode 100644 
libs/elixir/ext/versioned.py create mode 100644 libs/elixir/fields.py create mode 100644 libs/elixir/options.py create mode 100644 libs/elixir/properties.py create mode 100644 libs/elixir/py23compat.py create mode 100644 libs/elixir/relationships.py create mode 100644 libs/elixir/statements.py create mode 100644 libs/flask/__init__.py create mode 100644 libs/flask/app.py create mode 100644 libs/flask/config.py create mode 100644 libs/flask/ctx.py create mode 100644 libs/flask/globals.py create mode 100644 libs/flask/helpers.py create mode 100644 libs/flask/logging.py create mode 100644 libs/flask/module.py create mode 100644 libs/flask/session.py create mode 100644 libs/flask/signals.py create mode 100644 libs/flask/templating.py create mode 100644 libs/flask/testing.py create mode 100644 libs/flask/wrappers.py create mode 100644 libs/getmeta.py create mode 100644 libs/hachoir_core/__init__.py create mode 100644 libs/hachoir_core/benchmark.py create mode 100644 libs/hachoir_core/bits.py create mode 100644 libs/hachoir_core/cmd_line.py create mode 100644 libs/hachoir_core/compatibility.py create mode 100644 libs/hachoir_core/config.py create mode 100644 libs/hachoir_core/dict.py create mode 100644 libs/hachoir_core/endian.py create mode 100644 libs/hachoir_core/error.py create mode 100644 libs/hachoir_core/event_handler.py create mode 100644 libs/hachoir_core/field/__init__.py create mode 100644 libs/hachoir_core/field/basic_field_set.py create mode 100644 libs/hachoir_core/field/bit_field.py create mode 100644 libs/hachoir_core/field/byte_field.py create mode 100644 libs/hachoir_core/field/character.py create mode 100644 libs/hachoir_core/field/enum.py create mode 100644 libs/hachoir_core/field/fake_array.py create mode 100644 libs/hachoir_core/field/field.py create mode 100644 libs/hachoir_core/field/field_set.py create mode 100644 libs/hachoir_core/field/float.py create mode 100644 libs/hachoir_core/field/generic_field_set.py create mode 100644 
libs/hachoir_core/field/helper.py create mode 100644 libs/hachoir_core/field/integer.py create mode 100644 libs/hachoir_core/field/link.py create mode 100644 libs/hachoir_core/field/new_seekable_field_set.py create mode 100644 libs/hachoir_core/field/padding.py create mode 100644 libs/hachoir_core/field/parser.py create mode 100644 libs/hachoir_core/field/seekable_field_set.py create mode 100644 libs/hachoir_core/field/static_field_set.py create mode 100644 libs/hachoir_core/field/string_field.py create mode 100644 libs/hachoir_core/field/sub_file.py create mode 100644 libs/hachoir_core/field/timestamp.py create mode 100644 libs/hachoir_core/field/vector.py create mode 100644 libs/hachoir_core/i18n.py create mode 100644 libs/hachoir_core/iso639.py create mode 100644 libs/hachoir_core/language.py create mode 100644 libs/hachoir_core/log.py create mode 100644 libs/hachoir_core/memory.py create mode 100644 libs/hachoir_core/profiler.py create mode 100644 libs/hachoir_core/stream/__init__.py create mode 100644 libs/hachoir_core/stream/input.py create mode 100644 libs/hachoir_core/stream/input_helper.py create mode 100644 libs/hachoir_core/stream/output.py create mode 100644 libs/hachoir_core/stream/stream.py create mode 100644 libs/hachoir_core/text_handler.py create mode 100644 libs/hachoir_core/timeout.py create mode 100644 libs/hachoir_core/tools.py create mode 100644 libs/hachoir_core/version.py create mode 100644 libs/hachoir_metadata/__init__.py create mode 100644 libs/hachoir_metadata/archive.py create mode 100644 libs/hachoir_metadata/audio.py create mode 100644 libs/hachoir_metadata/config.py create mode 100644 libs/hachoir_metadata/file_system.py create mode 100644 libs/hachoir_metadata/filter.py create mode 100644 libs/hachoir_metadata/formatter.py create mode 100644 libs/hachoir_metadata/image.py create mode 100644 libs/hachoir_metadata/jpeg.py create mode 100644 libs/hachoir_metadata/metadata.py create mode 100644 libs/hachoir_metadata/metadata_item.py 
create mode 100644 libs/hachoir_metadata/misc.py create mode 100644 libs/hachoir_metadata/program.py create mode 100644 libs/hachoir_metadata/qt/__init__.py create mode 100644 libs/hachoir_metadata/qt/dialog.ui create mode 100644 libs/hachoir_metadata/register.py create mode 100644 libs/hachoir_metadata/riff.py create mode 100644 libs/hachoir_metadata/safe.py create mode 100644 libs/hachoir_metadata/setter.py create mode 100644 libs/hachoir_metadata/timezone.py create mode 100644 libs/hachoir_metadata/version.py create mode 100644 libs/hachoir_metadata/video.py create mode 100644 libs/hachoir_parser/__init__.py create mode 100644 libs/hachoir_parser/archive/__init__.py create mode 100644 libs/hachoir_parser/archive/ace.py create mode 100644 libs/hachoir_parser/archive/ar.py create mode 100644 libs/hachoir_parser/archive/bzip2_parser.py create mode 100644 libs/hachoir_parser/archive/cab.py create mode 100644 libs/hachoir_parser/archive/gzip_parser.py create mode 100644 libs/hachoir_parser/archive/mar.py create mode 100644 libs/hachoir_parser/archive/rar.py create mode 100644 libs/hachoir_parser/archive/rpm.py create mode 100644 libs/hachoir_parser/archive/sevenzip.py create mode 100644 libs/hachoir_parser/archive/tar.py create mode 100644 libs/hachoir_parser/archive/zip.py create mode 100644 libs/hachoir_parser/audio/__init__.py create mode 100644 libs/hachoir_parser/audio/aiff.py create mode 100644 libs/hachoir_parser/audio/au.py create mode 100644 libs/hachoir_parser/audio/flac.py create mode 100644 libs/hachoir_parser/audio/id3.py create mode 100644 libs/hachoir_parser/audio/itunesdb.py create mode 100644 libs/hachoir_parser/audio/midi.py create mode 100644 libs/hachoir_parser/audio/mod.py create mode 100644 libs/hachoir_parser/audio/modplug.py create mode 100644 libs/hachoir_parser/audio/mpeg_audio.py create mode 100644 libs/hachoir_parser/audio/real_audio.py create mode 100644 libs/hachoir_parser/audio/s3m.py create mode 100644 libs/hachoir_parser/audio/xm.py 
create mode 100644 libs/hachoir_parser/common/__init__.py create mode 100644 libs/hachoir_parser/common/deflate.py create mode 100644 libs/hachoir_parser/common/msdos.py create mode 100644 libs/hachoir_parser/common/tracker.py create mode 100644 libs/hachoir_parser/common/win32.py create mode 100644 libs/hachoir_parser/common/win32_lang_id.py create mode 100644 libs/hachoir_parser/container/__init__.py create mode 100644 libs/hachoir_parser/container/action_script.py create mode 100644 libs/hachoir_parser/container/asn1.py create mode 100644 libs/hachoir_parser/container/mkv.py create mode 100644 libs/hachoir_parser/container/ogg.py create mode 100644 libs/hachoir_parser/container/realmedia.py create mode 100644 libs/hachoir_parser/container/riff.py create mode 100644 libs/hachoir_parser/container/swf.py create mode 100644 libs/hachoir_parser/file_system/__init__.py create mode 100644 libs/hachoir_parser/file_system/ext2.py create mode 100644 libs/hachoir_parser/file_system/fat.py create mode 100644 libs/hachoir_parser/file_system/iso9660.py create mode 100644 libs/hachoir_parser/file_system/linux_swap.py create mode 100644 libs/hachoir_parser/file_system/mbr.py create mode 100644 libs/hachoir_parser/file_system/ntfs.py create mode 100644 libs/hachoir_parser/file_system/reiser_fs.py create mode 100644 libs/hachoir_parser/game/__init__.py create mode 100644 libs/hachoir_parser/game/blp.py create mode 100644 libs/hachoir_parser/game/laf.py create mode 100644 libs/hachoir_parser/game/spider_man_video.py create mode 100644 libs/hachoir_parser/game/zsnes.py create mode 100644 libs/hachoir_parser/guess.py create mode 100644 libs/hachoir_parser/image/__init__.py create mode 100644 libs/hachoir_parser/image/bmp.py create mode 100644 libs/hachoir_parser/image/common.py create mode 100644 libs/hachoir_parser/image/exif.py create mode 100644 libs/hachoir_parser/image/gif.py create mode 100644 libs/hachoir_parser/image/ico.py create mode 100644 
libs/hachoir_parser/image/iptc.py create mode 100644 libs/hachoir_parser/image/jpeg.py create mode 100644 libs/hachoir_parser/image/pcx.py create mode 100644 libs/hachoir_parser/image/photoshop_metadata.py create mode 100644 libs/hachoir_parser/image/png.py create mode 100644 libs/hachoir_parser/image/psd.py create mode 100644 libs/hachoir_parser/image/tga.py create mode 100644 libs/hachoir_parser/image/tiff.py create mode 100644 libs/hachoir_parser/image/wmf.py create mode 100644 libs/hachoir_parser/image/xcf.py create mode 100644 libs/hachoir_parser/misc/__init__.py create mode 100644 libs/hachoir_parser/misc/bplist.py create mode 100644 libs/hachoir_parser/misc/chm.py create mode 100644 libs/hachoir_parser/misc/common.py create mode 100644 libs/hachoir_parser/misc/file_3do.py create mode 100644 libs/hachoir_parser/misc/file_3ds.py create mode 100644 libs/hachoir_parser/misc/gnome_keyring.py create mode 100644 libs/hachoir_parser/misc/hlp.py create mode 100644 libs/hachoir_parser/misc/lnk.py create mode 100644 libs/hachoir_parser/misc/msoffice.py create mode 100644 libs/hachoir_parser/misc/msoffice_summary.py create mode 100644 libs/hachoir_parser/misc/ole2.py create mode 100644 libs/hachoir_parser/misc/pcf.py create mode 100644 libs/hachoir_parser/misc/pdf.py create mode 100644 libs/hachoir_parser/misc/pifv.py create mode 100644 libs/hachoir_parser/misc/torrent.py create mode 100644 libs/hachoir_parser/misc/ttf.py create mode 100644 libs/hachoir_parser/misc/word_doc.py create mode 100644 libs/hachoir_parser/network/__init__.py create mode 100644 libs/hachoir_parser/network/common.py create mode 100644 libs/hachoir_parser/network/ouid.py create mode 100644 libs/hachoir_parser/network/tcpdump.py create mode 100644 libs/hachoir_parser/parser.py create mode 100644 libs/hachoir_parser/parser_list.py create mode 100644 libs/hachoir_parser/program/__init__.py create mode 100644 libs/hachoir_parser/program/elf.py create mode 100644 libs/hachoir_parser/program/exe.py 
create mode 100644 libs/hachoir_parser/program/exe_ne.py create mode 100644 libs/hachoir_parser/program/exe_pe.py create mode 100644 libs/hachoir_parser/program/exe_res.py create mode 100644 libs/hachoir_parser/program/java.py create mode 100644 libs/hachoir_parser/program/prc.py create mode 100644 libs/hachoir_parser/program/python.py create mode 100644 libs/hachoir_parser/template.py create mode 100644 libs/hachoir_parser/version.py create mode 100644 libs/hachoir_parser/video/__init__.py create mode 100644 libs/hachoir_parser/video/amf.py create mode 100644 libs/hachoir_parser/video/asf.py create mode 100644 libs/hachoir_parser/video/flv.py create mode 100644 libs/hachoir_parser/video/fourcc.py create mode 100644 libs/hachoir_parser/video/mov.py create mode 100644 libs/hachoir_parser/video/mpeg_ts.py create mode 100644 libs/hachoir_parser/video/mpeg_video.py create mode 100644 libs/imdb/Character.py create mode 100644 libs/imdb/Company.py create mode 100644 libs/imdb/Movie.py create mode 100644 libs/imdb/Person.py create mode 100644 libs/imdb/__init__.py create mode 100644 libs/imdb/_compat.py create mode 100644 libs/imdb/_exceptions.py create mode 100644 libs/imdb/_logging.py create mode 100644 libs/imdb/articles.py create mode 100644 libs/imdb/helpers.py create mode 100644 libs/imdb/locale/__init__.py create mode 100644 libs/imdb/locale/generatepot.py create mode 100644 libs/imdb/locale/msgfmt.py create mode 100644 libs/imdb/locale/rebuildmo.py create mode 100644 libs/imdb/parser/__init__.py create mode 100644 libs/imdb/parser/http/__init__.py create mode 100644 libs/imdb/parser/http/bsouplxml/__init__.py create mode 100644 libs/imdb/parser/http/bsouplxml/_bsoup.py create mode 100644 libs/imdb/parser/http/bsouplxml/bsoupxpath.py create mode 100644 libs/imdb/parser/http/bsouplxml/etree.py create mode 100644 libs/imdb/parser/http/bsouplxml/html.py create mode 100644 libs/imdb/parser/http/characterParser.py create mode 100644 
libs/imdb/parser/http/companyParser.py create mode 100644 libs/imdb/parser/http/movieParser.py create mode 100644 libs/imdb/parser/http/personParser.py create mode 100644 libs/imdb/parser/http/searchCharacterParser.py create mode 100644 libs/imdb/parser/http/searchCompanyParser.py create mode 100644 libs/imdb/parser/http/searchKeywordParser.py create mode 100644 libs/imdb/parser/http/searchMovieParser.py create mode 100644 libs/imdb/parser/http/searchPersonParser.py create mode 100644 libs/imdb/parser/http/topBottomParser.py create mode 100644 libs/imdb/parser/http/utils.py create mode 100644 libs/imdb/parser/mobile/__init__.py create mode 100644 libs/imdb/utils.py create mode 100644 libs/jinja2/__init__.py create mode 100644 libs/jinja2/_debugsupport.c create mode 100644 libs/jinja2/_markupsafe/__init__.py create mode 100644 libs/jinja2/_markupsafe/_bundle.py create mode 100644 libs/jinja2/_markupsafe/_constants.py create mode 100644 libs/jinja2/_markupsafe/_native.py create mode 100644 libs/jinja2/_markupsafe/tests.py create mode 100644 libs/jinja2/_stringdefs.py create mode 100644 libs/jinja2/bccache.py create mode 100644 libs/jinja2/compiler.py create mode 100644 libs/jinja2/constants.py create mode 100644 libs/jinja2/debug.py create mode 100644 libs/jinja2/defaults.py create mode 100644 libs/jinja2/environment.py create mode 100644 libs/jinja2/exceptions.py create mode 100644 libs/jinja2/ext.py create mode 100644 libs/jinja2/filters.py create mode 100644 libs/jinja2/lexer.py create mode 100644 libs/jinja2/loaders.py create mode 100644 libs/jinja2/meta.py create mode 100644 libs/jinja2/nodes.py create mode 100644 libs/jinja2/optimizer.py create mode 100644 libs/jinja2/parser.py create mode 100644 libs/jinja2/runtime.py create mode 100644 libs/jinja2/sandbox.py create mode 100644 libs/jinja2/tests.py create mode 100644 libs/jinja2/testsuite/__init__.py create mode 100644 libs/jinja2/testsuite/api.py create mode 100644 libs/jinja2/testsuite/core_tags.py create 
mode 100644 libs/jinja2/testsuite/debug.py create mode 100644 libs/jinja2/testsuite/doctests.py create mode 100644 libs/jinja2/testsuite/ext.py create mode 100644 libs/jinja2/testsuite/filters.py create mode 100644 libs/jinja2/testsuite/imports.py create mode 100644 libs/jinja2/testsuite/inheritance.py create mode 100644 libs/jinja2/testsuite/lexnparse.py create mode 100644 libs/jinja2/testsuite/loader.py create mode 100644 libs/jinja2/testsuite/regression.py create mode 100644 libs/jinja2/testsuite/res/__init__.py create mode 100644 libs/jinja2/testsuite/res/templates/broken.html create mode 100644 libs/jinja2/testsuite/res/templates/foo/test.html create mode 100644 libs/jinja2/testsuite/res/templates/syntaxerror.html create mode 100644 libs/jinja2/testsuite/res/templates/test.html create mode 100644 libs/jinja2/testsuite/security.py create mode 100644 libs/jinja2/testsuite/tests.py create mode 100644 libs/jinja2/testsuite/utils.py create mode 100644 libs/jinja2/utils.py create mode 100644 libs/jinja2/visitor.py create mode 100644 libs/migrate/__init__.py create mode 100644 libs/migrate/changeset/__init__.py create mode 100644 libs/migrate/changeset/ansisql.py create mode 100644 libs/migrate/changeset/constraint.py create mode 100644 libs/migrate/changeset/databases/__init__.py create mode 100644 libs/migrate/changeset/databases/firebird.py create mode 100644 libs/migrate/changeset/databases/mysql.py create mode 100644 libs/migrate/changeset/databases/oracle.py create mode 100644 libs/migrate/changeset/databases/postgres.py create mode 100644 libs/migrate/changeset/databases/sqlite.py create mode 100644 libs/migrate/changeset/databases/visitor.py create mode 100644 libs/migrate/changeset/schema.py create mode 100644 libs/migrate/exceptions.py create mode 100644 libs/migrate/versioning/__init__.py create mode 100644 libs/migrate/versioning/api.py create mode 100644 libs/migrate/versioning/cfgparse.py create mode 100644 libs/migrate/versioning/config.py create mode 
100644 libs/migrate/versioning/genmodel.py create mode 100644 libs/migrate/versioning/migrate_repository.py create mode 100644 libs/migrate/versioning/pathed.py create mode 100644 libs/migrate/versioning/repository.py create mode 100644 libs/migrate/versioning/schema.py create mode 100644 libs/migrate/versioning/schemadiff.py create mode 100644 libs/migrate/versioning/script/__init__.py create mode 100644 libs/migrate/versioning/script/base.py create mode 100644 libs/migrate/versioning/script/py.py create mode 100644 libs/migrate/versioning/script/sql.py create mode 100644 libs/migrate/versioning/shell.py create mode 100644 libs/migrate/versioning/template.py create mode 100644 libs/migrate/versioning/templates/__init__.py create mode 100644 libs/migrate/versioning/templates/manage.py_tmpl create mode 100644 libs/migrate/versioning/templates/manage/default.py_tmpl create mode 100644 libs/migrate/versioning/templates/manage/pylons.py_tmpl create mode 100644 libs/migrate/versioning/templates/repository/__init__.py create mode 100644 libs/migrate/versioning/templates/repository/default/README create mode 100644 libs/migrate/versioning/templates/repository/default/__init__.py create mode 100644 libs/migrate/versioning/templates/repository/default/migrate.cfg create mode 100644 libs/migrate/versioning/templates/repository/default/versions/__init__.py create mode 100644 libs/migrate/versioning/templates/repository/pylons/README create mode 100644 libs/migrate/versioning/templates/repository/pylons/__init__.py create mode 100644 libs/migrate/versioning/templates/repository/pylons/migrate.cfg create mode 100644 libs/migrate/versioning/templates/repository/pylons/versions/__init__.py create mode 100644 libs/migrate/versioning/templates/script/__init__.py create mode 100644 libs/migrate/versioning/templates/script/default.py_tmpl create mode 100644 libs/migrate/versioning/templates/script/pylons.py_tmpl create mode 100644 
libs/migrate/versioning/templates/sql_script/default.py_tmpl create mode 100644 libs/migrate/versioning/templates/sql_script/pylons.py_tmpl create mode 100644 libs/migrate/versioning/util/__init__.py create mode 100644 libs/migrate/versioning/util/importpath.py create mode 100644 libs/migrate/versioning/util/keyedinstance.py create mode 100644 libs/migrate/versioning/version.py create mode 100644 libs/simplejson/__init__.py create mode 100644 libs/simplejson/_speedups.c create mode 100644 libs/simplejson/decoder.py create mode 100644 libs/simplejson/encoder.py create mode 100644 libs/simplejson/ordered_dict.py create mode 100644 libs/simplejson/scanner.py create mode 100644 libs/simplejson/tool.py create mode 100644 libs/sqlalchemy/__init__.py create mode 100644 libs/sqlalchemy/cextension/processors.c create mode 100644 libs/sqlalchemy/cextension/resultproxy.c create mode 100644 libs/sqlalchemy/connectors/__init__.py create mode 100644 libs/sqlalchemy/connectors/mxodbc.py create mode 100644 libs/sqlalchemy/connectors/pyodbc.py create mode 100644 libs/sqlalchemy/connectors/zxJDBC.py create mode 100644 libs/sqlalchemy/databases/__init__.py create mode 100644 libs/sqlalchemy/dialects/__init__.py create mode 100644 libs/sqlalchemy/dialects/access/__init__.py create mode 100644 libs/sqlalchemy/dialects/access/base.py create mode 100644 libs/sqlalchemy/dialects/firebird/__init__.py create mode 100644 libs/sqlalchemy/dialects/firebird/base.py create mode 100644 libs/sqlalchemy/dialects/firebird/kinterbasdb.py create mode 100644 libs/sqlalchemy/dialects/informix/__init__.py create mode 100644 libs/sqlalchemy/dialects/informix/base.py create mode 100644 libs/sqlalchemy/dialects/informix/informixdb.py create mode 100644 libs/sqlalchemy/dialects/maxdb/__init__.py create mode 100644 libs/sqlalchemy/dialects/maxdb/base.py create mode 100644 libs/sqlalchemy/dialects/maxdb/sapdb.py create mode 100644 libs/sqlalchemy/dialects/mssql/__init__.py create mode 100644 
libs/sqlalchemy/dialects/mssql/adodbapi.py create mode 100644 libs/sqlalchemy/dialects/mssql/base.py create mode 100644 libs/sqlalchemy/dialects/mssql/information_schema.py create mode 100644 libs/sqlalchemy/dialects/mssql/mxodbc.py create mode 100644 libs/sqlalchemy/dialects/mssql/pymssql.py create mode 100644 libs/sqlalchemy/dialects/mssql/pyodbc.py create mode 100644 libs/sqlalchemy/dialects/mssql/zxjdbc.py create mode 100644 libs/sqlalchemy/dialects/mysql/__init__.py create mode 100644 libs/sqlalchemy/dialects/mysql/base.py create mode 100644 libs/sqlalchemy/dialects/mysql/mysqlconnector.py create mode 100644 libs/sqlalchemy/dialects/mysql/mysqldb.py create mode 100644 libs/sqlalchemy/dialects/mysql/oursql.py create mode 100644 libs/sqlalchemy/dialects/mysql/pyodbc.py create mode 100644 libs/sqlalchemy/dialects/mysql/zxjdbc.py create mode 100644 libs/sqlalchemy/dialects/oracle/__init__.py create mode 100644 libs/sqlalchemy/dialects/oracle/base.py create mode 100644 libs/sqlalchemy/dialects/oracle/cx_oracle.py create mode 100644 libs/sqlalchemy/dialects/oracle/zxjdbc.py create mode 100644 libs/sqlalchemy/dialects/postgres.py create mode 100644 libs/sqlalchemy/dialects/postgresql/__init__.py create mode 100644 libs/sqlalchemy/dialects/postgresql/base.py create mode 100644 libs/sqlalchemy/dialects/postgresql/pg8000.py create mode 100644 libs/sqlalchemy/dialects/postgresql/psycopg2.py create mode 100644 libs/sqlalchemy/dialects/postgresql/pypostgresql.py create mode 100644 libs/sqlalchemy/dialects/postgresql/zxjdbc.py create mode 100644 libs/sqlalchemy/dialects/sqlite/__init__.py create mode 100644 libs/sqlalchemy/dialects/sqlite/base.py create mode 100644 libs/sqlalchemy/dialects/sqlite/pysqlite.py create mode 100644 libs/sqlalchemy/dialects/sybase/__init__.py create mode 100644 libs/sqlalchemy/dialects/sybase/base.py create mode 100644 libs/sqlalchemy/dialects/sybase/mxodbc.py create mode 100644 libs/sqlalchemy/dialects/sybase/pyodbc.py create mode 100644 
libs/sqlalchemy/dialects/sybase/pysybase.py create mode 100644 libs/sqlalchemy/dialects/type_migration_guidelines.txt create mode 100644 libs/sqlalchemy/engine/__init__.py create mode 100644 libs/sqlalchemy/engine/base.py create mode 100644 libs/sqlalchemy/engine/ddl.py create mode 100644 libs/sqlalchemy/engine/default.py create mode 100644 libs/sqlalchemy/engine/reflection.py create mode 100644 libs/sqlalchemy/engine/strategies.py create mode 100644 libs/sqlalchemy/engine/threadlocal.py create mode 100644 libs/sqlalchemy/engine/url.py create mode 100644 libs/sqlalchemy/exc.py create mode 100644 libs/sqlalchemy/ext/__init__.py create mode 100644 libs/sqlalchemy/ext/associationproxy.py create mode 100644 libs/sqlalchemy/ext/compiler.py create mode 100755 libs/sqlalchemy/ext/declarative.py create mode 100644 libs/sqlalchemy/ext/horizontal_shard.py create mode 100644 libs/sqlalchemy/ext/orderinglist.py create mode 100644 libs/sqlalchemy/ext/serializer.py create mode 100644 libs/sqlalchemy/ext/sqlsoup.py create mode 100644 libs/sqlalchemy/interfaces.py create mode 100644 libs/sqlalchemy/log.py create mode 100644 libs/sqlalchemy/orm/__init__.py create mode 100644 libs/sqlalchemy/orm/attributes.py create mode 100644 libs/sqlalchemy/orm/collections.py create mode 100644 libs/sqlalchemy/orm/dependency.py create mode 100644 libs/sqlalchemy/orm/dynamic.py create mode 100644 libs/sqlalchemy/orm/evaluator.py create mode 100644 libs/sqlalchemy/orm/exc.py create mode 100644 libs/sqlalchemy/orm/identity.py create mode 100644 libs/sqlalchemy/orm/interfaces.py create mode 100644 libs/sqlalchemy/orm/mapper.py create mode 100644 libs/sqlalchemy/orm/properties.py create mode 100644 libs/sqlalchemy/orm/query.py create mode 100644 libs/sqlalchemy/orm/scoping.py create mode 100644 libs/sqlalchemy/orm/session.py create mode 100644 libs/sqlalchemy/orm/shard.py create mode 100644 libs/sqlalchemy/orm/state.py create mode 100644 libs/sqlalchemy/orm/strategies.py create mode 100644 
libs/sqlalchemy/orm/sync.py create mode 100644 libs/sqlalchemy/orm/unitofwork.py create mode 100644 libs/sqlalchemy/orm/util.py create mode 100644 libs/sqlalchemy/pool.py create mode 100644 libs/sqlalchemy/processors.py create mode 100644 libs/sqlalchemy/queue.py create mode 100644 libs/sqlalchemy/schema.py create mode 100644 libs/sqlalchemy/sql/__init__.py create mode 100644 libs/sqlalchemy/sql/compiler.py create mode 100644 libs/sqlalchemy/sql/expression.py create mode 100644 libs/sqlalchemy/sql/functions.py create mode 100644 libs/sqlalchemy/sql/operators.py create mode 100644 libs/sqlalchemy/sql/util.py create mode 100644 libs/sqlalchemy/sql/visitors.py create mode 100644 libs/sqlalchemy/test/__init__.py create mode 100644 libs/sqlalchemy/test/assertsql.py create mode 100644 libs/sqlalchemy/test/engines.py create mode 100644 libs/sqlalchemy/test/entities.py create mode 100644 libs/sqlalchemy/test/orm.py create mode 100644 libs/sqlalchemy/test/pickleable.py create mode 100644 libs/sqlalchemy/test/profiling.py create mode 100644 libs/sqlalchemy/test/requires.py create mode 100644 libs/sqlalchemy/test/schema.py create mode 100644 libs/sqlalchemy/test/testing.py create mode 100644 libs/sqlalchemy/test/util.py create mode 100644 libs/sqlalchemy/topological.py create mode 100644 libs/sqlalchemy/types.py create mode 100644 libs/sqlalchemy/util.py create mode 100644 libs/tempita/__init__.py create mode 100644 libs/tempita/_looper.py create mode 100644 libs/tempita/compat3.py create mode 100644 libs/werkzeug/__init__.py create mode 100644 libs/werkzeug/_internal.py create mode 100644 libs/werkzeug/contrib/__init__.py create mode 100644 libs/werkzeug/contrib/atom.py create mode 100644 libs/werkzeug/contrib/cache.py create mode 100644 libs/werkzeug/contrib/fixers.py create mode 100644 libs/werkzeug/contrib/iterio.py create mode 100644 libs/werkzeug/contrib/jsrouting.py create mode 100644 libs/werkzeug/contrib/kickstart.py create mode 100644 
libs/werkzeug/contrib/limiter.py create mode 100644 libs/werkzeug/contrib/lint.py create mode 100644 libs/werkzeug/contrib/profiler.py create mode 100644 libs/werkzeug/contrib/securecookie.py create mode 100644 libs/werkzeug/contrib/sessions.py create mode 100644 libs/werkzeug/contrib/testtools.py create mode 100644 libs/werkzeug/contrib/wrappers.py create mode 100644 libs/werkzeug/datastructures.py create mode 100644 libs/werkzeug/debug/__init__.py create mode 100644 libs/werkzeug/debug/console.py create mode 100644 libs/werkzeug/debug/render.py create mode 100644 libs/werkzeug/debug/repr.py create mode 100644 libs/werkzeug/debug/shared/FONT_LICENSE create mode 100755 libs/werkzeug/debug/shared/console.png create mode 100644 libs/werkzeug/debug/shared/debugger.js create mode 100644 libs/werkzeug/debug/shared/jquery.js create mode 100755 libs/werkzeug/debug/shared/less.png create mode 100755 libs/werkzeug/debug/shared/more.png create mode 100755 libs/werkzeug/debug/shared/source.png create mode 100644 libs/werkzeug/debug/shared/style.css create mode 100644 libs/werkzeug/debug/shared/ubuntu.ttf create mode 100644 libs/werkzeug/debug/tbtools.py create mode 100644 libs/werkzeug/debug/utils.py create mode 100644 libs/werkzeug/exceptions.py create mode 100644 libs/werkzeug/formparser.py create mode 100644 libs/werkzeug/http.py create mode 100644 libs/werkzeug/local.py create mode 100644 libs/werkzeug/posixemulation.py create mode 100644 libs/werkzeug/routing.py create mode 100644 libs/werkzeug/script.py create mode 100644 libs/werkzeug/security.py create mode 100644 libs/werkzeug/serving.py create mode 100644 libs/werkzeug/templates.py create mode 100644 libs/werkzeug/test.py create mode 100644 libs/werkzeug/testapp.py create mode 100644 libs/werkzeug/urls.py create mode 100644 libs/werkzeug/useragents.py create mode 100644 libs/werkzeug/utils.py create mode 100644 libs/werkzeug/wrappers.py create mode 100644 libs/werkzeug/wsgi.py create mode 100644 libs/xmg/__init__.py 
create mode 100644 libs/xmg/xmg.py diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index bfc1557..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "libs"] - path = libs - url = git://github.com/CouchPotato/Dependencies.git diff --git a/libs b/libs deleted file mode 160000 index cd3c6a1..0000000 --- a/libs +++ /dev/null @@ -1 +0,0 @@ -Subproject commit cd3c6a199fcb201fb6a68170bc51b44f34de9dd5 diff --git a/libs/README.md b/libs/README.md new file mode 100644 index 0000000..49fbd34 --- /dev/null +++ b/libs/README.md @@ -0,0 +1,4 @@ +Dependencies +=========== + +Holds all dependencies that are required by CouchPotato. diff --git a/libs/__init__.py b/libs/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/libs/__init__.py @@ -0,0 +1 @@ + diff --git a/libs/apscheduler/__init__.py b/libs/apscheduler/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/libs/apscheduler/expressions.py b/libs/apscheduler/expressions.py new file mode 100644 index 0000000..d19754d --- /dev/null +++ b/libs/apscheduler/expressions.py @@ -0,0 +1,176 @@ +""" +This module contains the expressions applicable for CronTrigger's fields. 
+""" +from calendar import monthrange +import re + +from apscheduler.util import asint + +__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', + 'WeekdayPositionExpression') + +WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] + + +class AllExpression(object): + value_re = re.compile(r'\*(?:/(?P\d+))?$') + + def __init__(self, step=None): + self.step = asint(step) + if self.step == 0: + raise ValueError('Increment must be higher than 0') + + def get_next_value(self, date, field): + start = field.get_value(date) + minval = field.get_min(date) + maxval = field.get_max(date) + start = max(start, minval) + + if not self.step: + next = start + else: + distance_to_next = (self.step - (start - minval)) % self.step + next = start + distance_to_next + + if next <= maxval: + return next + + def __str__(self): + if self.step: + return '*/%d' % self.step + return '*' + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self.step) + + +class RangeExpression(AllExpression): + value_re = re.compile( + r'(?P\d+)(?:-(?P\d+))?(?:/(?P\d+))?$') + + def __init__(self, first, last=None, step=None): + AllExpression.__init__(self, step) + first = asint(first) + last = asint(last) + if last is None and step is None: + last = first + if last is not None and first > last: + raise ValueError('The minimum value in a range must not be ' + 'higher than the maximum') + self.first = first + self.last = last + + def get_next_value(self, date, field): + start = field.get_value(date) + minval = field.get_min(date) + maxval = field.get_max(date) + + # Apply range limits + minval = max(minval, self.first) + if self.last is not None: + maxval = min(maxval, self.last) + start = max(start, minval) + + if not self.step: + next = start + else: + distance_to_next = (self.step - (start - minval)) % self.step + next = start + distance_to_next + + if next <= maxval: + return next + + def __str__(self): + if self.last != self.first and self.last is not None: + 
range = '%d-%d' % (self.first, self.last) + else: + range = str(self.first) + + if self.step: + return '%s/%d' % (range, self.step) + return range + + def __repr__(self): + args = [str(self.first)] + if self.last != self.first and self.last is not None or self.step: + args.append(str(self.last)) + if self.step: + args.append(str(self.step)) + return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) + + +class WeekdayRangeExpression(RangeExpression): + value_re = re.compile(r'(?P[a-z]+)(?:-(?P[a-z]+))?', + re.IGNORECASE) + + def __init__(self, first, last=None): + try: + first_num = WEEKDAYS.index(first.lower()) + except ValueError: + raise ValueError('Invalid weekday name "%s"' % first) + + if last: + try: + last_num = WEEKDAYS.index(last.lower()) + except ValueError: + raise ValueError('Invalid weekday name "%s"' % last) + else: + last_num = None + + RangeExpression.__init__(self, first_num, last_num) + + def __str__(self): + if self.last != self.first and self.last is not None: + return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last]) + return WEEKDAYS[self.first] + + def __repr__(self): + args = ["'%s'" % WEEKDAYS[self.first]] + if self.last != self.first and self.last is not None: + args.append("'%s'" % WEEKDAYS[self.last]) + return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) + + +class WeekdayPositionExpression(AllExpression): + options = ['1st', '2nd', '3rd', '4th', '5th', 'last'] + value_re = re.compile(r'(?P%s) +(?P(?:\d+|\w+))' + % '|'.join(options), re.IGNORECASE) + + def __init__(self, option_name, weekday_name): + try: + self.option_num = self.options.index(option_name.lower()) + except ValueError: + raise ValueError('Invalid weekday position "%s"' % option_name) + + try: + self.weekday = WEEKDAYS.index(weekday_name.lower()) + except ValueError: + raise ValueError('Invalid weekday name "%s"' % weekday_name) + + def get_next_value(self, date, field): + # Figure out the weekday of the month's first day and the number + # of days in that 
month + first_day_wday, last_day = monthrange(date.year, date.month) + + # Calculate which day of the month is the first of the target weekdays + first_hit_day = self.weekday - first_day_wday + 1 + if first_hit_day <= 0: + first_hit_day += 7 + + # Calculate what day of the month the target weekday would be + if self.option_num < 5: + target_day = first_hit_day + self.option_num * 7 + else: + target_day = first_hit_day + ((last_day - first_hit_day) / 7) * 7 + + if target_day <= last_day and target_day >= date.day: + return target_day + + def __str__(self): + return '%s %s' % (self.options[self.option_num], + WEEKDAYS[self.weekday]) + + def __repr__(self): + return "%s('%s', '%s')" % (self.__class__.__name__, + self.options[self.option_num], + WEEKDAYS[self.weekday]) diff --git a/libs/apscheduler/fields.py b/libs/apscheduler/fields.py new file mode 100644 index 0000000..ebc35f5 --- /dev/null +++ b/libs/apscheduler/fields.py @@ -0,0 +1,92 @@ +""" +Fields represent :class:`~apscheduler.triggers.CronTrigger` options which map +to :class:`~datetime.datetime` fields. 
+""" +from calendar import monthrange + +from apscheduler.expressions import * + +__all__ = ('BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField') + +MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, + 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0} +MAX_VALUES = {'year': 2 ** 63, 'month': 12, 'day:': 31, 'week': 53, + 'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59} + +class BaseField(object): + REAL = True + COMPILERS = [AllExpression, RangeExpression] + + def __init__(self, name, exprs): + self.name = name + self.compile_expressions(exprs) + + def get_min(self, dateval): + return MIN_VALUES[self.name] + + def get_max(self, dateval): + return MAX_VALUES[self.name] + + def get_value(self, dateval): + return getattr(dateval, self.name) + + def get_next_value(self, dateval): + smallest = None + for expr in self.expressions: + value = expr.get_next_value(dateval, self) + if smallest is None or (value is not None and value < smallest): + smallest = value + + return smallest + + def compile_expressions(self, exprs): + self.expressions = [] + + # Split a comma-separated expression list, if any + exprs = str(exprs).strip() + if ',' in exprs: + for expr in exprs.split(','): + self.compile_expression(expr) + else: + self.compile_expression(exprs) + + def compile_expression(self, expr): + for compiler in self.COMPILERS: + match = compiler.value_re.match(expr) + if match: + compiled_expr = compiler(**match.groupdict()) + self.expressions.append(compiled_expr) + return + + raise ValueError('Unrecognized expression "%s" for field "%s"' % + (expr, self.name)) + + def __str__(self): + expr_strings = (str(e) for e in self.expressions) + return ','.join(expr_strings) + + def __repr__(self): + return "%s('%s', '%s')" % (self.__class__.__name__, self.name, + str(self)) + + +class WeekField(BaseField): + REAL = False + + def get_value(self, dateval): + return dateval.isocalendar()[1] + + +class DayOfMonthField(BaseField): + COMPILERS = 
BaseField.COMPILERS + [WeekdayPositionExpression] + + def get_max(self, dateval): + return monthrange(dateval.year, dateval.month)[1] + + +class DayOfWeekField(BaseField): + REAL = False + COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression] + + def get_value(self, dateval): + return dateval.weekday() diff --git a/libs/apscheduler/scheduler.py b/libs/apscheduler/scheduler.py new file mode 100644 index 0000000..cc50f49 --- /dev/null +++ b/libs/apscheduler/scheduler.py @@ -0,0 +1,407 @@ +""" +This module is the main part of the library, and is the only module that +regular users should be concerned with. +""" +from threading import Thread, Event, Lock +from datetime import datetime, timedelta +from logging import getLogger +import os + +from apscheduler.util import time_difference, asbool +from apscheduler.triggers import DateTrigger, IntervalTrigger, CronTrigger + + +logger = getLogger(__name__) + + +class Job(object): + """ + Represents a task scheduled in the scheduler. + """ + + def __init__(self, trigger, func, args, kwargs): + self.thread = None + self.trigger = trigger + self.func = func + self.args = args + self.kwargs = kwargs + if hasattr(func, '__name__'): + self.name = func.__name__ + else: + self.name = str(func) + + def run(self): + """ + Starts the execution of this job in a separate thread. + """ + if (self.thread and self.thread.isAlive()): + logger.info('Skipping run of job %s (previously triggered ' + 'instance is still running)', self) + else: + self.thread = Thread(target=self.run_in_thread) + self.thread.setDaemon(False) + self.thread.start() + + def run_in_thread(self): + """ + Runs the associated callable. + This method is executed in a dedicated thread. 
+ """ + try: + self.func(*self.args, **self.kwargs) + except: + logger.exception('Error executing job "%s"', self) + raise + + def __str__(self): + return '%s: %s' % (self.name, repr(self.trigger)) + + def __repr__(self): + return '%s(%s, %s)' % (self.__class__.__name__, self.name, + repr(self.trigger)) + + +class SchedulerShutdownError(Exception): + """ + Thrown when attempting to use the scheduler after + it's been shut down. + """ + + def __init__(self): + Exception.__init__(self, 'Scheduler has already been shut down') + + +class SchedulerAlreadyRunningError(Exception): + """ + Thrown when attempting to start the scheduler, but it's already running. + """ + + def __init__(self): + Exception.__init__(self, 'Scheduler is already running') + + +class Scheduler(object): + """ + This class is responsible for scheduling jobs and triggering + their execution. + """ + + stopped = False + thread = None + misfire_grace_time = 1 + daemonic = True + + def __init__(self, **config): + self.jobs = [] + self.jobs_lock = Lock() + self.wakeup = Event() + self.configure(config) + + def configure(self, config): + """ + Updates the configuration with the given options. + """ + for key, val in config.items(): + if key.startswith('apscheduler.'): + key = key[12:] + if key == 'misfire_grace_time': + self.misfire_grace_time = int(val) + elif key == 'daemonic': + self.daemonic = asbool(val) + + def start(self): + """ + Starts the scheduler in a new thread. + """ + if self.thread and self.thread.isAlive(): + raise SchedulerAlreadyRunningError + + self.stopped = False + self.thread = Thread(target=self.run, name='APScheduler') + self.thread.setDaemon(self.daemonic) + self.thread.start() + logger.info('Scheduler started') + + def shutdown(self, timeout=0): + """ + Shuts down the scheduler and terminates the thread. + Does not terminate any currently running jobs. 
+ + :param timeout: time (in seconds) to wait for the scheduler thread to + terminate, 0 to wait forever, None to skip waiting + """ + if self.stopped or not self.thread.isAlive(): + return + + logger.info('Scheduler shutting down') + self.stopped = True + self.wakeup.set() + if timeout is not None: + self.thread.join(timeout) + self.jobs = [] + + def cron_schedule(self, year='*', month='*', day='*', week='*', + day_of_week='*', hour='*', minute='*', second='*', + args=None, kwargs=None): + """ + Decorator that causes its host function to be scheduled + according to the given parameters. + This decorator does not wrap its host function. + The scheduled function will be called without any arguments. + See :meth:`add_cron_job` for more information. + """ + def inner(func): + self.add_cron_job(func, year, month, day, week, day_of_week, hour, + minute, second, args, kwargs) + return func + return inner + + def interval_schedule(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, + start_date=None, repeat=0, args=None, kwargs=None): + """ + Decorator that causes its host function to be scheduled + for execution on specified intervals. + This decorator does not wrap its host function. + The scheduled function will be called without any arguments. + Note that the default repeat value is 0, which means to repeat forever. + See :meth:`add_delayed_job` for more information. + """ + def inner(func): + self.add_interval_job(func, weeks, days, hours, minutes, seconds, + start_date, repeat, args, kwargs) + return func + return inner + + def _add_job(self, trigger, func, args, kwargs): + """ + Adds a Job to the job list and notifies the scheduler thread. 
+ + :param trigger: trigger for the given callable + :param args: list of positional arguments to call func with + :param kwargs: dict of keyword arguments to call func with + :return: the scheduled job + :rtype: Job + """ + if self.stopped: + raise SchedulerShutdownError + if not hasattr(func, '__call__'): + raise TypeError('func must be callable') + + if args is None: + args = [] + if kwargs is None: + kwargs = {} + + job = Job(trigger, func, args, kwargs) + self.jobs_lock.acquire() + try: + self.jobs.append(job) + finally: + self.jobs_lock.release() + logger.info('Added job "%s"', job) + + # Notify the scheduler about the new job + self.wakeup.set() + + return job + + def add_date_job(self, func, date, args=None, kwargs=None): + """ + Adds a job to be completed on a specific date and time. + + :param func: callable to run + :param args: positional arguments to call func with + :param kwargs: keyword arguments to call func with + """ + trigger = DateTrigger(date) + return self._add_job(trigger, func, args, kwargs) + + def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0, + seconds=0, start_date=None, repeat=0, args=None, + kwargs=None): + """ + Adds a job to be completed on specified intervals. 
+ + :param func: callable to run + :param weeks: number of weeks to wait + :param days: number of days to wait + :param hours: number of hours to wait + :param minutes: number of minutes to wait + :param seconds: number of seconds to wait + :param start_date: when to first execute the job and start the + counter (default is after the given interval) + :param repeat: number of times the job will be run (0 = repeat + indefinitely) + :param args: list of positional arguments to call func with + :param kwargs: dict of keyword arguments to call func with + """ + interval = timedelta(weeks=weeks, days=days, hours=hours, + minutes=minutes, seconds=seconds) + trigger = IntervalTrigger(interval, repeat, start_date) + return self._add_job(trigger, func, args, kwargs) + + def add_cron_job(self, func, year='*', month='*', day='*', week='*', + day_of_week='*', hour='*', minute='*', second='*', + args=None, kwargs=None): + """ + Adds a job to be completed on times that match the given expressions. + + :param func: callable to run + :param year: year to run on + :param month: month to run on (0 = January) + :param day: day of month to run on + :param week: week of the year to run on + :param day_of_week: weekday to run on (0 = Monday) + :param hour: hour to run on + :param second: second to run on + :param args: list of positional arguments to call func with + :param kwargs: dict of keyword arguments to call func with + :return: the scheduled job + :rtype: Job + """ + trigger = CronTrigger(year=year, month=month, day=day, week=week, + day_of_week=day_of_week, hour=hour, + minute=minute, second=second) + return self._add_job(trigger, func, args, kwargs) + + def is_job_active(self, job): + """ + Determines if the given job is still on the job list. 
+ + :return: True if the job is still active, False if not + """ + self.jobs_lock.acquire() + try: + return job in self.jobs + finally: + self.jobs_lock.release() + + def unschedule_job(self, job): + """ + Removes a job, preventing it from being fired any more. + """ + self.jobs_lock.acquire() + try: + self.jobs.remove(job) + finally: + self.jobs_lock.release() + logger.info('Removed job "%s"', job) + self.wakeup.set() + + def unschedule_func(self, func): + """ + Removes all jobs that would execute the given function. + """ + self.jobs_lock.acquire() + try: + remove_list = [job for job in self.jobs if job.func == func] + for job in remove_list: + self.jobs.remove(job) + logger.info('Removed job "%s"', job) + finally: + self.jobs_lock.release() + + # Have the scheduler calculate a new wakeup time + self.wakeup.set() + + def dump_jobs(self): + """ + Gives a textual listing of all jobs currently scheduled on this + scheduler. + + :rtype: str + """ + job_strs = [] + now = datetime.now() + self.jobs_lock.acquire() + try: + for job in self.jobs: + next_fire_time = job.trigger.get_next_fire_time(now) + job_str = '%s (next fire time: %s)' % (str(job), + next_fire_time) + job_strs.append(job_str) + finally: + self.jobs_lock.release() + + if job_strs: + return os.linesep.join(job_strs) + return 'No jobs currently scheduled.' + + def _get_next_wakeup_time(self, now): + """ + Determines the time of the next job execution, and removes finished + jobs. + + :param now: the result of datetime.now(), generated elsewhere for + consistency. 
+ """ + next_wakeup = None + finished_jobs = [] + + self.jobs_lock.acquire() + try: + for job in self.jobs: + next_run = job.trigger.get_next_fire_time(now) + if next_run is None: + finished_jobs.append(job) + elif next_run and (next_wakeup is None or \ + next_run < next_wakeup): + next_wakeup = next_run + + # Clear out any finished jobs + for job in finished_jobs: + self.jobs.remove(job) + logger.info('Removed finished job "%s"', job) + finally: + self.jobs_lock.release() + + return next_wakeup + + def _get_current_jobs(self): + """ + Determines which jobs should be executed right now. + """ + current_jobs = [] + now = datetime.now() + start = now - timedelta(seconds=self.misfire_grace_time) + + self.jobs_lock.acquire() + try: + for job in self.jobs: + next_run = job.trigger.get_next_fire_time(start) + if next_run: + time_diff = time_difference(now, next_run) + if next_run < now and time_diff <= self.misfire_grace_time: + current_jobs.append(job) + finally: + self.jobs_lock.release() + + return current_jobs + + def run(self): + """ + Runs the main loop of the scheduler. 
+ """ + self.wakeup.clear() + while not self.stopped: + # Execute any jobs scheduled to be run right now + for job in self._get_current_jobs(): + logger.debug('Executing job "%s"', job) + job.run() + + # Figure out when the next job should be run, and + # adjust the wait time accordingly + now = datetime.now() + next_wakeup_time = self._get_next_wakeup_time(now) + + # Sleep until the next job is scheduled to be run, + # or a new job is added, or the scheduler is stopped + if next_wakeup_time is not None: + wait_seconds = time_difference(next_wakeup_time, now) + logger.debug('Next wakeup is due at %s (in %f seconds)', + next_wakeup_time, wait_seconds) + self.wakeup.wait(wait_seconds) + else: + logger.debug('No jobs; waiting until a job is added') + self.wakeup.wait() + self.wakeup.clear() diff --git a/libs/apscheduler/triggers.py b/libs/apscheduler/triggers.py new file mode 100644 index 0000000..9886b2b --- /dev/null +++ b/libs/apscheduler/triggers.py @@ -0,0 +1,171 @@ +""" +Triggers determine the times when a job should be executed. +""" +from datetime import datetime, timedelta +from math import ceil + +from apscheduler.fields import * +from apscheduler.util import * + +__all__ = ('CronTrigger', 'DateTrigger', 'IntervalTrigger') + + +class CronTrigger(object): + FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', + 'minute', 'second') + FIELDS_MAP = {'year': BaseField, + 'month': BaseField, + 'week': WeekField, + 'day': DayOfMonthField, + 'day_of_week': DayOfWeekField, + 'hour': BaseField, + 'minute': BaseField, + 'second': BaseField} + + def __init__(self, **values): + self.fields = [] + for field_name in self.FIELD_NAMES: + exprs = values.get(field_name) or '*' + field_class = self.FIELDS_MAP[field_name] + field = field_class(field_name, exprs) + self.fields.append(field) + + def _increment_field_value(self, dateval, fieldnum): + """ + Increments the designated field and resets all less significant fields + to their minimum values. 
+ + :type dateval: datetime + :type fieldnum: int + :type amount: int + :rtype: tuple + :return: a tuple containing the new date, and the number of the field + that was actually incremented + """ + i = 0 + values = {} + while i < len(self.fields): + field = self.fields[i] + if not field.REAL: + if i == fieldnum: + fieldnum -= 1 + i -= 1 + else: + i += 1 + continue + + if i < fieldnum: + values[field.name] = field.get_value(dateval) + i += 1 + elif i > fieldnum: + values[field.name] = field.get_min(dateval) + i += 1 + else: + value = field.get_value(dateval) + maxval = field.get_max(dateval) + if value == maxval: + fieldnum -= 1 + i -= 1 + else: + values[field.name] = value + 1 + i += 1 + + return datetime(**values), fieldnum + + def _set_field_value(self, dateval, fieldnum, new_value): + values = {} + for i, field in enumerate(self.fields): + if field.REAL: + if i < fieldnum: + values[field.name] = field.get_value(dateval) + elif i > fieldnum: + values[field.name] = field.get_min(dateval) + else: + values[field.name] = new_value + + return datetime(**values) + + def get_next_fire_time(self, start_date): + next_date = datetime_ceil(start_date) + fieldnum = 0 + while 0 <= fieldnum < len(self.fields): + field = self.fields[fieldnum] + curr_value = field.get_value(next_date) + next_value = field.get_next_value(next_date) + + if next_value is None: + # No valid value was found + next_date, fieldnum = self._increment_field_value(next_date, + fieldnum - 1) + elif next_value > curr_value: + # A valid, but higher than the starting value, was found + if field.REAL: + next_date = self._set_field_value(next_date, fieldnum, + next_value) + fieldnum += 1 + else: + next_date, fieldnum = self._increment_field_value(next_date, + fieldnum) + else: + # A valid value was found, no changes necessary + fieldnum += 1 + + if fieldnum >= 0: + return next_date + + def __repr__(self): + field_reprs = ("%s='%s'" % (f.name, str(f)) for f in self.fields + if str(f) != '*') + return '%s(%s)' % 
(self.__class__.__name__, ', '.join(field_reprs)) + + +class DateTrigger(object): + def __init__(self, run_date): + self.run_date = convert_to_datetime(run_date) + + def get_next_fire_time(self, start_date): + if self.run_date >= start_date: + return self.run_date + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, repr(self.run_date)) + + +class IntervalTrigger(object): + def __init__(self, interval, repeat, start_date=None): + if not isinstance(interval, timedelta): + raise TypeError('interval must be a timedelta') + if repeat < 0: + raise ValueError('Illegal value for repeat; expected >= 0, ' + 'received %s' % repeat) + + self.interval = interval + self.interval_length = timedelta_seconds(self.interval) + if self.interval_length == 0: + self.interval = timedelta(seconds=1) + self.interval_length = 1 + self.repeat = repeat + if start_date is None: + self.first_fire_date = datetime.now() + self.interval + else: + self.first_fire_date = convert_to_datetime(start_date) + self.first_fire_date -= timedelta(microseconds=\ + self.first_fire_date.microsecond) + if repeat > 0: + self.last_fire_date = self.first_fire_date + interval * (repeat - 1) + else: + self.last_fire_date = None + + def get_next_fire_time(self, start_date): + if start_date < self.first_fire_date: + return self.first_fire_date + if self.last_fire_date and start_date > self.last_fire_date: + return None + timediff_seconds = timedelta_seconds(start_date - self.first_fire_date) + next_interval_num = int(ceil(timediff_seconds / self.interval_length)) + return self.first_fire_date + self.interval * next_interval_num + + def __repr__(self): + return "%s(interval=%s, repeat=%d, start_date=%s)" % ( + self.__class__.__name__, repr(self.interval), self.repeat, + repr(self.first_fire_date)) diff --git a/libs/apscheduler/util.py b/libs/apscheduler/util.py new file mode 100644 index 0000000..7dfc767 --- /dev/null +++ b/libs/apscheduler/util.py @@ -0,0 +1,91 @@ +""" +This module contains several 
handy functions primarily meant for internal use. +""" + +from datetime import date, datetime, timedelta +from time import mktime + +__all__ = ('asint', 'asbool', 'convert_to_datetime', 'timedelta_seconds', + 'time_difference', 'datetime_ceil') + + +def asint(text): + """ + Safely converts a string to an integer, returning None if the string + is None. + + :type text: str + :rtype: int + """ + if text is not None: + return int(text) + + +def asbool(obj): + """ + Interprets an object as a boolean value. + + :rtype: bool + """ + if isinstance(obj, str): + obj = obj.strip().lower() + if obj in ('true', 'yes', 'on', 'y', 't', '1'): + return True + if obj in ('false', 'no', 'off', 'n', 'f', '0'): + return False + raise ValueError('Unable to interpret value "%s" as boolean' % obj) + return bool(obj) + + +def convert_to_datetime(dateval): + """ + Converts a date object to a datetime object. + If an actual datetime object is passed, it is returned unmodified. + + :type dateval: date + :rtype: datetime + """ + if isinstance(dateval, datetime): + return dateval + elif isinstance(dateval, date): + return datetime.fromordinal(dateval.toordinal()) + raise TypeError('Expected date, got %s instead' % type(dateval)) + + +def timedelta_seconds(delta): + """ + Converts the given timedelta to seconds. + + :type delta: timedelta + :rtype: float + """ + return delta.days * 24 * 60 * 60 + delta.seconds + \ + delta.microseconds / 1000000.0 + + +def time_difference(date1, date2): + """ + Returns the time difference in seconds between the given two + datetime objects. The difference is calculated as: date1 - date2. + + :param date1: the later datetime + :type date1: datetime + :param date2: the earlier datetime + :type date2: datetime + :rtype: float + """ + later = mktime(date1.timetuple()) + earlier = mktime(date2.timetuple()) + return int(later - earlier) + + +def datetime_ceil(dateval): + """ + Rounds the given datetime object upwards. 
+ + :type dateval: datetime + """ + if dateval.microsecond > 0: + return dateval + timedelta(seconds=1, + microseconds=-dateval.microsecond) + return dateval diff --git a/libs/argparse.py b/libs/argparse.py new file mode 100644 index 0000000..a060129 --- /dev/null +++ b/libs/argparse.py @@ -0,0 +1,2353 @@ +# -*- coding: utf-8 -*- + +# Copyright © 2006-2009 Steven J. Bethard . +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Command-line parsing library + +This module is an optparse-inspired command-line parsing library that: + + - handles both optional and positional arguments + - produces highly informative usage messages + - supports parsers that dispatch to sub-parsers + +The following is a simple usage example that sums integers from the +command-line and writes the result to a file:: + + parser = argparse.ArgumentParser( + description='sum the integers at the command line') + parser.add_argument( + 'integers', metavar='int', nargs='+', type=int, + help='an integer to be summed') + parser.add_argument( + '--log', default=sys.stdout, type=argparse.FileType('w'), + help='the file where the sum should be written') + args = parser.parse_args() + args.log.write('%s' % sum(args.integers)) + args.log.close() + +The module contains the following public classes: + + - ArgumentParser -- The main entry point for command-line parsing. 
As the + example above shows, the add_argument() method is used to populate + the parser with actions for optional and positional arguments. Then + the parse_args() method is invoked to convert the args at the + command-line into an object with attributes. + + - ArgumentError -- The exception raised by ArgumentParser objects when + there are errors with the parser's actions. Errors raised while + parsing the command-line are caught by ArgumentParser and emitted + as command-line messages. + + - FileType -- A factory for defining types of files to be created. As the + example above shows, instances of FileType are typically passed as + the type= argument of add_argument() calls. + + - Action -- The base class for parser actions. Typically actions are + selected by passing strings like 'store_true' or 'append_const' to + the action= argument of add_argument(). However, for greater + customization of ArgumentParser actions, subclasses of Action may + be defined and passed as the action= argument. + + - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, + ArgumentDefaultsHelpFormatter -- Formatter classes which + may be passed as the formatter_class= argument to the + ArgumentParser constructor. HelpFormatter is the default, + RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser + not to change the formatting for help text, and + ArgumentDefaultsHelpFormatter adds information about argument defaults + to the help. + +All other classes in this module are considered implementation details. +(Also note that HelpFormatter and RawDescriptionHelpFormatter are only +considered public as object names -- the API of the formatter objects is +still considered an implementation detail.) 
+""" + +__version__ = '1.1' +__all__ = [ + 'ArgumentParser', + 'ArgumentError', + 'Namespace', + 'Action', + 'FileType', + 'HelpFormatter', + 'RawDescriptionHelpFormatter', + 'RawTextHelpFormatter', + 'ArgumentDefaultsHelpFormatter', +] + + +import copy as _copy +import os as _os +import re as _re +import sys as _sys +import textwrap as _textwrap + +from gettext import gettext as _ + +try: + _set = set +except NameError: + from sets import Set as _set + +try: + _basestring = basestring +except NameError: + _basestring = str + +try: + _sorted = sorted +except NameError: + + def _sorted(iterable, reverse=False): + result = list(iterable) + result.sort() + if reverse: + result.reverse() + return result + + +def _callable(obj): + return hasattr(obj, '__call__') or hasattr(obj, '__bases__') + +# silence Python 2.6 buggy warnings about Exception.message +if _sys.version_info[:2] == (2, 6): + import warnings + warnings.filterwarnings( + action='ignore', + message='BaseException.message has been deprecated as of Python 2.6', + category=DeprecationWarning, + module='argparse') + + +SUPPRESS = '==SUPPRESS==' + +OPTIONAL = '?' +ZERO_OR_MORE = '*' +ONE_OR_MORE = '+' +PARSER = 'A...' +REMAINDER = '...' + +# ============================= +# Utility functions and classes +# ============================= + +class _AttributeHolder(object): + """Abstract base class that provides __repr__. + + The __repr__ method returns a string in the format:: + ClassName(attr=name, attr=name, ...) + The attributes are determined either by a class-level attribute, + '_kwarg_names', or by inspecting the instance __dict__. 
+ """ + + def __repr__(self): + type_name = type(self).__name__ + arg_strings = [] + for arg in self._get_args(): + arg_strings.append(repr(arg)) + for name, value in self._get_kwargs(): + arg_strings.append('%s=%r' % (name, value)) + return '%s(%s)' % (type_name, ', '.join(arg_strings)) + + def _get_kwargs(self): + return _sorted(self.__dict__.items()) + + def _get_args(self): + return [] + + +def _ensure_value(namespace, name, value): + if getattr(namespace, name, None) is None: + setattr(namespace, name, value) + return getattr(namespace, name) + + +# =============== +# Formatting Help +# =============== + +class HelpFormatter(object): + """Formatter for generating usage messages and argument help strings. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def __init__(self, + prog, + indent_increment=2, + max_help_position=24, + width=None): + + # default setting for width + if width is None: + try: + width = int(_os.environ['COLUMNS']) + except (KeyError, ValueError): + width = 80 + width -= 2 + + self._prog = prog + self._indent_increment = indent_increment + self._max_help_position = max_help_position + self._width = width + + self._current_indent = 0 + self._level = 0 + self._action_max_length = 0 + + self._root_section = self._Section(self, None) + self._current_section = self._root_section + + self._whitespace_matcher = _re.compile(r'\s+') + self._long_break_matcher = _re.compile(r'\n\n\n+') + + # =============================== + # Section and indentation methods + # =============================== + def _indent(self): + self._current_indent += self._indent_increment + self._level += 1 + + def _dedent(self): + self._current_indent -= self._indent_increment + assert self._current_indent >= 0, 'Indent decreased below 0.' 
+ self._level -= 1 + + class _Section(object): + + def __init__(self, formatter, parent, heading=None): + self.formatter = formatter + self.parent = parent + self.heading = heading + self.items = [] + + def format_help(self): + # format the indented section + if self.parent is not None: + self.formatter._indent() + join = self.formatter._join_parts + for func, args in self.items: + func(*args) + item_help = join([func(*args) for func, args in self.items]) + if self.parent is not None: + self.formatter._dedent() + + # return nothing if the section was empty + if not item_help: + return '' + + # add the heading if the section was non-empty + if self.heading is not SUPPRESS and self.heading is not None: + current_indent = self.formatter._current_indent + heading = '%*s%s:\n' % (current_indent, '', self.heading) + else: + heading = '' + + # join the section-initial newline, the heading and the help + return join(['\n', heading, item_help, '\n']) + + def _add_item(self, func, args): + self._current_section.items.append((func, args)) + + # ======================== + # Message building methods + # ======================== + def start_section(self, heading): + self._indent() + section = self._Section(self, self._current_section, heading) + self._add_item(section.format_help, []) + self._current_section = section + + def end_section(self): + self._current_section = self._current_section.parent + self._dedent() + + def add_text(self, text): + if text is not SUPPRESS and text is not None: + self._add_item(self._format_text, [text]) + + def add_usage(self, usage, actions, groups, prefix=None): + if usage is not SUPPRESS: + args = usage, actions, groups, prefix + self._add_item(self._format_usage, args) + + def add_argument(self, action): + if action.help is not SUPPRESS: + + # find all invocations + get_invocation = self._format_action_invocation + invocations = [get_invocation(action)] + for subaction in self._iter_indented_subactions(action): + 
invocations.append(get_invocation(subaction)) + + # update the maximum item length + invocation_length = max([len(s) for s in invocations]) + action_length = invocation_length + self._current_indent + self._action_max_length = max(self._action_max_length, + action_length) + + # add the item to the list + self._add_item(self._format_action, [action]) + + def add_arguments(self, actions): + for action in actions: + self.add_argument(action) + + # ======================= + # Help-formatting methods + # ======================= + def format_help(self): + help = self._root_section.format_help() + if help: + help = self._long_break_matcher.sub('\n\n', help) + help = help.strip('\n') + '\n' + return help + + def _join_parts(self, part_strings): + return ''.join([part + for part in part_strings + if part and part is not SUPPRESS]) + + def _format_usage(self, usage, actions, groups, prefix): + if prefix is None: + prefix = _('usage: ') + + # if usage is specified, use that + if usage is not None: + usage = usage % dict(prog=self._prog) + + # if no optionals or positionals are available, usage is just prog + elif usage is None and not actions: + usage = '%(prog)s' % dict(prog=self._prog) + + # if optionals and positionals are available, calculate usage + elif usage is None: + prog = '%(prog)s' % dict(prog=self._prog) + + # split optionals from positionals + optionals = [] + positionals = [] + for action in actions: + if action.option_strings: + optionals.append(action) + else: + positionals.append(action) + + # build full usage string + format = self._format_actions_usage + action_usage = format(optionals + positionals, groups) + usage = ' '.join([s for s in [prog, action_usage] if s]) + + # wrap the usage parts if it's too long + text_width = self._width - self._current_indent + if len(prefix) + len(usage) > text_width: + + # break usage into wrappable parts + part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' + opt_usage = format(optionals, groups) + pos_usage = format(positionals, 
groups) + opt_parts = _re.findall(part_regexp, opt_usage) + pos_parts = _re.findall(part_regexp, pos_usage) + assert ' '.join(opt_parts) == opt_usage + assert ' '.join(pos_parts) == pos_usage + + # helper for wrapping lines + def get_lines(parts, indent, prefix=None): + lines = [] + line = [] + if prefix is not None: + line_len = len(prefix) - 1 + else: + line_len = len(indent) - 1 + for part in parts: + if line_len + 1 + len(part) > text_width: + lines.append(indent + ' '.join(line)) + line = [] + line_len = len(indent) - 1 + line.append(part) + line_len += len(part) + 1 + if line: + lines.append(indent + ' '.join(line)) + if prefix is not None: + lines[0] = lines[0][len(indent):] + return lines + + # if prog is short, follow it with optionals or positionals + if len(prefix) + len(prog) <= 0.75 * text_width: + indent = ' ' * (len(prefix) + len(prog) + 1) + if opt_parts: + lines = get_lines([prog] + opt_parts, indent, prefix) + lines.extend(get_lines(pos_parts, indent)) + elif pos_parts: + lines = get_lines([prog] + pos_parts, indent, prefix) + else: + lines = [prog] + + # if prog is long, put it on its own line + else: + indent = ' ' * len(prefix) + parts = opt_parts + pos_parts + lines = get_lines(parts, indent) + if len(lines) > 1: + lines = [] + lines.extend(get_lines(opt_parts, indent)) + lines.extend(get_lines(pos_parts, indent)) + lines = [prog] + lines + + # join lines into usage + usage = '\n'.join(lines) + + # prefix with 'usage:' + return '%s%s\n\n' % (prefix, usage) + + def _format_actions_usage(self, actions, groups): + # find group indices and identify actions in groups + group_actions = _set() + inserts = {} + for group in groups: + try: + start = actions.index(group._group_actions[0]) + except ValueError: + continue + else: + end = start + len(group._group_actions) + if actions[start:end] == group._group_actions: + for action in group._group_actions: + group_actions.add(action) + if not group.required: + inserts[start] = '[' + inserts[end] = ']' + 
else: + inserts[start] = '(' + inserts[end] = ')' + for i in range(start + 1, end): + inserts[i] = '|' + + # collect all actions format strings + parts = [] + for i, action in enumerate(actions): + + # suppressed arguments are marked with None + # remove | separators for suppressed arguments + if action.help is SUPPRESS: + parts.append(None) + if inserts.get(i) == '|': + inserts.pop(i) + elif inserts.get(i + 1) == '|': + inserts.pop(i + 1) + + # produce all arg strings + elif not action.option_strings: + part = self._format_args(action, action.dest) + + # if it's in a group, strip the outer [] + if action in group_actions: + if part[0] == '[' and part[-1] == ']': + part = part[1:-1] + + # add the action string to the list + parts.append(part) + + # produce the first way to invoke the option in brackets + else: + option_string = action.option_strings[0] + + # if the Optional doesn't take a value, format is: + # -s or --long + if action.nargs == 0: + part = '%s' % option_string + + # if the Optional takes a value, format is: + # -s ARGS or --long ARGS + else: + default = action.dest.upper() + args_string = self._format_args(action, default) + part = '%s %s' % (option_string, args_string) + + # make it look optional if it's not required or in a group + if not action.required and action not in group_actions: + part = '[%s]' % part + + # add the action string to the list + parts.append(part) + + # insert things at the necessary indices + for i in _sorted(inserts, reverse=True): + parts[i:i] = [inserts[i]] + + # join all the action items with spaces + text = ' '.join([item for item in parts if item is not None]) + + # clean up separators for mutually exclusive groups + open = r'[\[(]' + close = r'[\])]' + text = _re.sub(r'(%s) ' % open, r'\1', text) + text = _re.sub(r' (%s)' % close, r'\1', text) + text = _re.sub(r'%s *%s' % (open, close), r'', text) + text = _re.sub(r'\(([^|]*)\)', r'\1', text) + text = text.strip() + + # return the text + return text + + def 
_format_text(self, text): + if '%(prog)' in text: + text = text % dict(prog=self._prog) + text_width = self._width - self._current_indent + indent = ' ' * self._current_indent + return self._fill_text(text, text_width, indent) + '\n\n' + + def _format_action(self, action): + # determine the required width and the entry label + help_position = min(self._action_max_length + 2, + self._max_help_position) + help_width = self._width - help_position + action_width = help_position - self._current_indent - 2 + action_header = self._format_action_invocation(action) + + # ho nelp; start on same line and add a final newline + if not action.help: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + + # short action name; start on the same line and pad two spaces + elif len(action_header) <= action_width: + tup = self._current_indent, '', action_width, action_header + action_header = '%*s%-*s ' % tup + indent_first = 0 + + # long action name; start on the next line + else: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + indent_first = help_position + + # collect the pieces of the action help + parts = [action_header] + + # if there was help for the action, add lines of help text + if action.help: + help_text = self._expand_help(action) + help_lines = self._split_lines(help_text, help_width) + parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) + for line in help_lines[1:]: + parts.append('%*s%s\n' % (help_position, '', line)) + + # or add a newline if the description doesn't end with one + elif not action_header.endswith('\n'): + parts.append('\n') + + # if there are any sub-actions, add their help as well + for subaction in self._iter_indented_subactions(action): + parts.append(self._format_action(subaction)) + + # return a single string + return self._join_parts(parts) + + def _format_action_invocation(self, action): + if not action.option_strings: + metavar, = self._metavar_formatter(action, 
action.dest)(1) + return metavar + + else: + parts = [] + + # if the Optional doesn't take a value, format is: + # -s, --long + if action.nargs == 0: + parts.extend(action.option_strings) + + # if the Optional takes a value, format is: + # -s ARGS, --long ARGS + else: + default = action.dest.upper() + args_string = self._format_args(action, default) + for option_string in action.option_strings: + parts.append('%s %s' % (option_string, args_string)) + + return ', '.join(parts) + + def _metavar_formatter(self, action, default_metavar): + if action.metavar is not None: + result = action.metavar + elif action.choices is not None: + choice_strs = [str(choice) for choice in action.choices] + result = '{%s}' % ','.join(choice_strs) + else: + result = default_metavar + + def format(tuple_size): + if isinstance(result, tuple): + return result + else: + return (result, ) * tuple_size + return format + + def _format_args(self, action, default_metavar): + get_metavar = self._metavar_formatter(action, default_metavar) + if action.nargs is None: + result = '%s' % get_metavar(1) + elif action.nargs == OPTIONAL: + result = '[%s]' % get_metavar(1) + elif action.nargs == ZERO_OR_MORE: + result = '[%s [%s ...]]' % get_metavar(2) + elif action.nargs == ONE_OR_MORE: + result = '%s [%s ...]' % get_metavar(2) + elif action.nargs == REMAINDER: + result = '...' + elif action.nargs == PARSER: + result = '%s ...' 
% get_metavar(1) + else: + formats = ['%s' for _ in range(action.nargs)] + result = ' '.join(formats) % get_metavar(action.nargs) + return result + + def _expand_help(self, action): + params = dict(vars(action), prog=self._prog) + for name in list(params): + if params[name] is SUPPRESS: + del params[name] + for name in list(params): + if hasattr(params[name], '__name__'): + params[name] = params[name].__name__ + if params.get('choices') is not None: + choices_str = ', '.join([str(c) for c in params['choices']]) + params['choices'] = choices_str + return self._get_help_string(action) % params + + def _iter_indented_subactions(self, action): + try: + get_subactions = action._get_subactions + except AttributeError: + pass + else: + self._indent() + for subaction in get_subactions(): + yield subaction + self._dedent() + + def _split_lines(self, text, width): + text = self._whitespace_matcher.sub(' ', text).strip() + return _textwrap.wrap(text, width) + + def _fill_text(self, text, width, indent): + text = self._whitespace_matcher.sub(' ', text).strip() + return _textwrap.fill(text, width, initial_indent=indent, + subsequent_indent=indent) + + def _get_help_string(self, action): + return action.help + + +class RawDescriptionHelpFormatter(HelpFormatter): + """Help message formatter which retains any formatting in descriptions. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _fill_text(self, text, width, indent): + return ''.join([indent + line for line in text.splitlines(True)]) + + +class RawTextHelpFormatter(RawDescriptionHelpFormatter): + """Help message formatter which retains formatting of all help text. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. 
+ """ + + def _split_lines(self, text, width): + return text.splitlines() + + +class ArgumentDefaultsHelpFormatter(HelpFormatter): + """Help message formatter which adds default values to argument help. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _get_help_string(self, action): + help = action.help + if '%(default)' not in action.help: + if action.default is not SUPPRESS: + defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] + if action.option_strings or action.nargs in defaulting_nargs: + help += ' (default: %(default)s)' + return help + + +# ===================== +# Options and Arguments +# ===================== + +def _get_action_name(argument): + if argument is None: + return None + elif argument.option_strings: + return '/'.join(argument.option_strings) + elif argument.metavar not in (None, SUPPRESS): + return argument.metavar + elif argument.dest not in (None, SUPPRESS): + return argument.dest + else: + return None + + +class ArgumentError(Exception): + """An error from creating or using an argument (optional or positional). + + The string value of this exception is the message, augmented with + information about the argument that caused it. + """ + + def __init__(self, argument, message): + self.argument_name = _get_action_name(argument) + self.message = message + + def __str__(self): + if self.argument_name is None: + format = '%(message)s' + else: + format = 'argument %(argument_name)s: %(message)s' + return format % dict(message=self.message, + argument_name=self.argument_name) + + +class ArgumentTypeError(Exception): + """An error from trying to convert a command line string to a type.""" + pass + + +# ============== +# Action classes +# ============== + +class Action(_AttributeHolder): + """Information about how to convert command line strings to Python objects. 
class Action(_AttributeHolder):
    """Information about how to convert command line strings to Python objects.

    An ArgumentParser keeps one Action per argument; the keyword arguments
    passed to the constructor are stored verbatim as attributes.

    Keyword Arguments:

        - option_strings -- option strings (e.g. ['-f', '--foo']) that
          invoke this action; empty for positionals.
        - dest -- attribute name on the Namespace receiving the value(s).
        - nargs -- number of command-line strings consumed: None (one
          string, single value), an int N (list of N values), '?' (zero
          or one), '*' (zero or more, list), '+' (one or more, list).
          Note nargs=1 produces a one-element list, unlike the default.
        - const -- value produced by zero-argument option actions.
        - default -- value used when the argument is absent.
        - type -- callable converting each argument string.
        - choices -- container restricting the converted values.
        - required -- whether omission is an error (optionals only).
        - help -- help text describing the argument.
        - metavar -- display name for the value in help output
          (defaults to dest).
    """

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        # Action is a passive record: every keyword is stored as-is.
        self.option_strings = option_strings
        self.dest = dest
        self.nargs = nargs
        self.const = const
        self.default = default
        self.type = type
        self.choices = choices
        self.required = required
        self.help = help
        self.metavar = metavar

    def _get_kwargs(self):
        # Attributes exposed through _AttributeHolder's repr; 'required'
        # is deliberately not shown.
        return [(name, getattr(self, name)) for name in (
            'option_strings',
            'dest',
            'nargs',
            'const',
            'default',
            'type',
            'choices',
            'help',
            'metavar',
        )]

    def __call__(self, parser, namespace, values, option_string=None):
        # Subclasses must implement the actual effect of the action.
        raise NotImplementedError(_('.__call__() not defined'))


class _StoreAction(Action):
    """Store the converted value(s) directly on the namespace."""

    def __init__(self,
                 option_strings,
                 dest,
                 nargs=None,
                 const=None,
                 default=None,
                 type=None,
                 choices=None,
                 required=False,
                 help=None,
                 metavar=None):
        # Reject configurations that could never store anything.
        if nargs == 0:
            raise ValueError('nargs for store actions must be > 0; if you '
                             'have nothing to store, actions such as store '
                             'true or store const may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_StoreAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=nargs,
            const=const,
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)


class _StoreConstAction(Action):
    """Store a fixed constant whenever the option is seen."""

    def __init__(self,
                 option_strings,
                 dest,
                 const,
                 default=None,
                 required=False,
                 help=None,
                 metavar=None):
        super(_StoreConstAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            const=const,
            default=default,
            required=required,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
    """store_const specialised to the constant True (default False)."""

    def __init__(self, option_strings, dest, default=False,
                 required=False, help=None):
        super(_StoreTrueAction, self).__init__(
            option_strings=option_strings, dest=dest, const=True,
            default=default, required=required, help=help)


class _StoreFalseAction(_StoreConstAction):
    """store_const specialised to the constant False (default True)."""

    def __init__(self, option_strings, dest, default=True,
                 required=False, help=None):
        super(_StoreFalseAction, self).__init__(
            option_strings=option_strings, dest=dest, const=False,
            default=default, required=required, help=help)


class _AppendAction(Action):
    """Append each occurrence's converted value to a list on the namespace."""

    def __init__(self, option_strings, dest, nargs=None, const=None,
                 default=None, type=None, choices=None, required=False,
                 help=None, metavar=None):
        if nargs == 0:
            raise ValueError('nargs for append actions must be > 0; if arg '
                             'strings are not supplying the value to append, '
                             'the append const action may be more appropriate')
        if const is not None and nargs != OPTIONAL:
            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
        super(_AppendAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=nargs,
            const=const, default=default, type=type, choices=choices,
            required=required, help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # Copy before appending so a shared default list is never mutated.
        current = _copy.copy(_ensure_value(namespace, self.dest, []))
        current.append(values)
        setattr(namespace, self.dest, current)


class _AppendConstAction(Action):
    """Append a fixed constant for each occurrence of the option."""

    def __init__(self, option_strings, dest, const, default=None,
                 required=False, help=None, metavar=None):
        super(_AppendConstAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=0, const=const,
            default=default, required=required, help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        # Same copy-then-append discipline as _AppendAction.
        current = _copy.copy(_ensure_value(namespace, self.dest, []))
        current.append(self.const)
        setattr(namespace, self.dest, current)


class _CountAction(Action):
    """Count how many times the option appears on the command line."""

    def __init__(self, option_strings, dest, default=None,
                 required=False, help=None):
        super(_CountAction, self).__init__(
            option_strings=option_strings, dest=dest, nargs=0,
            default=default, required=required, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest,
                _ensure_value(namespace, self.dest, 0) + 1)


class _HelpAction(Action):
    """Print the parser's help text and exit."""

    def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS,
                 help=None):
        super(_HelpAction, self).__init__(
            option_strings=option_strings, dest=dest, default=default,
            nargs=0, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        parser.exit()


class _VersionAction(Action):
    """Print version text (the action's own, else the parser's) and exit."""

    def __init__(self, option_strings, version=None, dest=SUPPRESS,
                 default=SUPPRESS, help=None):
        super(_VersionAction, self).__init__(
            option_strings=option_strings, dest=dest, default=default,
            nargs=0, help=help)
        self.version = version

    def __call__(self, parser, namespace, values, option_string=None):
        text = self.version
        if text is None:
            # Legacy fallback: the deprecated parser-level version attribute.
            text = parser.version
        formatter = parser._get_formatter()
        formatter.add_text(text)
        parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
    """Action that dispatches the remaining arguments to a named subparser."""

    class _ChoicesPseudoAction(Action):
        # Argument-less pseudo-action; exists only so the help formatter can
        # render each subcommand's name and help line via _get_subactions.

        def __init__(self, name, help):
            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
            sup.__init__(option_strings=[], dest=name, help=help)

    def __init__(self,
                 option_strings,
                 prog,
                 parser_class,
                 dest=SUPPRESS,
                 help=None,
                 metavar=None):

        self._prog_prefix = prog
        self._parser_class = parser_class
        self._name_parser_map = {}
        self._choices_actions = []

        # choices aliases the live name->parser map, so parsers registered
        # later via add_parser() automatically become valid choices.
        super(_SubParsersAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=PARSER,
            choices=self._name_parser_map,
            help=help,
            metavar=metavar)

    def add_parser(self, name, **kwargs):
        """Create, register and return a new subparser for *name*."""
        # set prog from the existing prefix
        if kwargs.get('prog') is None:
            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)

        # create a pseudo-action to hold the choice help
        if 'help' in kwargs:
            help = kwargs.pop('help')
            choice_action = self._ChoicesPseudoAction(name, help)
            self._choices_actions.append(choice_action)

        # create the parser and add it to the map
        parser = self._parser_class(**kwargs)
        self._name_parser_map[name] = parser
        return parser

    def _get_subactions(self):
        return self._choices_actions

    def __call__(self, parser, namespace, values, option_string=None):
        parser_name = values[0]
        arg_strings = values[1:]

        # set the parser name if requested
        if self.dest is not SUPPRESS:
            setattr(namespace, self.dest, parser_name)

        # select the parser
        try:
            parser = self._name_parser_map[parser_name]
        except KeyError:
            tup = parser_name, ', '.join(self._name_parser_map)
            # Fix: translate the msgid first, THEN interpolate. The original
            # did _('... %r ...' % tup), so the already-formatted string could
            # never match a gettext catalog entry.
            msg = _('unknown parser %r (choices: %s)') % tup
            raise ArgumentError(self, msg)

        # parse all the remaining options into the namespace
        parser.parse_args(arg_strings, namespace)
+ """ + + def __init__(self, mode='r', bufsize=None): + self._mode = mode + self._bufsize = bufsize + + def __call__(self, string): + # the special argument "-" means sys.std{in,out} + if string == '-': + if 'r' in self._mode: + return _sys.stdin + elif 'w' in self._mode: + return _sys.stdout + else: + msg = _('argument "-" with mode %r' % self._mode) + raise ValueError(msg) + + # all other arguments are used as file names + if self._bufsize: + return open(string, self._mode, self._bufsize) + else: + return open(string, self._mode) + + def __repr__(self): + args = [self._mode, self._bufsize] + args_str = ', '.join([repr(arg) for arg in args if arg is not None]) + return '%s(%s)' % (type(self).__name__, args_str) + +# =========================== +# Optional and Positional Parsing +# =========================== + +class Namespace(_AttributeHolder): + """Simple object for storing attributes. + + Implements equality by attribute names and values, and provides a simple + string representation. 
+ """ + + def __init__(self, **kwargs): + for name in kwargs: + setattr(self, name, kwargs[name]) + + def __eq__(self, other): + return vars(self) == vars(other) + + def __ne__(self, other): + return not (self == other) + + def __contains__(self, key): + return key in self.__dict__ + + +class _ActionsContainer(object): + + def __init__(self, + description, + prefix_chars, + argument_default, + conflict_handler): + super(_ActionsContainer, self).__init__() + + self.description = description + self.argument_default = argument_default + self.prefix_chars = prefix_chars + self.conflict_handler = conflict_handler + + # set up registries + self._registries = {} + + # register actions + self.register('action', None, _StoreAction) + self.register('action', 'store', _StoreAction) + self.register('action', 'store_const', _StoreConstAction) + self.register('action', 'store_true', _StoreTrueAction) + self.register('action', 'store_false', _StoreFalseAction) + self.register('action', 'append', _AppendAction) + self.register('action', 'append_const', _AppendConstAction) + self.register('action', 'count', _CountAction) + self.register('action', 'help', _HelpAction) + self.register('action', 'version', _VersionAction) + self.register('action', 'parsers', _SubParsersAction) + + # raise an exception if the conflict handler is invalid + self._get_handler() + + # action storage + self._actions = [] + self._option_string_actions = {} + + # groups + self._action_groups = [] + self._mutually_exclusive_groups = [] + + # defaults storage + self._defaults = {} + + # determines whether an "option" looks like a negative number + self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') + + # whether or not there are any optionals that look like negative + # numbers -- uses a list so it can be shared and edited + self._has_negative_number_optionals = [] + + # ==================== + # Registration methods + # ==================== + def register(self, registry_name, value, object): + 
registry = self._registries.setdefault(registry_name, {}) + registry[value] = object + + def _registry_get(self, registry_name, value, default=None): + return self._registries[registry_name].get(value, default) + + # ================================== + # Namespace default accessor methods + # ================================== + def set_defaults(self, **kwargs): + self._defaults.update(kwargs) + + # if these defaults match any existing arguments, replace + # the previous default on the object with the new one + for action in self._actions: + if action.dest in kwargs: + action.default = kwargs[action.dest] + + def get_default(self, dest): + for action in self._actions: + if action.dest == dest and action.default is not None: + return action.default + return self._defaults.get(dest, None) + + + # ======================= + # Adding argument actions + # ======================= + def add_argument(self, *args, **kwargs): + """ + add_argument(dest, ..., name=value, ...) + add_argument(option_string, option_string, ..., name=value, ...) 
+ """ + + # if no positional args are supplied or only one is supplied and + # it doesn't look like an option string, parse a positional + # argument + chars = self.prefix_chars + if not args or len(args) == 1 and args[0][0] not in chars: + if args and 'dest' in kwargs: + raise ValueError('dest supplied twice for positional argument') + kwargs = self._get_positional_kwargs(*args, **kwargs) + + # otherwise, we're adding an optional argument + else: + kwargs = self._get_optional_kwargs(*args, **kwargs) + + # if no default was supplied, use the parser-level default + if 'default' not in kwargs: + dest = kwargs['dest'] + if dest in self._defaults: + kwargs['default'] = self._defaults[dest] + elif self.argument_default is not None: + kwargs['default'] = self.argument_default + + # create the action object, and add it to the parser + action_class = self._pop_action_class(kwargs) + if not _callable(action_class): + raise ValueError('unknown action "%s"' % action_class) + action = action_class(**kwargs) + + # raise an error if the action type is not callable + type_func = self._registry_get('type', action.type, action.type) + if not _callable(type_func): + raise ValueError('%r is not callable' % type_func) + + return self._add_action(action) + + def add_argument_group(self, *args, **kwargs): + group = _ArgumentGroup(self, *args, **kwargs) + self._action_groups.append(group) + return group + + def add_mutually_exclusive_group(self, **kwargs): + group = _MutuallyExclusiveGroup(self, **kwargs) + self._mutually_exclusive_groups.append(group) + return group + + def _add_action(self, action): + # resolve any conflicts + self._check_conflict(action) + + # add to actions list + self._actions.append(action) + action.container = self + + # index the action by any option strings it has + for option_string in action.option_strings: + self._option_string_actions[option_string] = action + + # set the flag if any option strings look like negative numbers + for option_string in 
action.option_strings: + if self._negative_number_matcher.match(option_string): + if not self._has_negative_number_optionals: + self._has_negative_number_optionals.append(True) + + # return the created action + return action + + def _remove_action(self, action): + self._actions.remove(action) + + def _add_container_actions(self, container): + # collect groups by titles + title_group_map = {} + for group in self._action_groups: + if group.title in title_group_map: + msg = _('cannot merge actions - two groups are named %r') + raise ValueError(msg % (group.title)) + title_group_map[group.title] = group + + # map each action to its group + group_map = {} + for group in container._action_groups: + + # if a group with the title exists, use that, otherwise + # create a new group matching the container's group + if group.title not in title_group_map: + title_group_map[group.title] = self.add_argument_group( + title=group.title, + description=group.description, + conflict_handler=group.conflict_handler) + + # map the actions to their new group + for action in group._group_actions: + group_map[action] = title_group_map[group.title] + + # add container's mutually exclusive groups + # NOTE: if add_mutually_exclusive_group ever gains title= and + # description= then this code will need to be expanded as above + for group in container._mutually_exclusive_groups: + mutex_group = self.add_mutually_exclusive_group( + required=group.required) + + # map the actions to their new mutex group + for action in group._group_actions: + group_map[action] = mutex_group + + # add all actions to this container or their group + for action in container._actions: + group_map.get(action, self)._add_action(action) + + def _get_positional_kwargs(self, dest, **kwargs): + # make sure required is not specified + if 'required' in kwargs: + msg = _("'required' is an invalid argument for positionals") + raise TypeError(msg) + + # mark positional arguments as required if at least one is + # always required 
+ if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: + kwargs['required'] = True + if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: + kwargs['required'] = True + + # return the keyword arguments with no option strings + return dict(kwargs, dest=dest, option_strings=[]) + + def _get_optional_kwargs(self, *args, **kwargs): + # determine short and long option strings + option_strings = [] + long_option_strings = [] + for option_string in args: + # error on strings that don't start with an appropriate prefix + if not option_string[0] in self.prefix_chars: + msg = _('invalid option string %r: ' + 'must start with a character %r') + tup = option_string, self.prefix_chars + raise ValueError(msg % tup) + + # strings starting with two prefix characters are long options + option_strings.append(option_string) + if option_string[0] in self.prefix_chars: + if len(option_string) > 1: + if option_string[1] in self.prefix_chars: + long_option_strings.append(option_string) + + # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' + dest = kwargs.pop('dest', None) + if dest is None: + if long_option_strings: + dest_option_string = long_option_strings[0] + else: + dest_option_string = option_strings[0] + dest = dest_option_string.lstrip(self.prefix_chars) + if not dest: + msg = _('dest= is required for options like %r') + raise ValueError(msg % option_string) + dest = dest.replace('-', '_') + + # return the updated keyword arguments + return dict(kwargs, dest=dest, option_strings=option_strings) + + def _pop_action_class(self, kwargs, default=None): + action = kwargs.pop('action', default) + return self._registry_get('action', action, action) + + def _get_handler(self): + # determine function from conflict handler string + handler_func_name = '_handle_conflict_%s' % self.conflict_handler + try: + return getattr(self, handler_func_name) + except AttributeError: + msg = _('invalid conflict_resolution value: %r') + raise ValueError(msg % 
self.conflict_handler) + + def _check_conflict(self, action): + + # find all options that conflict with this option + confl_optionals = [] + for option_string in action.option_strings: + if option_string in self._option_string_actions: + confl_optional = self._option_string_actions[option_string] + confl_optionals.append((option_string, confl_optional)) + + # resolve any conflicts + if confl_optionals: + conflict_handler = self._get_handler() + conflict_handler(action, confl_optionals) + + def _handle_conflict_error(self, action, conflicting_actions): + message = _('conflicting option string(s): %s') + conflict_string = ', '.join([option_string + for option_string, action + in conflicting_actions]) + raise ArgumentError(action, message % conflict_string) + + def _handle_conflict_resolve(self, action, conflicting_actions): + + # remove all conflicting options + for option_string, action in conflicting_actions: + + # remove the conflicting option + action.option_strings.remove(option_string) + self._option_string_actions.pop(option_string, None) + + # if the option now has no option string, remove it from the + # container holding it + if not action.option_strings: + action.container._remove_action(action) + + +class _ArgumentGroup(_ActionsContainer): + + def __init__(self, container, title=None, description=None, **kwargs): + # add any missing keyword arguments by checking the container + update = kwargs.setdefault + update('conflict_handler', container.conflict_handler) + update('prefix_chars', container.prefix_chars) + update('argument_default', container.argument_default) + super_init = super(_ArgumentGroup, self).__init__ + super_init(description=description, **kwargs) + + # group attributes + self.title = title + self._group_actions = [] + + # share most attributes with the container + self._registries = container._registries + self._actions = container._actions + self._option_string_actions = container._option_string_actions + self._defaults = 
class _ArgumentGroup(_ActionsContainer):
    """A titled subset of a container's actions, used for help grouping."""

    def __init__(self, container, title=None, description=None, **kwargs):
        # Inherit any setting the caller did not override from the owner.
        kwargs.setdefault('conflict_handler', container.conflict_handler)
        kwargs.setdefault('prefix_chars', container.prefix_chars)
        kwargs.setdefault('argument_default', container.argument_default)
        super(_ArgumentGroup, self).__init__(description=description, **kwargs)

        # group attributes
        self.title = title
        self._group_actions = []

        # Alias (never copy) the container's bookkeeping structures so an
        # action registered through the group is seen by the container too.
        self._registries = container._registries
        self._actions = container._actions
        self._option_string_actions = container._option_string_actions
        self._defaults = container._defaults
        self._has_negative_number_optionals = \
            container._has_negative_number_optionals

    def _add_action(self, action):
        action = super(_ArgumentGroup, self)._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        super(_ArgumentGroup, self)._remove_action(action)
        self._group_actions.remove(action)


class _MutuallyExclusiveGroup(_ArgumentGroup):
    """Group whose members may not appear together on the command line."""

    def __init__(self, container, required=False):
        super(_MutuallyExclusiveGroup, self).__init__(container)
        self.required = required
        self._container = container

    def _add_action(self, action):
        # Only optional arguments can be mutually exclusive, since a
        # required member would make the exclusivity unsatisfiable.
        if action.required:
            raise ValueError(_('mutually exclusive arguments must be optional'))
        action = self._container._add_action(action)
        self._group_actions.append(action)
        return action

    def _remove_action(self, action):
        self._container._remove_action(action)
        self._group_actions.remove(action)
    # NOTE(review): methods of ArgumentParser; the class statement and
    # docstring head are above this chunk, and _parse_known_args (below)
    # continues past it.

    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=[],
                 formatter_class=HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True):
        # NOTE: parents=[] is a mutable default, but it is only iterated,
        # never mutated, so it is harmless here.

        if version is not None:
            # Legacy argparse-1.x behaviour; kept for compatibility.
            import warnings
            warnings.warn(
                """The "version" argument to ArgumentParser is deprecated. """
                """Please use """
                """"add_argument(..., action='version', version="N", ...)" """
                """instead""", DeprecationWarning)

        superinit = super(ArgumentParser, self).__init__
        superinit(description=description,
                  prefix_chars=prefix_chars,
                  argument_default=argument_default,
                  conflict_handler=conflict_handler)

        # default setting for prog
        if prog is None:
            prog = _os.path.basename(_sys.argv[0])

        self.prog = prog
        self.usage = usage
        self.epilog = epilog
        self.version = version
        self.formatter_class = formatter_class
        self.fromfile_prefix_chars = fromfile_prefix_chars
        self.add_help = add_help

        add_group = self.add_argument_group
        self._positionals = add_group(_('positional arguments'))
        self._optionals = add_group(_('optional arguments'))
        self._subparsers = None

        # register types
        def identity(string):
            return string
        self.register('type', None, identity)

        # add help and version arguments if necessary
        # (using explicit default to override global argument_default)
        if self.add_help:
            self.add_argument(
                '-h', '--help', action='help', default=SUPPRESS,
                help=_('show this help message and exit'))
        if self.version:
            # Legacy: version= on the parser auto-adds -v/--version.
            self.add_argument(
                '-v', '--version', action='version', default=SUPPRESS,
                version=self.version,
                help=_("show program's version number and exit"))

        # add parent arguments and defaults
        for parent in parents:
            self._add_container_actions(parent)
            try:
                defaults = parent._defaults
            except AttributeError:
                pass
            else:
                self._defaults.update(defaults)

    # =======================
    # Pretty __repr__ methods
    # =======================
    def _get_kwargs(self):
        # Attributes exposed through _AttributeHolder's repr.
        names = [
            'prog',
            'usage',
            'description',
            'version',
            'formatter_class',
            'conflict_handler',
            'add_help',
        ]
        return [(name, getattr(self, name)) for name in names]

    # ==================================
    # Optional/Positional adding methods
    # ==================================
    def add_subparsers(self, **kwargs):
        # Only one subparsers action is allowed per parser.
        if self._subparsers is not None:
            self.error(_('cannot have multiple subparser arguments'))

        # add the parser class to the arguments if it's not present
        kwargs.setdefault('parser_class', type(self))

        if 'title' in kwargs or 'description' in kwargs:
            title = _(kwargs.pop('title', 'subcommands'))
            description = _(kwargs.pop('description', None))
            self._subparsers = self.add_argument_group(title, description)
        else:
            self._subparsers = self._positionals

        # prog defaults to the usage message of this parser, skipping
        # optional arguments and with no "usage:" prefix
        if kwargs.get('prog') is None:
            formatter = self._get_formatter()
            positionals = self._get_positional_actions()
            groups = self._mutually_exclusive_groups
            formatter.add_usage(self.usage, positionals, groups, '')
            kwargs['prog'] = formatter.format_help().strip()

        # create the parsers action and add it to the positionals list
        parsers_class = self._pop_action_class(kwargs, 'parsers')
        action = parsers_class(option_strings=[], **kwargs)
        self._subparsers._add_action(action)

        # return the created parsers action
        return action

    def _add_action(self, action):
        # Route to the optionals or positionals display group.
        if action.option_strings:
            self._optionals._add_action(action)
        else:
            self._positionals._add_action(action)
        return action

    def _get_optional_actions(self):
        return [action
                for action in self._actions
                if action.option_strings]

    def _get_positional_actions(self):
        return [action
                for action in self._actions
                if not action.option_strings]

    # =====================================
    # Command line argument parsing methods
    # =====================================
    def parse_args(self, args=None, namespace=None):
        # Like parse_known_args, but leftover argv strings are an error.
        args, argv = self.parse_known_args(args, namespace)
        if argv:
            msg = _('unrecognized arguments: %s')
            self.error(msg % ' '.join(argv))
        return args

    def parse_known_args(self, args=None, namespace=None):
        # args default to the system args
        if args is None:
            args = _sys.argv[1:]

        # default Namespace built from parser defaults
        if namespace is None:
            namespace = Namespace()

        # add any action defaults that aren't present
        for action in self._actions:
            if action.dest is not SUPPRESS:
                if not hasattr(namespace, action.dest):
                    if action.default is not SUPPRESS:
                        default = action.default
                        # string defaults are converted with the action's type
                        if isinstance(action.default, _basestring):
                            default = self._get_value(action, default)
                        setattr(namespace, action.dest, default)

        # add any parser defaults that aren't present
        for dest in self._defaults:
            if not hasattr(namespace, dest):
                setattr(namespace, dest, self._defaults[dest])

        # parse the arguments and exit if there are any errors
        try:
            return self._parse_known_args(args, namespace)
        except ArgumentError:
            err = _sys.exc_info()[1]
            self.error(str(err))
# and note the index if it was an option + else: + option_tuple = self._parse_optional(arg_string) + if option_tuple is None: + pattern = 'A' + else: + option_string_indices[i] = option_tuple + pattern = 'O' + arg_string_pattern_parts.append(pattern) + + # join the pieces together to form the pattern + arg_strings_pattern = ''.join(arg_string_pattern_parts) + + # converts arg strings to the appropriate and then takes the action + seen_actions = _set() + seen_non_default_actions = _set() + + def take_action(action, argument_strings, option_string=None): + seen_actions.add(action) + argument_values = self._get_values(action, argument_strings) + + # error if this argument is not allowed with other previously + # seen arguments, assuming that actions that use the default + # value don't really count as "present" + if argument_values is not action.default: + seen_non_default_actions.add(action) + for conflict_action in action_conflicts.get(action, []): + if conflict_action in seen_non_default_actions: + msg = _('not allowed with argument %s') + action_name = _get_action_name(conflict_action) + raise ArgumentError(action, msg % action_name) + + # take the action if we didn't receive a SUPPRESS value + # (e.g. from a default) + if argument_values is not SUPPRESS: + action(self, namespace, argument_values, option_string) + + # function to convert arg_strings into an optional action + def consume_optional(start_index): + + # get the optional identified at this index + option_tuple = option_string_indices[start_index] + action, option_string, explicit_arg = option_tuple + + # identify additional optionals in the same arg string + # (e.g. 
-xyz is the same as -x -y -z if no args are required) + match_argument = self._match_argument + action_tuples = [] + while True: + + # if we found no optional action, skip it + if action is None: + extras.append(arg_strings[start_index]) + return start_index + 1 + + # if there is an explicit argument, try to match the + # optional's string arguments to only this + if explicit_arg is not None: + arg_count = match_argument(action, 'A') + + # if the action is a single-dash option and takes no + # arguments, try to parse more single-dash options out + # of the tail of the option string + chars = self.prefix_chars + if arg_count == 0 and option_string[1] not in chars: + action_tuples.append((action, [], option_string)) + for char in self.prefix_chars: + option_string = char + explicit_arg[0] + explicit_arg = explicit_arg[1:] or None + optionals_map = self._option_string_actions + if option_string in optionals_map: + action = optionals_map[option_string] + break + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if the action expect exactly one argument, we've + # successfully matched the option; exit the loop + elif arg_count == 1: + stop = start_index + 1 + args = [explicit_arg] + action_tuples.append((action, args, option_string)) + break + + # error if a double-dash option did not use the + # explicit argument + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if there is no explicit argument, try to match the + # optional's string arguments with the following strings + # if successful, exit the loop + else: + start = start_index + 1 + selected_patterns = arg_strings_pattern[start:] + arg_count = match_argument(action, selected_patterns) + stop = start + arg_count + args = arg_strings[start:stop] + action_tuples.append((action, args, option_string)) + break + + # add the Optional to the list and return the index at which + # the Optional's string args stopped 
+ assert action_tuples + for action, args, option_string in action_tuples: + take_action(action, args, option_string) + return stop + + # the list of Positionals left to be parsed; this is modified + # by consume_positionals() + positionals = self._get_positional_actions() + + # function to convert arg_strings into positional actions + def consume_positionals(start_index): + # match as many Positionals as possible + match_partial = self._match_arguments_partial + selected_pattern = arg_strings_pattern[start_index:] + arg_counts = match_partial(positionals, selected_pattern) + + # slice off the appropriate arg strings for each Positional + # and add the Positional and its args to the list + for action, arg_count in zip(positionals, arg_counts): + args = arg_strings[start_index: start_index + arg_count] + start_index += arg_count + take_action(action, args) + + # slice off the Positionals that we just parsed and return the + # index at which the Positionals' string args stopped + positionals[:] = positionals[len(arg_counts):] + return start_index + + # consume Positionals and Optionals alternately, until we have + # passed the last option string + extras = [] + start_index = 0 + if option_string_indices: + max_option_string_index = max(option_string_indices) + else: + max_option_string_index = -1 + while start_index <= max_option_string_index: + + # consume any Positionals preceding the next option + next_option_string_index = min([ + index + for index in option_string_indices + if index >= start_index]) + if start_index != next_option_string_index: + positionals_end_index = consume_positionals(start_index) + + # only try to parse the next optional if we didn't consume + # the option string during the positionals parsing + if positionals_end_index > start_index: + start_index = positionals_end_index + continue + else: + start_index = positionals_end_index + + # if we consumed all the positionals we could and we're not + # at the index of an option string, there were 
extra arguments + if start_index not in option_string_indices: + strings = arg_strings[start_index:next_option_string_index] + extras.extend(strings) + start_index = next_option_string_index + + # consume the next optional and any arguments for it + start_index = consume_optional(start_index) + + # consume any positionals following the last Optional + stop_index = consume_positionals(start_index) + + # if we didn't consume all the argument strings, there were extras + extras.extend(arg_strings[stop_index:]) + + # if we didn't use all the Positional objects, there were too few + # arg strings supplied. + if positionals: + self.error(_('too few arguments')) + + # make sure all required actions were present + for action in self._actions: + if action.required: + if action not in seen_actions: + name = _get_action_name(action) + self.error(_('argument %s is required') % name) + + # make sure all required groups had one option present + for group in self._mutually_exclusive_groups: + if group.required: + for action in group._group_actions: + if action in seen_non_default_actions: + break + + # if no actions were used, report the error + else: + names = [_get_action_name(action) + for action in group._group_actions + if action.help is not SUPPRESS] + msg = _('one of the arguments %s is required') + self.error(msg % ' '.join(names)) + + # return the updated namespace and the extra arguments + return namespace, extras + + def _read_args_from_files(self, arg_strings): + # expand arguments referencing files + new_arg_strings = [] + for arg_string in arg_strings: + + # for regular arguments, just add them back into the list + if arg_string[0] not in self.fromfile_prefix_chars: + new_arg_strings.append(arg_string) + + # replace arguments referencing files with the file content + else: + try: + args_file = open(arg_string[1:]) + try: + arg_strings = [] + for arg_line in args_file.read().splitlines(): + for arg in self.convert_arg_line_to_args(arg_line): + arg_strings.append(arg) 
+ arg_strings = self._read_args_from_files(arg_strings) + new_arg_strings.extend(arg_strings) + finally: + args_file.close() + except IOError: + err = _sys.exc_info()[1] + self.error(str(err)) + + # return the modified argument list + return new_arg_strings + + def convert_arg_line_to_args(self, arg_line): + return [arg_line] + + def _match_argument(self, action, arg_strings_pattern): + # match the pattern for this action to the arg strings + nargs_pattern = self._get_nargs_pattern(action) + match = _re.match(nargs_pattern, arg_strings_pattern) + + # raise an exception if we weren't able to find a match + if match is None: + nargs_errors = { + None: _('expected one argument'), + OPTIONAL: _('expected at most one argument'), + ONE_OR_MORE: _('expected at least one argument'), + } + default = _('expected %s argument(s)') % action.nargs + msg = nargs_errors.get(action.nargs, default) + raise ArgumentError(action, msg) + + # return the number of arguments matched + return len(match.group(1)) + + def _match_arguments_partial(self, actions, arg_strings_pattern): + # progressively shorten the actions list by slicing off the + # final actions until we find a match + result = [] + for i in range(len(actions), 0, -1): + actions_slice = actions[:i] + pattern = ''.join([self._get_nargs_pattern(action) + for action in actions_slice]) + match = _re.match(pattern, arg_strings_pattern) + if match is not None: + result.extend([len(string) for string in match.groups()]) + break + + # return the list of arg string counts + return result + + def _parse_optional(self, arg_string): + # if it's an empty string, it was meant to be a positional + if not arg_string: + return None + + # if it doesn't start with a prefix, it was meant to be positional + if not arg_string[0] in self.prefix_chars: + return None + + # if the option string is present in the parser, return the action + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, 
arg_string, None + + # if it's just a single character, it was meant to be positional + if len(arg_string) == 1: + return None + + # if the option string before the "=" is present, return the action + if '=' in arg_string: + option_string, explicit_arg = arg_string.split('=', 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + + # search through all possible prefixes of the option string + # and all actions in the parser for possible interpretations + option_tuples = self._get_option_tuples(arg_string) + + # if multiple actions match, the option string was ambiguous + if len(option_tuples) > 1: + options = ', '.join([option_string + for action, option_string, explicit_arg in option_tuples]) + tup = arg_string, options + self.error(_('ambiguous option: %s could match %s') % tup) + + # if exactly one action matched, this segmentation is good, + # so return the parsed action + elif len(option_tuples) == 1: + option_tuple, = option_tuples + return option_tuple + + # if it was not found as an option, but it looks like a negative + # number, it was meant to be positional + # unless there are negative-number-like options + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + + # if it contains a space, it was meant to be a positional + if ' ' in arg_string: + return None + + # it was meant to be an optional but there is no such option + # in this parser (though it might be a valid option in a subparser) + return None, arg_string, None + + def _get_option_tuples(self, option_string): + result = [] + + # option strings starting with two prefix characters are only + # split at the '=' + chars = self.prefix_chars + if option_string[0] in chars and option_string[1] in chars: + if '=' in option_string: + option_prefix, explicit_arg = option_string.split('=', 1) + else: + option_prefix = option_string + explicit_arg = 
None + for option_string in self._option_string_actions: + if option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # single character options can be concatenated with their arguments + # but multiple character options always have to have their argument + # separate + elif option_string[0] in chars and option_string[1] not in chars: + option_prefix = option_string + explicit_arg = None + short_option_prefix = option_string[:2] + short_explicit_arg = option_string[2:] + + for option_string in self._option_string_actions: + if option_string == short_option_prefix: + action = self._option_string_actions[option_string] + tup = action, option_string, short_explicit_arg + result.append(tup) + elif option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # shouldn't ever get here + else: + self.error(_('unexpected option string: %s') % option_string) + + # return the collected option tuples + return result + + def _get_nargs_pattern(self, action): + # in all examples below, we have to allow for '--' args + # which are represented as '-' in the pattern + nargs = action.nargs + + # the default (None) is assumed to be a single argument + if nargs is None: + nargs_pattern = '(-*A-*)' + + # allow zero or one arguments + elif nargs == OPTIONAL: + nargs_pattern = '(-*A?-*)' + + # allow zero or more arguments + elif nargs == ZERO_OR_MORE: + nargs_pattern = '(-*[A-]*)' + + # allow one or more arguments + elif nargs == ONE_OR_MORE: + nargs_pattern = '(-*A[A-]*)' + + # allow any number of options or arguments + elif nargs == REMAINDER: + nargs_pattern = '([-AO]*)' + + # allow one argument followed by any number of options or arguments + elif nargs == PARSER: + nargs_pattern = '(-*A[-AO]*)' + + # all others should be integers + else: + nargs_pattern = '(-*%s-*)' % 
'-*'.join('A' * nargs) + + # if this is an optional action, -- is not allowed + if action.option_strings: + nargs_pattern = nargs_pattern.replace('-*', '') + nargs_pattern = nargs_pattern.replace('-', '') + + # return the pattern + return nargs_pattern + + # ======================== + # Value conversion methods + # ======================== + def _get_values(self, action, arg_strings): + # for everything but PARSER args, strip out '--' + if action.nargs not in [PARSER, REMAINDER]: + arg_strings = [s for s in arg_strings if s != '--'] + + # optional argument produces a default when not present + if not arg_strings and action.nargs == OPTIONAL: + if action.option_strings: + value = action.const + else: + value = action.default + if isinstance(value, _basestring): + value = self._get_value(action, value) + self._check_value(action, value) + + # when nargs='*' on a positional, if there were no command-line + # args, use the default if it is anything other than None + elif (not arg_strings and action.nargs == ZERO_OR_MORE and + not action.option_strings): + if action.default is not None: + value = action.default + else: + value = arg_strings + self._check_value(action, value) + + # single argument or optional argument produces a single value + elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: + arg_string, = arg_strings + value = self._get_value(action, arg_string) + self._check_value(action, value) + + # REMAINDER arguments convert all values, checking none + elif action.nargs == REMAINDER: + value = [self._get_value(action, v) for v in arg_strings] + + # PARSER arguments convert all values, but check only the first + elif action.nargs == PARSER: + value = [self._get_value(action, v) for v in arg_strings] + self._check_value(action, value[0]) + + # all other types of nargs produce a list + else: + value = [self._get_value(action, v) for v in arg_strings] + for v in value: + self._check_value(action, v) + + # return the converted value + return value + + 
def _get_value(self, action, arg_string): + type_func = self._registry_get('type', action.type, action.type) + if not _callable(type_func): + msg = _('%r is not callable') + raise ArgumentError(action, msg % type_func) + + # convert the value to the appropriate type + try: + result = type_func(arg_string) + + # ArgumentTypeErrors indicate errors + except ArgumentTypeError: + name = getattr(action.type, '__name__', repr(action.type)) + msg = str(_sys.exc_info()[1]) + raise ArgumentError(action, msg) + + # TypeErrors or ValueErrors also indicate errors + except (TypeError, ValueError): + name = getattr(action.type, '__name__', repr(action.type)) + msg = _('invalid %s value: %r') + raise ArgumentError(action, msg % (name, arg_string)) + + # return the converted value + return result + + def _check_value(self, action, value): + # converted value must be one of the choices (if specified) + if action.choices is not None and value not in action.choices: + tup = value, ', '.join(map(repr, action.choices)) + msg = _('invalid choice: %r (choose from %s)') % tup + raise ArgumentError(action, msg) + + # ======================= + # Help-formatting methods + # ======================= + def format_usage(self): + formatter = self._get_formatter() + formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + return formatter.format_help() + + def format_help(self): + formatter = self._get_formatter() + + # usage + formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + + # description + formatter.add_text(self.description) + + # positionals, optionals and user-defined groups + for action_group in self._action_groups: + formatter.start_section(action_group.title) + formatter.add_text(action_group.description) + formatter.add_arguments(action_group._group_actions) + formatter.end_section() + + # epilog + formatter.add_text(self.epilog) + + # determine help from format above + return formatter.format_help() + + def 
format_version(self): + import warnings + warnings.warn( + 'The format_version method is deprecated -- the "version" ' + 'argument to ArgumentParser is no longer supported.', + DeprecationWarning) + formatter = self._get_formatter() + formatter.add_text(self.version) + return formatter.format_help() + + def _get_formatter(self): + return self.formatter_class(prog=self.prog) + + # ===================== + # Help-printing methods + # ===================== + def print_usage(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_usage(), file) + + def print_help(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_help(), file) + + def print_version(self, file=None): + import warnings + warnings.warn( + 'The print_version method is deprecated -- the "version" ' + 'argument to ArgumentParser is no longer supported.', + DeprecationWarning) + self._print_message(self.format_version(), file) + + def _print_message(self, message, file=None): + if message: + if file is None: + file = _sys.stderr + file.write(message) + + # =============== + # Exiting methods + # =============== + def exit(self, status=0, message=None): + if message: + self._print_message(message, _sys.stderr) + _sys.exit(status) + + def error(self, message): + """error(message: string) + + Prints a usage message incorporating the message to stderr and + exits. + + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. 
+ """ + self.print_usage(_sys.stderr) + self.exit(2, _('%s: error: %s\n') % (self.prog, message)) diff --git a/libs/axel/__init__.py b/libs/axel/__init__.py new file mode 100644 index 0000000..14437ec --- /dev/null +++ b/libs/axel/__init__.py @@ -0,0 +1,13 @@ +# __init__.py +# +# Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom +# +# This module is part of Axel and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +import inspect +from .axel import * +__all__ = sorted(name for name, obj in locals().items() + if not (name.startswith('_') or inspect.ismodule(obj))) +__all__.append('axel') +del inspect \ No newline at end of file diff --git a/libs/axel/axel.py b/libs/axel/axel.py new file mode 100644 index 0000000..6a369e3 --- /dev/null +++ b/libs/axel/axel.py @@ -0,0 +1,325 @@ +# axel.py +# +# Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom +# +# Based on an idea by Peter Thatcher, found on +# http://www.valuedlessons.com/2008/04/events-in-python.html +# +# This module is part of Axel and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +# +# Source: http://pypi.python.org/pypi/axel +# Docs: http://packages.python.org/axel + +import sys, threading, Queue + +class Event(object): + """ + Event object inspired by C# events. Handlers can be registered and + unregistered using += and -= operators. Execution and result are + influenced by the arguments passed to the constructor and += method. 
+ + from axel import Event + + event = Event() + def on_event(*args, **kwargs): + return (args, kwargs) + + event += on_event # handler registration + print(event(10, 20, y=30)) + >> ((True, ((10, 20), {'y': 30}), ),) + + event -= on_event # handler is unregistered + print(event(10, 20, y=30)) + >> None + + class Mouse(object): + def __init__(self): + self.click = Event(self) + self.click += self.on_click # handler registration + + def on_click(self, sender, *args, **kwargs): + assert isinstance(sender, Mouse), 'Wrong sender' + return (args, kwargs) + + mouse = Mouse() + print(mouse.click(10, 20)) + >> ((True, ((10, 20), {}), + >> >),) + + mouse.click -= mouse.on_click # handler is unregistered + print(mouse.click(10, 20)) + >> None + """ + + def __init__(self, sender=None, asynch=False, exc_info=False, + lock=None, threads=3, traceback=False): + """ Creates an event + + asynch + if True handler's are executes asynchronous + exc_info + if True, result will contain sys.exc_info()[:2] on error + lock + threading.RLock used to synchronize execution + sender + event's sender. The sender is passed as the first argument to the + handler, only if is not None. For this case the handler must have + a placeholder in the arguments to receive the sender + threads + maximum number of threads that will be started + traceback + if True, the execution result will contain sys.exc_info() + on error. exc_info must be also True to get the traceback + + hash = hash(handler) + + Handlers are stored in a dictionary that has as keys the handler's hash + handlers = { + hash : (handler, memoize, timeout), + hash : (handler, memoize, timeout), ... + } + The execution result is cached using the following structure + memoize = { + hash : ((args, kwargs, result), (args, kwargs, result), ...), + hash : ((args, kwargs, result), ...), ... 
+ } + The execution result is returned as a tuple having this structure + exec_result = ( + (True, result, handler), # on success + (False, error_info, handler), # on error + (None, None, handler), ... # asynchronous execution + ) + """ + self.asynchronous = asynch + self.exc_info = exc_info + self.lock = lock + self.sender = sender + self.threads = threads + self.traceback = traceback + self.handlers = {} + self.memoize = {} + + def handle(self, handler): + """ Registers a handler. The handler can be transmitted together + with two arguments as a list or dictionary. The arguments are: + + memoize + if True, the execution result will be cached in self.memoize + timeout + will allocate a predefined time interval for the execution + + If arguments are provided as a list, they are considered to have + this sequence: (handler, memoize, timeout) + + Examples: + event += handler + event += (handler, True, 1.5) + event += {'handler':handler, 'memoize':True, 'timeout':1.5} + """ + handler_, memoize, timeout = self._extract(handler) + self.handlers[hash(handler_)] = (handler_, memoize, timeout) + return self + + def unhandle(self, handler): + """ Unregisters a handler """ + handler_, memoize, timeout = self._extract(handler) + key = hash(handler_) + if not key in self.handlers: + raise ValueError('Handler "%s" was not found' % str(handler_)) + del self.handlers[key] + return self + + def fire(self, *args, **kwargs): + """ Stores all registered handlers in a queue for processing """ + self.queue = Queue.Queue() + self.result = [] + + if self.handlers: + max_threads = self._threads() + + for i in range(max_threads): + t = threading.Thread(target=self._execute, + args=args, kwargs=kwargs) + t.daemon = True + t.start() + + for handler in self.handlers: + self.queue.put(handler) + + if self.asynchronous: + handler_, memoize, timeout = self.handlers[handler] + self.result.append((None, None, handler_)) + + if not self.asynchronous: + self.queue.join() + + return 
tuple(self.result) or None + + def count(self): + """ Returns the count of registered handlers """ + return len(self.handlers) + + def clear(self): + """ Discards all registered handlers and cached results """ + self.handlers.clear() + self.memoize.clear() + + def _execute(self, *args, **kwargs): + """ Executes all handlers stored in the queue """ + while True: + try: + handler, memoize, timeout = self.handlers[self.queue.get()] + + if isinstance(self.lock, threading._RLock): + self.lock.acquire() #synchronization + + try: + r = self._memoize(memoize, timeout, handler, *args, **kwargs) + if not self.asynchronous: + self.result.append(tuple(r)) + + except Exception as err: + if not self.asynchronous: + self.result.append((False, self._error(sys.exc_info()), + handler)) + finally: + if isinstance(self.lock, threading._RLock): + self.lock.release() + + if not self.asynchronous: + self.queue.task_done() + + except Queue.Empty: + break + + def _extract(self, queue_item): + """ Extracts a handler and handler's arguments that can be provided + as list or dictionary. 
If arguments are provided as list, they are + considered to have this sequence: (handler, memoize, timeout) + Examples: + event += handler + event += (handler, True, 1.5) + event += {'handler':handler, 'memoize':True, 'timeout':1.5} + """ + assert queue_item, 'Invalid list of arguments' + handler = None + memoize = False + timeout = 0 + + if not isinstance(queue_item, (list, tuple, dict)): + handler = queue_item + elif isinstance(queue_item, (list, tuple)): + if len(queue_item) == 3: + handler, memoize, timeout = queue_item + elif len(queue_item) == 2: + handler, memoize, = queue_item + elif len(queue_item) == 1: + handler = queue_item + elif isinstance(queue_item, dict): + handler = queue_item.get('handler') + memoize = queue_item.get('memoize', False) + timeout = queue_item.get('timeout', 0) + return (handler, bool(memoize), float(timeout)) + + def _memoize(self, memoize, timeout, handler, *args, **kwargs): + """ Caches the execution result of successful executions + hash = hash(handler) + memoize = { + hash : ((args, kwargs, result), (args, kwargs, result), ...), + hash : ((args, kwargs, result), ...), ... 
+ } + """ + if not isinstance(handler, Event) and self.sender is not None: + args = list(args)[:] + args.insert(0, self.sender) + + if not memoize: + if timeout <= 0: #no time restriction + return [True, handler(*args, **kwargs), handler] + + result = self._timeout(timeout, handler, *args, **kwargs) + if isinstance(result, tuple) and len(result) == 3: + if isinstance(result[1], Exception): #error occurred + return [False, self._error(result), handler] + return [True, result, handler] + else: + hash_ = hash(handler) + if hash_ in self.memoize: + for args_, kwargs_, result in self.memoize[hash_]: + if args_ == args and kwargs_ == kwargs: + return [True, result, handler] + + if timeout <= 0: #no time restriction + result = handler(*args, **kwargs) + else: + result = self._timeout(timeout, handler, *args, **kwargs) + if isinstance(result, tuple) and len(result) == 3: + if isinstance(result[1], Exception): #error occurred + return [False, self._error(result), handler] + + lock = threading.RLock() + lock.acquire() + try: + if hash_ not in self.memoize: + self.memoize[hash_] = [] + self.memoize[hash_].append((args, kwargs, result)) + return [True, result, handler] + finally: + lock.release() + + def _timeout(self, timeout, handler, *args, **kwargs): + """ Controls the time allocated for the execution of a method """ + t = spawn_thread(target=handler, args=args, kwargs=kwargs) + t.daemon = True + t.start() + t.join(timeout) + + if not t.is_alive(): + if t.exc_info: + return t.exc_info + return t.result + else: + try: + msg = '[%s] Execution was forcefully terminated' + raise RuntimeError(msg % t.name) + except: + return sys.exc_info() + + def _threads(self): + """ Calculates maximum number of threads that will be started """ + if self.threads < len(self.handlers): + return self.threads + return len(self.handlers) + + def _error(self, exc_info): + """ Retrieves the error info """ + if self.exc_info: + if self.traceback: + return exc_info + return exc_info[:2] + return 
exc_info[1] + + __iadd__ = handle + __isub__ = unhandle + __call__ = fire + __len__ = count + +class spawn_thread(threading.Thread): + """ Spawns a new thread and returns the execution result """ + + def __init__(self, target, args=(), kwargs={}, default=None): + threading.Thread.__init__(self) + self._target = target + self._args = args + self._kwargs = kwargs + self.result = default + self.exc_info = None + + def run(self): + try: + self.result = self._target(*self._args, **self._kwargs) + except: + self.exc_info = sys.exc_info() + finally: + del self._target, self._args, self._kwargs \ No newline at end of file diff --git a/libs/daemon.py b/libs/daemon.py new file mode 100644 index 0000000..b1e3e00 --- /dev/null +++ b/libs/daemon.py @@ -0,0 +1,181 @@ +## {{{ http://code.activestate.com/recipes/278731/ (r6) +"""Disk And Execution MONitor (Daemon) + +Configurable daemon behaviors: + + 1.) The current working directory set to the "/" directory. + 2.) The current file creation mode mask set to 0. + 3.) Close all open files (1024). + 4.) Redirect standard I/O streams to "/dev/null". + +A failed call to fork() now raises an exception. + +References: + 1) Advanced Programming in the Unix Environment: W. Richard Stevens + 2) Unix Programming Frequently Asked Questions: + http://www.erlenstar.demon.co.uk/unix/faq_toc.html +""" + +__author__ = "Chad J. Schroeder" +__copyright__ = "Copyright (C) 2005 Chad J. Schroeder" + +__revision__ = "$Id$" +__version__ = "0.2" + +# Standard Python modules. +import os # Miscellaneous OS interfaces. +import sys # System-specific parameters and functions. + +# Default daemon parameters. +# File mode creation mask of the daemon. +UMASK = 0 + +# Default working directory for the daemon. +WORKDIR = "/" + +# Default maximum for the number of available file descriptors. +MAXFD = 1024 + +# The standard I/O file descriptors are redirected to /dev/null by default. 
# File to which the standard I/O descriptors get redirected: os.devnull where
# the platform provides it (POSIX, Windows), otherwise the classic POSIX path.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"

def createDaemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.

    Classic UNIX double-fork daemonization.  Both intermediate parents call
    os._exit(), so only the grandchild (the daemon process) ever returns from
    this function, with return value 0.  Relies on the module-level constants
    WORKDIR, UMASK and MAXFD (defined earlier in this file) and on
    REDIRECT_TO above.
    """

    try:
        # Fork a child process so the parent can exit.  This returns control
        # to the command-line or shell.  It also guarantees that the child
        # will not be a process group leader, since the child receives a new
        # process ID and inherits the parent's process group ID.  This step
        # is required to ensure that the next call to os.setsid is
        # successful.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)

    if (pid == 0):      # The first child.
        # Become the session leader of a new session and the process group
        # leader of a new process group.  The process is also guaranteed not
        # to have a controlling terminal.
        os.setsid()

        # Is ignoring SIGHUP necessary?  When the first child terminates,
        # all processes in the orphaned group (e.g. the second child) would
        # be sent SIGHUP *only if* they are stopped (POSIX.1 orphaned
        # process group rule); since the second child is never stopped, we
        # can safely forego ignoring SIGHUP.  There would be no ill effects
        # if it were ignored:
        #
        # import signal           # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)

        try:
            # Fork a second child and exit immediately to prevent zombies.
            # This causes the second child process to be orphaned, making
            # the init process responsible for its cleanup.  And, since the
            # first child is a session leader without a controlling
            # terminal, it is possible for it to acquire one by opening a
            # terminal in the future (System V-based systems).  This second
            # fork guarantees that the child is no longer a session leader,
            # preventing the daemon from ever acquiring a controlling
            # terminal.
            pid = os.fork()     # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)

        if (pid == 0):      # The second child.
            # Since the current working directory may be a mounted
            # filesystem, we avoid the issue of not being able to unmount
            # the filesystem at shutdown time by changing it to the root
            # directory.
            os.chdir(WORKDIR)
            # We probably don't want the file mode creation mask inherited
            # from the parent, so we give the child complete control over
            # permissions.
            os.umask(UMASK)
        else:
            # exit() or _exit()?  See below.
            os._exit(0)     # Exit parent (the first child) of the second child.
    else:
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers.  It
        # also closes any open file descriptors.  Using exit() may cause all
        # stdio streams to be flushed twice and any temporary files may be
        # unexpectedly removed.  It's therefore recommended that child
        # branches of a fork() and the parent branch(es) of a daemon use
        # _exit().
        os._exit(0)     # Exit parent of the first child.

    # Close all open file descriptors.  This prevents the child from keeping
    # open any file descriptors inherited from the parent.  Alternatives to
    # getrlimit below include os.sysconf("SC_OPEN_MAX") with a fall-back to
    # the default MAXFD when the name doesn't exist.
    #
    # Use the getrlimit method to retrieve the maximum file descriptor
    # number that can be opened by this process.  If there is no limit on
    # the resource, use the default value.
    import resource             # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD

    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:     # ERROR, fd wasn't open to begin with (ignored)
            pass

    # Redirect the standard I/O file descriptors to the specified file.
    # Since the daemon has no controlling terminal, most daemons redirect
    # stdin, stdout, and stderr to /dev/null.  This is done to prevent
    # side-effects from reads and writes to the standard I/O file
    # descriptors.

    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    os.open(REDIRECT_TO, os.O_RDWR)     # standard input (0)

    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)       # standard output (1)
    os.dup2(0, 2)       # standard error (2)

    return(0)

########################## LICENCE ###############################
##
## Copyright (c) 2005-2011, Michele Simionato
## All rights reserved.
##
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## Redistributions in bytecode form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.

## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
## DAMAGE.
+""" + +__version__ = '3.3.0' + +__all__ = ["decorator", "FunctionMaker", "partial"] + +import sys, re, inspect + +try: + from functools import partial +except ImportError: # for Python version < 2.5 + class partial(object): + "A simple replacement of functools.partial" + def __init__(self, func, *args, **kw): + self.func = func + self.args = args + self.keywords = kw + def __call__(self, *otherargs, **otherkw): + kw = self.keywords.copy() + kw.update(otherkw) + return self.func(*(self.args + otherargs), **kw) + +if sys.version >= '3': + from inspect import getfullargspec +else: + class getfullargspec(object): + "A quick and dirty replacement for getfullargspec for Python 2.X" + def __init__(self, f): + self.args, self.varargs, self.varkw, self.defaults = \ + inspect.getargspec(f) + self.kwonlyargs = [] + self.kwonlydefaults = None + self.annotations = getattr(f, '__annotations__', {}) + def __iter__(self): + yield self.args + yield self.varargs + yield self.varkw + yield self.defaults + +DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(') + +# basic functionality +class FunctionMaker(object): + """ + An object with the ability to create functions with a given signature. + It has attributes name, doc, module, signature, defaults, dict and + methods update and make. 
+ """ + def __init__(self, func=None, name=None, signature=None, + defaults=None, doc=None, module=None, funcdict=None): + self.shortsignature = signature + if func: + # func can be a class or a callable, but not an instance method + self.name = func.__name__ + if self.name == '': # small hack for lambda functions + self.name = '_lambda_' + self.doc = func.__doc__ + self.module = func.__module__ + if inspect.isfunction(func): + argspec = getfullargspec(func) + for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', + 'kwonlydefaults', 'annotations'): + setattr(self, a, getattr(argspec, a)) + for i, arg in enumerate(self.args): + setattr(self, 'arg%d' % i, arg) + self.signature = inspect.formatargspec( + formatvalue=lambda val: "", *argspec)[1:-1] + allargs = list(self.args) + if self.varargs: + allargs.append('*' + self.varargs) + if self.varkw: + allargs.append('**' + self.varkw) + try: + self.shortsignature = ', '.join(allargs) + except TypeError: # exotic signature, valid only in Python 2.X + self.shortsignature = self.signature + self.dict = func.__dict__.copy() + # func=None happens when decorating a caller + if name: + self.name = name + if signature is not None: + self.signature = signature + if defaults: + self.defaults = defaults + if doc: + self.doc = doc + if module: + self.module = module + if funcdict: + self.dict = funcdict + # check existence required attributes + assert hasattr(self, 'name') + if not hasattr(self, 'signature'): + raise TypeError('You are decorating a non function: %s' % func) + + def update(self, func, **kw): + "Update the signature of func with the data in self" + func.__name__ = self.name + func.__doc__ = getattr(self, 'doc', None) + func.__dict__ = getattr(self, 'dict', {}) + func.func_defaults = getattr(self, 'defaults', ()) + callermodule = sys._getframe(3).f_globals.get('__name__', '?') + func.__module__ = getattr(self, 'module', callermodule) + func.__dict__.update(kw) + + def make(self, src_templ, evaldict=None, 
addsource=False, **attrs): + "Make a new function from a given template and update the signature" + src = src_templ % vars(self) # expand name and signature + evaldict = evaldict or {} + mo = DEF.match(src) + if mo is None: + raise SyntaxError('not a valid function template\n%s' % src) + name = mo.group(1) # extract the function name + names = set([name] + [arg.strip(' *') for arg in + self.shortsignature.split(',')]) + for n in names: + if n in ('_func_', '_call_'): + raise NameError('%s is overridden in\n%s' % (n, src)) + if not src.endswith('\n'): # add a newline just for safety + src += '\n' # this is needed in old versions of Python + try: + code = compile(src, '', 'single') + # print >> sys.stderr, 'Compiling %s' % src + exec code in evaldict + except: + print >> sys.stderr, 'Error in generated code:' + print >> sys.stderr, src + raise + func = evaldict[name] + if addsource: + attrs['__source__'] = src + self.update(func, **attrs) + return func + + @classmethod + def create(cls, obj, body, evaldict, defaults=None, + doc=None, module=None, addsource=True,**attrs): + """ + Create a function from the strings name, signature and body. + evaldict is the evaluation dictionary. If addsource is true an attribute + __source__ is added to the result. The attributes attrs are added, + if any. + """ + if isinstance(obj, str): # "name(signature)" + name, rest = obj.strip().split('(', 1) + signature = rest[:-1] #strip a right parens + func = None + else: # a function + name = None + signature = None + func = obj + self = cls(func, name, signature, defaults, doc, module) + ibody = '\n'.join(' ' + line for line in body.splitlines()) + return self.make('def %(name)s(%(signature)s):\n' + ibody, + evaldict, addsource, **attrs) + +def decorator(caller, func=None): + """ + decorator(caller) converts a caller function into a decorator; + decorator(caller, func) decorates a function using a caller. 
+ """ + if func is not None: # returns a decorated function + evaldict = func.func_globals.copy() + evaldict['_call_'] = caller + evaldict['_func_'] = func + return FunctionMaker.create( + func, "return _call_(_func_, %(shortsignature)s)", + evaldict, undecorated=func) + else: # returns a decorator + if isinstance(caller, partial): + return partial(decorator, caller) + # otherwise assume caller is a function + first = inspect.getargspec(caller)[0][0] # first arg + evaldict = caller.func_globals.copy() + evaldict['_call_'] = caller + evaldict['decorator'] = decorator + return FunctionMaker.create( + '%s(%s)' % (caller.__name__, first), + 'return decorator(_call_, %s)' % first, + evaldict, undecorated=caller, + doc=caller.__doc__, module=caller.__module__) diff --git a/libs/elixir/__init__.py b/libs/elixir/__init__.py new file mode 100644 index 0000000..e92ad7f --- /dev/null +++ b/libs/elixir/__init__.py @@ -0,0 +1,119 @@ +''' +Elixir package + +A declarative layer on top of the `SQLAlchemy library +`_. It is a fairly thin wrapper, which provides +the ability to create simple Python classes that map directly to relational +database tables (this pattern is often referred to as the Active Record design +pattern), providing many of the benefits of traditional databases +without losing the convenience of Python objects. + +Elixir is intended to replace the ActiveMapper SQLAlchemy extension, and the +TurboEntity project but does not intend to replace SQLAlchemy's core features, +and instead focuses on providing a simpler syntax for defining model objects +when you do not need the full expressiveness of SQLAlchemy's manual mapper +definitions. 
try:
    set
except NameError:
    # Python 2.3 compatibility: use the old sets module
    from sets import Set as set

import sqlalchemy
from sqlalchemy.types import *

from elixir.options import using_options, using_table_options, \
                           using_mapper_options, options_defaults, \
                           using_options_defaults
from elixir.entity import Entity, EntityBase, EntityMeta, EntityDescriptor, \
                          setup_entities, cleanup_entities
from elixir.fields import has_field, Field
from elixir.relationships import belongs_to, has_one, has_many, \
                                 has_and_belongs_to_many, \
                                 ManyToOne, OneToOne, OneToMany, ManyToMany
from elixir.properties import has_property, GenericProperty, ColumnProperty, \
                              Synonym
from elixir.statements import Statement
from elixir.collection import EntityCollection, GlobalEntityCollection


__version__ = '0.7.1'

__all__ = ['Entity', 'EntityBase', 'EntityMeta', 'EntityCollection',
           'entities',
           'Field', 'has_field',
           'has_property', 'GenericProperty', 'ColumnProperty', 'Synonym',
           'belongs_to', 'has_one', 'has_many', 'has_and_belongs_to_many',
           'ManyToOne', 'OneToOne', 'OneToMany', 'ManyToMany',
           'using_options', 'using_table_options', 'using_mapper_options',
           'options_defaults', 'using_options_defaults',
           'metadata', 'session',
           'create_all', 'drop_all',
           'setup_all', 'cleanup_all',
           'setup_entities', 'cleanup_entities'] + \
          sqlalchemy.types.__all__

__doc_all__ = ['create_all', 'drop_all',
               'setup_all', 'cleanup_all',
               'metadata', 'session']

# default session
session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker())

# default metadata
metadata = sqlalchemy.MetaData()

# every MetaData used by any declared entity is registered here, so the
# create_all/drop_all/cleanup_all helpers below can act on all of them
metadatas = set()

# default entity collection
entities = GlobalEntityCollection()


def create_all(*args, **kwargs):
    """Create the necessary tables for all declared entities.

    All positional and keyword arguments are forwarded to
    ``MetaData.create_all`` of every registered metadata.
    """
    for meta in metadatas:
        meta.create_all(*args, **kwargs)


def drop_all(*args, **kwargs):
    """Drop tables for all declared entities.

    All positional and keyword arguments are forwarded to
    ``MetaData.drop_all`` of every registered metadata.
    """
    for meta in metadatas:
        meta.drop_all(*args, **kwargs)


def setup_all(create_tables=False, *args, **kwargs):
    """Setup the table and mapper of all entities in the default entity
    collection.

    This is called automatically if any entity of the collection is
    configured with the `autosetup` option and it is first accessed,
    instanciated (called) or the create_all method of a metadata containing
    tables from any of those entities is called.

    When `create_tables` is true, the "CREATE" SQL statements are issued
    afterwards (extra arguments go to :func:`create_all`).
    """
    setup_entities(entities)

    if create_tables:
        create_all(*args, **kwargs)


def cleanup_all(drop_tables=False, *args, **kwargs):
    """Clear all mappers, clear the session, and clear all metadatas.
    Optionally drops the tables (extra arguments go to :func:`drop_all`).
    """
    # order matters: close the session and tear down the entities before
    # removing their mappers and metadata
    session.close()

    cleanup_entities(entities)

    sqlalchemy.orm.clear_mappers()
    entities.clear()

    if drop_tables:
        drop_all(*args, **kwargs)

    for meta in metadatas:
        meta.clear()
    metadatas.clear()
'''
Default entity collection implementation
'''
import sys
import re


class BaseCollection(list):
    """A list of entities extended with name-based resolution helpers."""

    def __init__(self, entities=None):
        list.__init__(self)
        if entities is not None:
            self.extend(entities)

    def extend(self, entities):
        # go through append() so subclasses get a chance to index entities
        for e in entities:
            self.append(e)

    def clear(self):
        del self[:]

    def resolve_absolute(self, key, full_path, entity=None, root=None):
        '''Resolve the dotted path `full_path` ("package.module.Name") to an
        object; the module part must already be imported (it is looked up in
        sys.modules).  `key` is only used in error messages.
        '''
        # FIX: the original read entity._descriptor.resolve_root
        # unconditionally when root was None, raising AttributeError when
        # called without a source entity (e.g. resolve() on a dotted key)
        if root is None and entity is not None:
            root = entity._descriptor.resolve_root
        if root:
            full_path = '%s.%s' % (root, full_path)
        # str.rsplit exists since Python 2.4, which SQLAlchemy already
        # requires, so the py23compat.rsplit shim is not needed
        module_path, classname = full_path.rsplit('.', 1)
        module = sys.modules[module_path]
        res = getattr(module, classname, None)
        if res is None:
            if entity is not None:
                raise Exception("Couldn't resolve target '%s' <%s> in '%s'!"
                                % (key, full_path, entity.__name__))
            else:
                raise Exception("Couldn't resolve target '%s' <%s>!"
                                % (key, full_path))
        return res

    def __getattr__(self, key):
        return self.resolve(key)


# default entity collection
class GlobalEntityCollection(BaseCollection):
    """Collection resolving entities by class name, or by full dotted path
    when the plain name is ambiguous.
    """

    def __init__(self, entities=None):
        # _entities is a dict of entities keyed on their name.
        self._entities = {}
        super(GlobalEntityCollection, self).__init__(entities)

    def append(self, entity):
        '''
        Add an entity to the collection.
        '''
        # FIX: the original called super(EntityCollection, self); that name
        # is only an alias bound *after* this class body executes — name the
        # class itself
        super(GlobalEntityCollection, self).append(entity)

        existing_entities = self._entities.setdefault(entity.__name__, [])
        existing_entities.append(entity)

    def resolve(self, key, entity=None):
        '''
        Resolve a key to an Entity. The optional `entity` argument is the
        "source" entity when resolving relationship targets.
        '''
        # Do we have a fully qualified entity name?
        if '.' in key:
            return self.resolve_absolute(key, key, entity)
        else:
            # Otherwise we look in the entities of this collection
            res = self._entities.get(key, None)
            if res is None:
                if entity:
                    raise Exception("Couldn't resolve target '%s' in '%s'"
                                    % (key, entity.__name__))
                else:
                    raise Exception("This collection does not contain any "
                                    "entity corresponding to the key '%s'!"
                                    % key)
            elif len(res) > 1:
                raise Exception("'%s' resolves to several entities, you should"
                                " use the full path (including the full module"
                                " name) to that entity." % key)
            else:
                return res[0]

    def clear(self):
        self._entities = {}
        super(GlobalEntityCollection, self).clear()

# backward compatible name
EntityCollection = GlobalEntityCollection

# captures any leading dots of a relative target path
_leading_dots = re.compile('^([.]*).*$')

class RelativeEntityCollection(BaseCollection):
    # the entity=None does not make any sense with a relative entity collection
    def resolve(self, key, entity):
        '''
        Resolve a key to an Entity. The `entity` argument is the "source"
        entity when resolving relationship targets.
        '''
        full_path = key

        if '.' not in key or key.startswith('.'):
            # relative target

            # any leading dot is stripped and with each dot removed,
            # the entity_module is stripped of one more chunk (starting with
            # the last one).
            num_dots = _leading_dots.match(full_path).end(1)
            full_path = full_path[num_dots:]
            chunks = entity.__module__.split('.')
            chunkstokeep = len(chunks) - num_dots
            if chunkstokeep < 0:
                raise Exception("Couldn't resolve relative target "
                                "'%s' relative to '%s'"
                                % (key, entity.__module__))
            entity_module = '.'.join(chunks[:chunkstokeep])

            # FIX: the original used "is not '__main__'", an identity
            # comparison on a str literal; use a proper equality test
            if entity_module and entity_module != '__main__':
                full_path = '%s.%s' % (entity_module, full_path)

            root = ''
        else:
            root = None
        return self.resolve_absolute(key, full_path, entity, root=root)

    def __getattr__(self, key):
        raise NotImplementedError
def session_mapper_factory(scoped_session):
    """Return a ``session_mapper`` callable bound to *scoped_session*.

    The returned function behaves like SQLAlchemy's ``mapper`` but also
    attaches a ``query`` property to the mapped class and — unless the
    mapping is created with ``save_on_init=False`` — wraps ``__init__`` so
    that every new instance is automatically added to the scoped session.
    """
    def session_mapper(cls, *args, **kwargs):
        if kwargs.pop('save_on_init', True):
            original_init = cls.__init__

            def __init__(self, *args, **kwargs):
                # run the class's own initializer first, then register the
                # fresh instance with the session
                original_init(self, *args, **kwargs)
                scoped_session.add(self)

            cls.__init__ = __init__
        cls.query = scoped_session.query_property()
        return mapper(cls, *args, **kwargs)
    return session_mapper
+ % self.entity.__name__) + else: + self.parent = base + bases.extend(base._descriptor.bases) + self.parent._descriptor.children.append(entity) + else: + bases.append(base) + self.bases = bases + if not is_entity(entity) or is_abstract_entity(entity): + return + + # entity.__module__ is not always reliable (eg in mod_python) + self.module = sys.modules.get(entity.__module__) + + self.builders = [] + + #XXX: use entity.__subclasses__ ? + self.children = [] + + # used for multi-table inheritance + self.join_condition = None + self.has_pk = False + self._pk_col_done = False + + # columns and constraints waiting for a table to exist + self._columns = ColumnCollection() + self.constraints = [] + + # properties (it is only useful for checking dupe properties at the + # moment, and when adding properties before the mapper is created, + # which shouldn't happen). + self.properties = {} + + # + self.relationships = [] + + # set default value for options + self.table_args = [] + + # base class(es) options_defaults + options_defaults = self.options_defaults() + + complete_defaults = options.options_defaults.copy() + complete_defaults.update({ + 'metadata': elixir.metadata, + 'session': elixir.session, + 'collection': elixir.entities + }) + + # set default value for other options + for key in options.valid_options: + value = options_defaults.get(key, complete_defaults[key]) + if isinstance(value, dict): + value = value.copy() + setattr(self, key, value) + + # override options with module-level defaults defined + for key in ('metadata', 'session', 'collection'): + attr = '__%s__' % key + if hasattr(self.module, attr): + setattr(self, key, getattr(self.module, attr)) + + def options_defaults(self): + base_defaults = {} + for base in self.bases: + base_defaults.update(base._descriptor.options_defaults()) + base_defaults.update(getattr(self.entity, 'options_defaults', {})) + return base_defaults + + def setup_options(self): + ''' + Setup any values that might depend on the 
"using_options" class + mutator. For example, the tablename or the metadata. + ''' + elixir.metadatas.add(self.metadata) + if self.collection is not None: + self.collection.append(self.entity) + + entity = self.entity + if self.parent: + if self.inheritance == 'single': + self.tablename = self.parent._descriptor.tablename + + if not self.tablename: + if self.shortnames: + self.tablename = entity.__name__.lower() + else: + modulename = entity.__module__.replace('.', '_') + tablename = "%s_%s" % (modulename, entity.__name__) + self.tablename = tablename.lower() + elif hasattr(self.tablename, '__call__'): + self.tablename = self.tablename(entity) + + if not self.identity: + if 'polymorphic_identity' in self.mapper_options: + self.identity = self.mapper_options['polymorphic_identity'] + else: + #TODO: include module name (We could have b.Account inherit + # from a.Account) + self.identity = entity.__name__.lower() + elif 'polymorphic_identity' in self.mapper_options: + raise Exception('You cannot use the "identity" option and the ' + 'polymorphic_identity mapper option at the same ' + 'time.') + elif hasattr(self.identity, '__call__'): + self.identity = self.identity(entity) + + if self.polymorphic: + if not isinstance(self.polymorphic, basestring): + self.polymorphic = options.DEFAULT_POLYMORPHIC_COL_NAME + + #--------------------- + # setup phase methods + + def setup_autoload_table(self): + self.setup_table(True) + + def create_pk_cols(self): + """ + Create primary_key columns. That is, call the 'create_pk_cols' + builders then add a primary key to the table if it hasn't already got + one and needs one. + + This method is "semi-recursive" in some cases: it calls the + create_keys method on ManyToOne relationships and those in turn call + create_pk_cols on their target. It shouldn't be possible to have an + infinite loop since a loop of primary_keys is not a valid situation. 
+ """ + if self._pk_col_done: + return + + self.call_builders('create_pk_cols') + + if not self.autoload: + if self.parent: + if self.inheritance == 'multi': + # Add columns with foreign keys to the parent's primary + # key columns + parent_desc = self.parent._descriptor + tablename = parent_desc.table_fullname + join_clauses = [] + for pk_col in parent_desc.primary_keys: + colname = options.MULTIINHERITANCECOL_NAMEFORMAT % \ + {'entity': self.parent.__name__.lower(), + 'key': pk_col.key} + + # It seems like SA ForeignKey is not happy being given + # a real column object when said column is not yet + # attached to a table + pk_col_name = "%s.%s" % (tablename, pk_col.key) + fk = ForeignKey(pk_col_name, ondelete='cascade') + col = Column(colname, pk_col.type, fk, + primary_key=True) + self.add_column(col) + join_clauses.append(col == pk_col) + self.join_condition = and_(*join_clauses) + elif self.inheritance == 'concrete': + # Copy primary key columns from the parent. + for col in self.parent._descriptor.columns: + if col.primary_key: + self.add_column(col.copy()) + elif not self.has_pk and self.auto_primarykey: + if isinstance(self.auto_primarykey, basestring): + colname = self.auto_primarykey + else: + colname = options.DEFAULT_AUTO_PRIMARYKEY_NAME + + self.add_column( + Column(colname, options.DEFAULT_AUTO_PRIMARYKEY_TYPE, + primary_key=True)) + self._pk_col_done = True + + def setup_relkeys(self): + self.call_builders('create_non_pk_cols') + + def before_table(self): + self.call_builders('before_table') + + def setup_table(self, only_autoloaded=False): + ''' + Create a SQLAlchemy table-object with all columns that have been + defined up to this point. 
+ ''' + if self.entity.table is not None: + return + + if self.autoload != only_autoloaded: + return + + kwargs = self.table_options + if self.autoload: + args = self.table_args + kwargs['autoload'] = True + else: + if self.parent: + if self.inheritance == 'single': + # we know the parent is setup before the child + self.entity.table = self.parent.table + + # re-add the entity columns to the parent entity so that + # they are added to the parent's table (whether the + # parent's table is already setup or not). + for col in self._columns: + self.parent._descriptor.add_column(col) + for constraint in self.constraints: + self.parent._descriptor.add_constraint(constraint) + return + elif self.inheritance == 'concrete': + #TODO: we should also copy columns from the parent table + # if the parent is a base (abstract?) entity (whatever the + # inheritance type -> elif will need to be changed) + + # Copy all non-primary key columns from parent table + # (primary key columns have already been copied earlier). 
+ for col in self.parent._descriptor.columns: + if not col.primary_key: + self.add_column(col.copy()) + + for con in self.parent._descriptor.constraints: + self.add_constraint( + ForeignKeyConstraint( + [e.parent.key for e in con.elements], + [e._get_colspec() for e in con.elements], + name=con.name, #TODO: modify it + onupdate=con.onupdate, ondelete=con.ondelete, + use_alter=con.use_alter)) + + if self.polymorphic and \ + self.inheritance in ('single', 'multi') and \ + self.children and not self.parent: + self.add_column(Column(self.polymorphic, + options.POLYMORPHIC_COL_TYPE)) + + if self.version_id_col: + if not isinstance(self.version_id_col, basestring): + self.version_id_col = options.DEFAULT_VERSION_ID_COL_NAME + self.add_column(Column(self.version_id_col, Integer)) + + args = list(self.columns) + self.constraints + self.table_args + self.entity.table = Table(self.tablename, self.metadata, + *args, **kwargs) + if DEBUG: + print self.entity.table.repr2() + + def setup_reltables(self): + self.call_builders('create_tables') + + def after_table(self): + self.call_builders('after_table') + + def setup_events(self): + def make_proxy_method(methods): + def proxy_method(self, mapper, connection, instance): + for func in methods: + ret = func(instance) + # I couldn't commit myself to force people to + # systematicaly return EXT_CONTINUE in all their event + # methods. + # But not doing that diverge to how SQLAlchemy works. + # I should try to convince Mike to do EXT_CONTINUE by + # default, and stop processing as the special case. 
+# if ret != EXT_CONTINUE: + if ret is not None and ret != EXT_CONTINUE: + return ret + return EXT_CONTINUE + return proxy_method + + # create a list of callbacks for each event + methods = {} + + all_methods = getmembers(self.entity, + lambda a: isinstance(a, types.MethodType)) + + for name, method in all_methods: + for event in getattr(method, '_elixir_events', []): + event_methods = methods.setdefault(event, []) + event_methods.append(method) + + if not methods: + return + + # transform that list into methods themselves + for event in methods: + methods[event] = make_proxy_method(methods[event]) + + # create a custom mapper extension class, tailored to our entity + ext = type('EventMapperExtension', (MapperExtension,), methods)() + + # then, make sure that the entity's mapper has our mapper extension + self.add_mapper_extension(ext) + + def before_mapper(self): + self.call_builders('before_mapper') + + def _get_children(self): + children = self.children[:] + for child in self.children: + children.extend(child._descriptor._get_children()) + return children + + def translate_order_by(self, order_by): + if isinstance(order_by, basestring): + order_by = [order_by] + + order = [] + for colname in order_by: + col = self.get_column(colname.strip('-')) + if colname.startswith('-'): + col = desc(col) + order.append(col) + return order + + def setup_mapper(self): + ''' + Initializes and assign a mapper to the entity. + At this point the mapper will usually have no property as they are + added later. 
+ ''' + if self.entity.mapper: + return + + # for now we don't support the "abstract" parent class in a concrete + # inheritance scenario as demonstrated in + # sqlalchemy/test/orm/inheritance/concrete.py + # this should be added along other + kwargs = {} + if self.order_by: + kwargs['order_by'] = self.translate_order_by(self.order_by) + + if self.version_id_col: + kwargs['version_id_col'] = self.get_column(self.version_id_col) + + if self.inheritance in ('single', 'concrete', 'multi'): + if self.parent and \ + (self.inheritance != 'concrete' or self.polymorphic): + # non-polymorphic concrete doesn't need this + kwargs['inherits'] = self.parent.mapper + + if self.inheritance == 'multi' and self.parent: + kwargs['inherit_condition'] = self.join_condition + + if self.polymorphic: + if self.children: + if self.inheritance == 'concrete': + keys = [(self.identity, self.entity.table)] + keys.extend([(child._descriptor.identity, child.table) + for child in self._get_children()]) + # Having the same alias name for an entity and one of + # its child (which is a parent itself) shouldn't cause + # any problem because the join shouldn't be used at + # the same time. But in reality, some versions of SA + # do misbehave on this. Since it doesn't hurt to have + # different names anyway, here they go. 
+ pjoin = polymorphic_union( + dict(keys), self.polymorphic, + 'pjoin_%s' % self.identity) + + kwargs['with_polymorphic'] = ('*', pjoin) + kwargs['polymorphic_on'] = \ + getattr(pjoin.c, self.polymorphic) + elif not self.parent: + kwargs['polymorphic_on'] = \ + self.get_column(self.polymorphic) + + if self.children or self.parent: + kwargs['polymorphic_identity'] = self.identity + + if self.parent and self.inheritance == 'concrete': + kwargs['concrete'] = True + + if self.parent and self.inheritance == 'single': + args = [] + else: + args = [self.entity.table] + + # let user-defined kwargs override Elixir-generated ones, though that's + # not very usefull since most of them expect Column instances. + kwargs.update(self.mapper_options) + + #TODO: document this! + if 'primary_key' in kwargs: + cols = self.entity.table.c + kwargs['primary_key'] = [getattr(cols, colname) for + colname in kwargs['primary_key']] + + # do the mapping + if self.session is None: + self.entity.mapper = mapper(self.entity, *args, **kwargs) + elif isinstance(self.session, ScopedSession): + session_mapper = session_mapper_factory(self.session) + self.entity.mapper = session_mapper(self.entity, *args, **kwargs) + else: + raise Exception("Failed to map entity '%s' with its table or " + "selectable. You can only bind an Entity to a " + "ScopedSession object or None for manual session " + "management." + % self.entity.__name__) + + def after_mapper(self): + self.call_builders('after_mapper') + + def setup_properties(self): + self.call_builders('create_properties') + + def finalize(self): + self.call_builders('finalize') + self.entity._setup_done = True + + #---------------- + # helper methods + + def call_builders(self, what): + for builder in self.builders: + if hasattr(builder, what): + getattr(builder, what)() + + def add_column(self, col, check_duplicate=None): + '''when check_duplicate is None, the value of the allowcoloverride + option of the entity is used. 
+ ''' + if check_duplicate is None: + check_duplicate = not self.allowcoloverride + + if col.key in self._columns: + if check_duplicate: + raise Exception("Column '%s' already exist in '%s' ! " % + (col.key, self.entity.__name__)) + else: + del self._columns[col.key] + self._columns.add(col) + + if col.primary_key: + self.has_pk = True + + # Autosetup triggers shouldn't be active anymore at this point, so we + # can theoretically access the entity's table safely. But the problem + # is that if, for some reason, the trigger removal phase didn't + # happen, we'll get an infinite loop. So we just make sure we don't + # get one in any case. + table = type.__getattribute__(self.entity, 'table') + if table is not None: + if check_duplicate and col.key in table.columns.keys(): + raise Exception("Column '%s' already exist in table '%s' ! " % + (col.key, table.name)) + table.append_column(col) + if DEBUG: + print "table.append_column(%s)" % col + + def add_constraint(self, constraint): + self.constraints.append(constraint) + + table = self.entity.table + if table is not None: + table.append_constraint(constraint) + + def add_property(self, name, property, check_duplicate=True): + if check_duplicate and name in self.properties: + raise Exception("property '%s' already exist in '%s' ! " % + (name, self.entity.__name__)) + self.properties[name] = property + +#FIXME: something like this is needed to propagate the relationships from +# parent entities to their children in a concrete inheritance scenario. But +# this doesn't work because of the backref matching code. In most case +# (test_concrete.py) it doesn't even happen at all. 
+# if self.children and self.inheritance == 'concrete': +# for child in self.children: +# child._descriptor.add_property(name, property) + + mapper = self.entity.mapper + if mapper: + mapper.add_property(name, property) + if DEBUG: + print "mapper.add_property('%s', %s)" % (name, repr(property)) + + def add_mapper_extension(self, extension): + extensions = self.mapper_options.get('extension', []) + if not isinstance(extensions, list): + extensions = [extensions] + extensions.append(extension) + self.mapper_options['extension'] = extensions + + def get_column(self, key, check_missing=True): + #TODO: this needs to work whether the table is already setup or not + #TODO: support SA table/autoloaded entity + try: + return self.columns[key] + except KeyError: + if check_missing: + raise Exception("No column named '%s' found in the table of " + "the '%s' entity!" + % (key, self.entity.__name__)) + + def get_inverse_relation(self, rel, check_reverse=True): + ''' + Return the inverse relation of rel, if any, None otherwise. + ''' + + matching_rel = None + for other_rel in self.relationships: + if rel.is_inverse(other_rel): + if matching_rel is None: + matching_rel = other_rel + else: + raise Exception( + "Several relations match as inverse of the '%s' " + "relation in entity '%s'. You should specify " + "inverse relations manually by using the inverse " + "keyword." + % (rel.name, rel.entity.__name__)) + # When a matching inverse is found, we check that it has only + # one relation matching as its own inverse. We don't need the result + # of the method though. But we do need to be careful not to start an + # infinite recursive loop. 
+ if matching_rel and check_reverse: + rel.entity._descriptor.get_inverse_relation(matching_rel, False) + + return matching_rel + + def find_relationship(self, name): + for rel in self.relationships: + if rel.name == name: + return rel + if self.parent: + return self.parent._descriptor.find_relationship(name) + else: + return None + + #------------------------ + # some useful properties + + def table_fullname(self): + ''' + Complete name of the table for the related entity. + Includes the schema name if there is one specified. + ''' + schema = self.table_options.get('schema', None) + if schema is not None: + return "%s.%s" % (schema, self.tablename) + else: + return self.tablename + table_fullname = property(table_fullname) + + def columns(self): + if self.entity.table is not None: + return self.entity.table.columns + else: + #FIXME: depending on the type of inheritance, we should also + # return the parent entity's columns (for example for order_by + # using a column defined in the parent. + return self._columns + columns = property(columns) + + def primary_keys(self): + """ + Returns the list of primary key columns of the entity. + + This property isn't valid before the "create_pk_cols" phase. + """ + if self.autoload: + return [col for col in self.entity.table.primary_key.columns] + else: + if self.parent and self.inheritance == 'single': + return self.parent._descriptor.primary_keys + else: + return [col for col in self.columns if col.primary_key] + primary_keys = property(primary_keys) + + def table(self): + if self.entity.table is not None: + return self.entity.table + else: + return FakeTable(self) + table = property(table) + + def primary_key_properties(self): + """ + Returns the list of (mapper) properties corresponding to the primary + key columns of the table of the entity. + + This property caches its value, so it shouldn't be called before the + entity is fully set up. 
+ """ + if not hasattr(self, '_pk_props'): + col_to_prop = {} + mapper = self.entity.mapper + for prop in mapper.iterate_properties: + if isinstance(prop, ColumnProperty): + for col in prop.columns: + for col in col.proxy_set: + col_to_prop[col] = prop + pk_cols = [c for c in mapper.mapped_table.c if c.primary_key] + self._pk_props = [col_to_prop[c] for c in pk_cols] + return self._pk_props + primary_key_properties = property(primary_key_properties) + +class FakePK(object): + def __init__(self, descriptor): + self.descriptor = descriptor + + def columns(self): + return self.descriptor.primary_keys + columns = property(columns) + +class FakeTable(object): + def __init__(self, descriptor): + self.descriptor = descriptor + self.primary_key = FakePK(descriptor) + + def columns(self): + return self.descriptor.columns + columns = property(columns) + + def fullname(self): + ''' + Complete name of the table for the related entity. + Includes the schema name if there is one specified. + ''' + schema = self.descriptor.table_options.get('schema', None) + if schema is not None: + return "%s.%s" % (schema, self.descriptor.tablename) + else: + return self.descriptor.tablename + fullname = property(fullname) + + +class TriggerProxy(object): + """ + A class that serves as a "trigger" ; accessing its attributes runs + the setup_all function. + + Note that the `setup_all` is called on each access of the attribute. + """ + + def __init__(self, class_, attrname): + self.class_ = class_ + self.attrname = attrname + + def __getattr__(self, name): + elixir.setup_all() + #FIXME: it's possible to get an infinite loop here if setup_all doesn't + #remove the triggers for this entity. This can happen if the entity is + #not in the `entities` list for some reason. 
+ proxied_attr = getattr(self.class_, self.attrname) + return getattr(proxied_attr, name) + + def __repr__(self): + proxied_attr = getattr(self.class_, self.attrname) + return "" % (self.class_.__name__) + + +class TriggerAttribute(object): + + def __init__(self, attrname): + self.attrname = attrname + + def __get__(self, instance, owner): + #FIXME: it's possible to get an infinite loop here if setup_all doesn't + #remove the triggers for this entity. This can happen if the entity is + #not in the `entities` list for some reason. + elixir.setup_all() + return getattr(owner, self.attrname) + +def is_entity(cls): + """ + Scan the bases classes of `cls` to see if any is an instance of + EntityMeta. If we don't find any, it means it is either an unrelated class + or an entity base class (like the 'Entity' class). + """ + for base in cls.__bases__: + if isinstance(base, EntityMeta): + return True + return False + + +# Note that we don't use inspect.getmembers because of +# http://bugs.python.org/issue1785 +# See also http://elixir.ematia.de/trac/changeset/262 +def getmembers(object, predicate=None): + base_props = [] + for key in dir(object): + try: + value = getattr(object, key) + except AttributeError: + continue + if not predicate or predicate(value): + base_props.append((key, value)) + return base_props + +def is_abstract_entity(dict_or_cls): + if not isinstance(dict_or_cls, dict): + dict_or_cls = dict_or_cls.__dict__ + for mutator, args, kwargs in dict_or_cls.get(MUTATORS, []): + if 'abstract' in kwargs: + return kwargs['abstract'] + + return False + +def instrument_class(cls): + """ + Instrument a class as an Entity. This is usually done automatically through + the EntityMeta metaclass. + """ + # Create the entity descriptor + desc = cls._descriptor = EntityDescriptor(cls) + + # Process mutators + # We *do* want mutators to be processed for base/abstract classes + # (so that statements like using_options_defaults work). 
+ process_mutators(cls) + + # We do not want to do any more processing for base/abstract classes + # (Entity et al.). + if not is_entity(cls) or is_abstract_entity(cls): + return + + cls.table = None + cls.mapper = None + + # Copy the properties ('Property' instances) of the entity base class(es). + # We use getmembers (instead of __dict__) so that we also get the + # properties from the parents of the base class if any. + base_props = [] + for base in cls.__bases__: + if isinstance(base, EntityMeta) and \ + (not is_entity(base) or is_abstract_entity(base)): + base_props += [(name, deepcopy(attr)) for name, attr in + getmembers(base, lambda a: isinstance(a, Property))] + + # Process attributes (using the assignment syntax), looking for + # 'Property' instances and attaching them to this entity. + properties = [(name, attr) for name, attr in cls.__dict__.iteritems() + if isinstance(attr, Property)] + sorted_props = sorted(base_props + properties, + key=lambda i: i[1]._counter) + for name, prop in sorted_props: + prop.attach(cls, name) + + # setup misc options here (like tablename etc.) + desc.setup_options() + + # create trigger proxies + # TODO: support entity_name... It makes sense only for autoloaded + # tables for now, and would make more sense if we support "external" + # tables + if desc.autosetup: + _install_autosetup_triggers(cls) + + +class EntityMeta(type): + """ + Entity meta class. + You should only use it directly if you want to define your own base class + for your entities (ie you don't want to use the provided 'Entity' class). 
+ """ + + def __init__(cls, name, bases, dict_): + instrument_class(cls) + + def __call__(cls, *args, **kwargs): + if cls._descriptor.autosetup and not hasattr(cls, '_setup_done'): + elixir.setup_all() + return type.__call__(cls, *args, **kwargs) + + def __setattr__(cls, key, value): + if isinstance(value, Property): + if hasattr(cls, '_setup_done'): + raise Exception('Cannot set attribute on a class after ' + 'setup_all') + else: + value.attach(cls, key) + else: + type.__setattr__(cls, key, value) + + +def _install_autosetup_triggers(cls, entity_name=None): + #TODO: move as much as possible of those "_private" values to the + # descriptor, so that we don't mess the initial class. + warnings.warn("The 'autosetup' option on entities is deprecated. " + "Please call setup_all() manually after all your entities have been " + "declared.", DeprecationWarning, stacklevel=4) + tablename = cls._descriptor.tablename + schema = cls._descriptor.table_options.get('schema', None) + cls._table_key = sqlalchemy.schema._get_table_key(tablename, schema) + + table_proxy = TriggerProxy(cls, 'table') + + md = cls._descriptor.metadata + md.tables[cls._table_key] = table_proxy + + # We need to monkeypatch the metadata's table iterator method because + # otherwise it doesn't work if the setup is triggered by the + # metadata.create_all(). + # This is because ManyToMany relationships add tables AFTER the list + # of tables that are going to be created is "computed" + # (metadata.tables.values()). 
+ # see: + # - table_iterator method in MetaData class in sqlalchemy/schema.py + # - visit_metadata method in sqlalchemy/ansisql.py + if SA05orlater: + warnings.warn( + "The automatic setup via metadata.create_all() through " + "the autosetup option doesn't work with SQLAlchemy 0.5 and later!") + else: + # SA 0.6 does not use table_iterator anymore (it was already deprecated + # since SA 0.5.0) + original_table_iterator = md.table_iterator + if not hasattr(original_table_iterator, + '_non_elixir_patched_iterator'): + def table_iterator(*args, **kwargs): + elixir.setup_all() + return original_table_iterator(*args, **kwargs) + table_iterator.__doc__ = original_table_iterator.__doc__ + table_iterator._non_elixir_patched_iterator = \ + original_table_iterator + md.table_iterator = table_iterator + + #TODO: we might want to add all columns that will be available as + #attributes on the class itself (in SA 0.4+). This is a pretty + #rare usecase, as people will normally hit the query attribute before the + #column attributes, but I've seen people hitting this problem... 
+ for name in ('c', 'table', 'mapper', 'query'): + setattr(cls, name, TriggerAttribute(name)) + + cls._has_triggers = True + + +def _cleanup_autosetup_triggers(cls): + if not hasattr(cls, '_has_triggers'): + return + + for name in ('table', 'mapper'): + setattr(cls, name, None) + + for name in ('c', 'query'): + delattr(cls, name) + + desc = cls._descriptor + md = desc.metadata + + # the fake table could have already been removed (namely in a + # single table inheritance scenario) + md.tables.pop(cls._table_key, None) + + # restore original table iterator if not done already + if not SA05orlater: + if hasattr(md.table_iterator, '_non_elixir_patched_iterator'): + md.table_iterator = \ + md.table_iterator._non_elixir_patched_iterator + + del cls._has_triggers + + +def setup_entities(entities): + '''Setup all entities in the list passed as argument''' + + for entity in entities: + # delete all Elixir properties so that it doesn't interfere with + # SQLAlchemy. At this point they should have be converted to + # builders. + for name, attr in entity.__dict__.items(): + if isinstance(attr, Property): + delattr(entity, name) + + if entity._descriptor.autosetup: + _cleanup_autosetup_triggers(entity) + + for method_name in ( + 'setup_autoload_table', 'create_pk_cols', 'setup_relkeys', + 'before_table', 'setup_table', 'setup_reltables', 'after_table', + 'setup_events', + 'before_mapper', 'setup_mapper', 'after_mapper', + 'setup_properties', + 'finalize'): +# if DEBUG: +# print "=" * 40 +# print method_name +# print "=" * 40 + for entity in entities: +# print entity.__name__, "...", + if hasattr(entity, '_setup_done'): +# print "already done" + continue + method = getattr(entity._descriptor, method_name) + method() +# print "ok" + + +def cleanup_entities(entities): + """ + Try to revert back the list of entities passed as argument to the state + they had just before their setup phase. 
It will not work entirely for + autosetup entities as we need to remove the autosetup triggers. + + As of now, this function is *not* functional in that it doesn't revert to + the exact same state the entities were before setup. For example, the + properties do not work yet as those would need to be regenerated (since the + columns they are based on are regenerated too -- and as such the + corresponding joins are not correct) but this doesn't happen because of + the way relationship setup is designed to be called only once (especially + the backref stuff in create_properties). + """ + for entity in entities: + desc = entity._descriptor + if desc.autosetup: + _cleanup_autosetup_triggers(entity) + + if hasattr(entity, '_setup_done'): + del entity._setup_done + + entity.table = None + entity.mapper = None + + desc._pk_col_done = False + desc.has_pk = False + desc._columns = ColumnCollection() + desc.constraints = [] + desc.properties = {} + +class EntityBase(object): + """ + This class holds all methods of the "Entity" base class, but does not act + as a base class itself (it does not use the EntityMeta metaclass), but + rather as a parent class for Entity. This is meant so that people who want + to provide their own base class but don't want to loose or copy-paste all + the methods of Entity can do so by inheriting from EntityBase: + + .. 
sourcecode:: python + + class MyBase(EntityBase): + __metaclass__ = EntityMeta + + def myCustomMethod(self): + # do something great + """ + + def __init__(self, **kwargs): + self.set(**kwargs) + + def set(self, **kwargs): + for key, value in kwargs.iteritems(): + setattr(self, key, value) + + def update_or_create(cls, data, surrogate=True): + pk_props = cls._descriptor.primary_key_properties + + # if all pk are present and not None + if not [1 for p in pk_props if data.get(p.key) is None]: + pk_tuple = tuple([data[prop.key] for prop in pk_props]) + record = cls.query.get(pk_tuple) + if record is None: + if surrogate: + raise Exception("cannot create surrogate with pk") + else: + record = cls() + else: + if surrogate: + record = cls() + else: + raise Exception("cannot create non surrogate without pk") + record.from_dict(data) + return record + update_or_create = classmethod(update_or_create) + + def from_dict(self, data): + """ + Update a mapped class with data from a JSON-style nested dict/list + structure. + """ + # surrogate can be guessed from autoincrement/sequence but I guess + # that's not 100% reliable, so we'll need an override + + mapper = sqlalchemy.orm.object_mapper(self) + + for key, value in data.iteritems(): + if isinstance(value, dict): + dbvalue = getattr(self, key) + rel_class = mapper.get_property(key).mapper.class_ + pk_props = rel_class._descriptor.primary_key_properties + + # If the data doesn't contain any pk, and the relationship + # already has a value, update that record. 
+ if not [1 for p in pk_props if p.key in data] and \ + dbvalue is not None: + dbvalue.from_dict(value) + else: + record = rel_class.update_or_create(value) + setattr(self, key, record) + elif isinstance(value, list) and \ + value and isinstance(value[0], dict): + + rel_class = mapper.get_property(key).mapper.class_ + new_attr_value = [] + for row in value: + if not isinstance(row, dict): + raise Exception( + 'Cannot send mixed (dict/non dict) data ' + 'to list relationships in from_dict data.') + record = rel_class.update_or_create(row) + new_attr_value.append(record) + setattr(self, key, new_attr_value) + else: + setattr(self, key, value) + + def to_dict(self, deep={}, exclude=[]): + """Generate a JSON-style nested dict/list structure from an object.""" + col_prop_names = [p.key for p in self.mapper.iterate_properties \ + if isinstance(p, ColumnProperty)] + data = dict([(name, getattr(self, name)) + for name in col_prop_names if name not in exclude]) + for rname, rdeep in deep.iteritems(): + dbdata = getattr(self, rname) + #FIXME: use attribute names (ie coltoprop) instead of column names + fks = self.mapper.get_property(rname).remote_side + exclude = [c.name for c in fks] + if dbdata is None: + data[rname] = None + elif isinstance(dbdata, list): + data[rname] = [o.to_dict(rdeep, exclude) for o in dbdata] + else: + data[rname] = dbdata.to_dict(rdeep, exclude) + return data + + # session methods + def flush(self, *args, **kwargs): + return object_session(self).flush([self], *args, **kwargs) + + def delete(self, *args, **kwargs): + return object_session(self).delete(self, *args, **kwargs) + + def expire(self, *args, **kwargs): + return object_session(self).expire(self, *args, **kwargs) + + def refresh(self, *args, **kwargs): + return object_session(self).refresh(self, *args, **kwargs) + + def expunge(self, *args, **kwargs): + return object_session(self).expunge(self, *args, **kwargs) + + # This bunch of session methods, along with all the query methods below + # 
only make sense when using a global/scoped/contextual session. + def _global_session(self): + return self._descriptor.session.registry() + _global_session = property(_global_session) + + def merge(self, *args, **kwargs): + return self._global_session.merge(self, *args, **kwargs) + + def save(self, *args, **kwargs): + return self._global_session.save(self, *args, **kwargs) + + def update(self, *args, **kwargs): + return self._global_session.update(self, *args, **kwargs) + + # only exist in SA < 0.5 + # IMO, the replacement (session.add) doesn't sound good enough to be added + # here. For example: "o = Order(); o.add()" is not very telling. It's + # better to leave it as "session.add(o)" + def save_or_update(self, *args, **kwargs): + return self._global_session.save_or_update(self, *args, **kwargs) + + # query methods + def get_by(cls, *args, **kwargs): + """ + Returns the first instance of this class matching the given criteria. + This is equivalent to: + session.query(MyClass).filter_by(...).first() + """ + return cls.query.filter_by(*args, **kwargs).first() + get_by = classmethod(get_by) + + def get(cls, *args, **kwargs): + """ + Return the instance of this class based on the given identifier, + or None if not found. This is equivalent to: + session.query(MyClass).get(...) + """ + return cls.query.get(*args, **kwargs) + get = classmethod(get) + + +class Entity(EntityBase): + ''' + The base class for all entities + + All Elixir model objects should inherit from this class. Statements can + appear within the body of the definition of an entity to define its + fields, relationships, and other options. + + Here is an example: + + .. sourcecode:: python + + class Person(Entity): + name = Field(Unicode(128)) + birthdate = Field(DateTime, default=datetime.now) + + Please note, that if you don't specify any primary keys, Elixir will + automatically create one called ``id``. + + For further information, please refer to the provided examples or + tutorial. 
+ ''' + __metaclass__ = EntityMeta + + diff --git a/libs/elixir/events.py b/libs/elixir/events.py new file mode 100644 index 0000000..b160928 --- /dev/null +++ b/libs/elixir/events.py @@ -0,0 +1,30 @@ +__all__ = [ + 'before_insert', + 'after_insert', + 'before_update', + 'after_update', + 'before_delete', + 'after_delete', + 'reconstructor' +] + +def create_decorator(event_name): + def decorator(func): + if not hasattr(func, '_elixir_events'): + func._elixir_events = [] + func._elixir_events.append(event_name) + return func + return decorator + +before_insert = create_decorator('before_insert') +after_insert = create_decorator('after_insert') +before_update = create_decorator('before_update') +after_update = create_decorator('after_update') +before_delete = create_decorator('before_delete') +after_delete = create_decorator('after_delete') +try: + from sqlalchemy.orm import reconstructor +except ImportError: + def reconstructor(func): + raise Exception('The reconstructor method decorator is only ' + 'available with SQLAlchemy 0.5 and later') diff --git a/libs/elixir/ext/__init__.py b/libs/elixir/ext/__init__.py new file mode 100644 index 0000000..c8708f2 --- /dev/null +++ b/libs/elixir/ext/__init__.py @@ -0,0 +1,5 @@ +''' +Ext package + +Additional Elixir statements and functionality. +''' diff --git a/libs/elixir/ext/associable.py b/libs/elixir/ext/associable.py new file mode 100644 index 0000000..2a37c4b --- /dev/null +++ b/libs/elixir/ext/associable.py @@ -0,0 +1,234 @@ +''' +Associable Elixir Statement Generator + +========== +Associable +========== + +About Polymorphic Associations +------------------------------ + +A frequent pattern in database schemas is the has_and_belongs_to_many, or a +many-to-many table. Quite often multiple tables will refer to a single one +creating quite a few many-to-many intermediate tables. 
+ +Polymorphic associations lower the amount of many-to-many tables by setting up +a table that allows relations to any other table in the database, and relates +it to the associable table. In some implementations, this layout does not +enforce referential integrity with database foreign key constraints, this +implementation uses an additional many-to-many table with foreign key +constraints to avoid this problem. + +.. note: + SQLite does not support foreign key constraints, so referential integrity + can only be enforced using database backends with such support. + +Elixir Statement Generator for Polymorphic Associations +------------------------------------------------------- + +The ``associable`` function generates the intermediary tables for an Elixir +entity that should be associable with other Elixir entities and returns an +Elixir Statement for use with them. This automates the process of creating the +polymorphic association tables and ensuring their referential integrity. + +Matching select_XXX and select_by_XXX are also added to the associated entity +which allow queries to be run for the associated objects. + +Example usage: + +.. sourcecode:: python + + class Tag(Entity): + name = Field(Unicode) + + acts_as_taggable = associable(Tag) + + class Entry(Entity): + title = Field(Unicode) + acts_as_taggable('tags') + + class Article(Entity): + title = Field(Unicode) + acts_as_taggable('tags') + +Or if one of the entities being associated should only have a single member of +the associated table: + +.. 
sourcecode:: python + + class Address(Entity): + street = Field(String(130)) + city = Field(String(100)) + + is_addressable = associable(Address, 'addresses') + + class Person(Entity): + name = Field(Unicode) + orders = OneToMany('Order') + is_addressable() + + class Order(Entity): + order_num = Field(primary_key=True) + item_count = Field(Integer) + person = ManyToOne('Person') + is_addressable('address', uselist=False) + + home = Address(street='123 Elm St.', city='Spooksville') + user = Person(name='Jane Doe') + user.addresses.append(home) + + neworder = Order(item_count=4) + neworder.address = home + user.orders.append(neworder) + + # Queries using the added helpers + Person.select_by_addresses(city='Cupertino') + Person.select_addresses(and_(Address.c.street=='132 Elm St', + Address.c.city=='Smallville')) + +Statement Options +----------------- + +The generated Elixir Statement has several options available: + ++---------------+-------------------------------------------------------------+ +| Option Name | Description | ++===============+=============================================================+ +| ``name`` | Specify a custom name for the Entity attribute. This is | +| | used to declare the attribute used to access the associated | +| | table values. Otherwise, the name will use the plural_name | +| | provided to the associable call. | ++---------------+-------------------------------------------------------------+ +| ``uselist`` | Whether or not the associated table should be represented | +| | as a list, or a single property. It should be set to False | +| | when the entity should only have a single associated | +| | entity. Defaults to True. | ++---------------+-------------------------------------------------------------+ +| ``lazy`` | Determines eager loading of the associated entity objects. | +| | Defaults to False, to indicate that they should not be | +| | lazily loaded. 
| ++---------------+-------------------------------------------------------------+ +''' +from elixir.statements import Statement +import sqlalchemy as sa + +__doc_all__ = ['associable'] + + +def associable(assoc_entity, plural_name=None, lazy=True): + ''' + Generate an associable Elixir Statement + ''' + interface_name = assoc_entity._descriptor.tablename + able_name = interface_name + 'able' + + if plural_name: + attr_name = "%s_rel" % plural_name + else: + plural_name = interface_name + attr_name = "%s_rel" % interface_name + + class GenericAssoc(object): + + def __init__(self, tablename): + self.type = tablename + + #TODO: inherit from entity builder + class Associable(object): + """An associable Elixir Statement object""" + + def __init__(self, entity, name=None, uselist=True, lazy=True): + self.entity = entity + self.lazy = lazy + self.uselist = uselist + + if name is None: + self.name = plural_name + else: + self.name = name + + def after_table(self): + col = sa.Column('%s_assoc_id' % interface_name, sa.Integer, + sa.ForeignKey('%s.id' % able_name)) + self.entity._descriptor.add_column(col) + + if not hasattr(assoc_entity, '_assoc_table'): + metadata = assoc_entity._descriptor.metadata + association_table = sa.Table("%s" % able_name, metadata, + sa.Column('id', sa.Integer, primary_key=True), + sa.Column('type', sa.String(40), nullable=False), + ) + tablename = "%s_to_%s" % (able_name, interface_name) + association_to_table = sa.Table(tablename, metadata, + sa.Column('assoc_id', sa.Integer, + sa.ForeignKey(association_table.c.id, + ondelete="CASCADE"), + primary_key=True), + #FIXME: this assumes a single id col + sa.Column('%s_id' % interface_name, sa.Integer, + sa.ForeignKey(assoc_entity.table.c.id, + ondelete="RESTRICT"), + primary_key=True), + ) + + assoc_entity._assoc_table = association_table + assoc_entity._assoc_to_table = association_to_table + + def after_mapper(self): + if not hasattr(assoc_entity, '_assoc_mapper'): + assoc_entity._assoc_mapper = 
sa.orm.mapper( + GenericAssoc, assoc_entity._assoc_table, properties={ + 'targets': sa.orm.relation( + assoc_entity, + secondary=assoc_entity._assoc_to_table, + lazy=lazy, backref='associations', + order_by=assoc_entity.mapper.order_by) + }) + + entity = self.entity + entity.mapper.add_property( + attr_name, + sa.orm.relation(GenericAssoc, lazy=self.lazy, + backref='_backref_%s' % entity.table.name) + ) + + if self.uselist: + def get(self): + if getattr(self, attr_name) is None: + setattr(self, attr_name, + GenericAssoc(entity.table.name)) + return getattr(self, attr_name).targets + setattr(entity, self.name, property(get)) + else: + # scalar based property decorator + def get(self): + attr = getattr(self, attr_name) + if attr is not None: + return attr.targets[0] + else: + return None + def set(self, value): + if getattr(self, attr_name) is None: + setattr(self, attr_name, + GenericAssoc(entity.table.name)) + getattr(self, attr_name).targets = [value] + setattr(entity, self.name, property(get, set)) + + # self.name is both set via mapper synonym and the python + # property, but that's how synonym properties work. 
+ # adding synonym property after "real" property otherwise it + # breaks when using SQLAlchemy > 0.4.1 + entity.mapper.add_property(self.name, sa.orm.synonym(attr_name)) + + # add helper methods + def select_by(cls, **kwargs): + return cls.query.join([attr_name, 'targets']) \ + .filter_by(**kwargs).all() + setattr(entity, 'select_by_%s' % self.name, classmethod(select_by)) + + def select(cls, *args, **kwargs): + return cls.query.join([attr_name, 'targets']) \ + .filter(*args, **kwargs).all() + setattr(entity, 'select_%s' % self.name, classmethod(select)) + + return Statement(Associable) diff --git a/libs/elixir/ext/encrypted.py b/libs/elixir/ext/encrypted.py new file mode 100644 index 0000000..410855d --- /dev/null +++ b/libs/elixir/ext/encrypted.py @@ -0,0 +1,124 @@ +''' +An encryption plugin for Elixir utilizing the excellent PyCrypto library, which +can be downloaded here: http://www.amk.ca/python/code/crypto + +Values for columns that are specified to be encrypted will be transparently +encrypted and safely encoded for storage in a unicode column using the powerful +and secure Blowfish Cipher using a specified "secret" which can be passed into +the plugin at class declaration time. + +Example usage: + +.. sourcecode:: python + + from elixir import * + from elixir.ext.encrypted import acts_as_encrypted + + class Person(Entity): + name = Field(Unicode) + password = Field(Unicode) + ssn = Field(Unicode) + acts_as_encrypted(for_fields=['password', 'ssn'], + with_secret='secret') + +The above Person entity will automatically encrypt and decrypt the password and +ssn columns on save, update, and load. Different secrets can be specified on +an entity by entity basis, for added security. + +**Important note**: instance attributes are encrypted in-place. 
'''
An encryption plugin for Elixir utilizing the excellent PyCrypto library,
which can be downloaded here: http://www.amk.ca/python/code/crypto

Values for columns that are specified to be encrypted will be transparently
encrypted and safely encoded for storage in a unicode column using the
powerful and secure Blowfish Cipher using a specified "secret" which can be
passed into the plugin at class declaration time.

Example usage:

.. sourcecode:: python

    from elixir import *
    from elixir.ext.encrypted import acts_as_encrypted

    class Person(Entity):
        name = Field(Unicode)
        password = Field(Unicode)
        ssn = Field(Unicode)
        acts_as_encrypted(for_fields=['password', 'ssn'],
                          with_secret='secret')

The above Person entity will automatically encrypt and decrypt the password
and ssn columns on save, update, and load.  Different secrets can be
specified on an entity by entity basis, for added security.

**Important note**: instance attributes are encrypted in-place.  This means
that if one of the encrypted attributes of an instance is accessed after the
instance has been flushed to the database (and thus encrypted), the value
for that attribute will be crypted in the in-memory object in addition to
the database row.
'''

from Crypto.Cipher import Blowfish
from elixir.statements import Statement
from sqlalchemy.orm import MapperExtension, EXT_CONTINUE, EXT_STOP

try:
    from sqlalchemy.orm import EXT_PASS
    SA05orlater = False
except ImportError:
    SA05orlater = True

__all__ = ['acts_as_encrypted']
__doc_all__ = []


#
# encryption and decryption functions
#

def encrypt_value(value, secret):
    """Encrypt *value* with Blowfish/CFB keyed by *secret* and escape the
    resulting bytes so they are safe to store in a text column."""
    return Blowfish.new(secret, Blowfish.MODE_CFB) \
                   .encrypt(value).encode('string_escape')

def decrypt_value(value, secret):
    """Inverse of encrypt_value: unescape *value*, then decrypt it with
    Blowfish/CFB keyed by *secret*."""
    return Blowfish.new(secret, Blowfish.MODE_CFB) \
                   .decrypt(value.decode('string_escape'))


#
# acts_as_encrypted statement
#

class ActsAsEncrypted(object):
    """Builder attached by the ``acts_as_encrypted`` statement.

    Installs a mapper extension on the entity that encrypts the listed
    fields before INSERT/UPDATE and decrypts them when instances are
    loaded from the database.
    """

    def __init__(self, entity, for_fields=None, with_secret='abcdef'):
        # BUGFIX: 'for_fields' previously defaulted to a shared mutable
        # list ([]), the classic Python mutable-default pitfall; use a
        # None sentinel instead.  Behavior for all explicit callers is
        # unchanged.
        if for_fields is None:
            for_fields = []

        def perform_encryption(instance, encrypt=True):
            # '_elixir_encrypted' tracks the in-memory state of the
            # instance so that the transformation is applied only once
            # in each direction.
            encrypted = getattr(instance, '_elixir_encrypted', None)
            if encrypted is encrypt:
                # skipping encryption or decryption, as it is already done
                return
            else:
                # marking instance as already encrypted/decrypted
                instance._elixir_encrypted = encrypt

            if encrypt:
                func = encrypt_value
            else:
                func = decrypt_value

            # note: attributes are transformed in-place (see module note)
            for column_name in for_fields:
                current_value = getattr(instance, column_name)
                if current_value:
                    setattr(instance, column_name,
                            func(current_value, with_secret))

        def perform_decryption(instance):
            perform_encryption(instance, encrypt=False)

        class EncryptedMapperExtension(MapperExtension):

            def before_insert(self, mapper, connection, instance):
                perform_encryption(instance)
                return EXT_CONTINUE

            def before_update(self, mapper, connection, instance):
                perform_encryption(instance)
                return EXT_CONTINUE

            if SA05orlater:
                def reconstruct_instance(self, mapper, instance):
                    perform_decryption(instance)
                    # no special return value is required for
                    # reconstruct_instance, but you never know...
                    return EXT_CONTINUE
            else:
                def populate_instance(self, mapper, selectcontext, row,
                                      instance, *args, **kwargs):
                    mapper.populate_instance(selectcontext, instance, row,
                                             *args, **kwargs)
                    perform_decryption(instance)
                    # EXT_STOP because we already did populate the instance
                    # and the normal processing should not happen
                    return EXT_STOP

        # make sure that the entity's mapper has our mapper extension
        entity._descriptor.add_mapper_extension(EncryptedMapperExtension())


acts_as_encrypted = Statement(ActsAsEncrypted)
'''
This extension is DEPRECATED. Please use the orderinglist SQLAlchemy
extension instead.

For details:
http://www.sqlalchemy.org/docs/05/reference/ext/orderinglist.html

For an Elixir example:
http://elixir.ematia.de/trac/wiki/Recipes/UsingEntityForOrderedList
or
http://elixir.ematia.de/trac/browser/elixir/0.7.0/tests/test_o2m.py#L155


An ordered-list plugin for Elixir to help you make an entity be able to be
managed in a list-like way.  Much inspiration comes from the Ruby on Rails
acts_as_list plugin, which is currently more full-featured than this plugin.

Once you flag an entity with an `acts_as_list()` statement, a column will be
added to the entity called `position` which will be an integer column that
is managed for you by the plugin.  You can pass an alternative column name
to the plugin using the `column_name` keyword argument.

In addition, your entity will get a series of new methods attached to it:

``move_lower``
    Move the item lower in the list
``move_higher``
    Move the item higher in the list
``move_to_bottom``
    Move the item to the bottom of the list
``move_to_top``
    Move the item to the top of the list
``move_to``
    Move the item to a specific position in the list

Sometimes, your entities that represent list items will be a part of
different lists.  To implement this behavior, simply pass the `acts_as_list`
statement a callable that returns a "qualifier" SQLAlchemy expression.  This
expression will be added to the generated WHERE clauses used by the plugin.

Example model usage:

.. sourcecode:: python

    from elixir import *
    from elixir.ext.list import acts_as_list

    class ToDo(Entity):
        subject = Field(String(128))
        owner = ManyToOne('Person')

        def qualify(self):
            return ToDo.owner_id == self.owner_id

        acts_as_list(qualifier=qualify)

    class Person(Entity):
        name = Field(String(64))
        todos = OneToMany('ToDo', order_by='position')

Note that you must set the `order_by` property on the `Person.todo` relation
in order for the relation to respect the ordering.  For more examples, refer
to the unit tests for this plugin.
'''

from elixir.statements import Statement
from elixir.events import before_insert, before_delete
from sqlalchemy import Column, Integer, select, func, literal, and_
import warnings

__all__ = ['acts_as_list']
__doc_all__ = []


def get_entity_where(instance):
    """Return a WHERE clause matching *instance*'s (possibly compound)
    primary key in its main table."""
    clauses = []
    for column in instance.table.primary_key.columns:
        instance_value = getattr(instance, column.name)
        clauses.append(column == instance_value)
    return and_(*clauses)


class ListEntityBuilder(object):
    """Builder attached by the ``acts_as_list`` statement: adds the
    position column and the move_* helper methods to the entity."""

    def __init__(self, entity, qualifier=None, column_name='position'):
        warnings.warn("The act_as_list extension is deprecated. Please use "
                      "SQLAlchemy's orderinglist extension instead",
                      DeprecationWarning, stacklevel=6)
        self.entity = entity
        self.qualifier_method = qualifier
        self.column_name = column_name

    def create_non_pk_cols(self):
        # For autoloaded tables the position column must already exist;
        # otherwise we create it ourselves.
        if self.entity._descriptor.autoload:
            for c in self.entity.table.c:
                if c.name == self.column_name:
                    self.position_column = c
            if not hasattr(self, 'position_column'):
                raise Exception(
                    "Could not find column '%s' in autoloaded table '%s', "
                    "needed by entity '%s'." % (self.column_name,
                        self.entity.table.name, self.entity.__name__))
        else:
            self.position_column = Column(self.column_name, Integer)
            self.entity._descriptor.add_column(self.position_column)

    def after_table(self):
        position_column = self.position_column
        position_column_name = self.column_name

        qualifier_method = self.qualifier_method
        if not qualifier_method:
            # no qualifier: operate over the whole table
            qualifier_method = lambda self: None

        def _init_position(self):
            # assign max(position) + 1 (or 1 for an empty list) on insert
            s = select(
                [(func.max(position_column)+1).label('value')],
                qualifier_method(self)
            ).union(
                select([literal(1).label('value')])
            )
            a = s.alias()
            # we use a second func.max to get the maximum between 1 and the
            # real max position if any exist
            setattr(self, position_column_name, select([func.max(a.c.value)]))

            # Note that this method could be rewritten more simply like
            # below, but because this extension is going to be deprecated
            # anyway, I don't want to risk breaking something I don't want
            # to maintain.
#            setattr(self, position_column_name, select(
#                [func.coalesce(func.max(position_column), 0) + 1],
#                qualifier_method(self)
#            ))
        _init_position = before_insert(_init_position)

        def _shift_items(self):
            # close the gap left by a deleted row
            self.table.update(
                and_(
                    position_column > getattr(self, position_column_name),
                    qualifier_method(self)
                ),
                values={
                    position_column : position_column - 1
                }
            ).execute()
        _shift_items = before_delete(_shift_items)

        def move_to_bottom(self):
            # move the items that were above this item up one
            self.table.update(
                and_(
                    position_column >= getattr(self, position_column_name),
                    qualifier_method(self)
                ),
                values = {
                    position_column : position_column - 1
                }
            ).execute()

            # move this item to the max position
            # MySQL does not support the correlated subquery, so we need to
            # execute the query (through scalar()). See ticket #34.
            self.table.update(
                get_entity_where(self),
                values={
                    position_column : select(
                        [func.max(position_column) + 1],
                        qualifier_method(self)
                    ).scalar()
                }
            ).execute()

        def move_to_top(self):
            self.move_to(1)

        def move_to(self, position):
            current_position = getattr(self, position_column_name)

            # BUGFIX: a no-op move (position == current_position) used to
            # fall through both branches below, leaving 'where' and
            # 'modifier' unbound and raising UnboundLocalError.
            if position == current_position:
                return

            # determine which direction we're moving
            if position < current_position:
                where = and_(
                    position <= position_column,
                    position_column < current_position,
                    qualifier_method(self)
                )
                modifier = 1
            else:
                where = and_(
                    current_position < position_column,
                    position_column <= position,
                    qualifier_method(self)
                )
                modifier = -1

            # shift the items in between the current and new positions
            self.table.update(where, values = {
                position_column : position_column + modifier
            }).execute()

            # update this item's position to the desired position
            self.table.update(get_entity_where(self)) \
                      .execute(**{position_column_name: position})

        def move_lower(self):
            # replace for ex.: p.todos.insert(x + 1, p.todos.pop(x))
            self.move_to(getattr(self, position_column_name) + 1)

        def move_higher(self):
            self.move_to(getattr(self, position_column_name) - 1)


        # attach new methods to entity
        self.entity._init_position = _init_position
        self.entity._shift_items = _shift_items
        self.entity.move_lower = move_lower
        self.entity.move_higher = move_higher
        self.entity.move_to_bottom = move_to_bottom
        self.entity.move_to_top = move_to_top
        self.entity.move_to = move_to


acts_as_list = Statement(ListEntityBuilder)
'''
DDL statements for Elixir.

Entities having the perform_ddl statement will automatically execute the
given DDL statement, at the given moment: either before or after the table
creation in SQL.

The 'when' argument can be either 'before-create' or 'after-create'.
The 'statement' argument can be one of:

- a single string statement
- a list (or tuple) of string statements, in which case, each of them will
  be executed in turn.
- a callable which should take no argument and return either a single string
  or a list of strings.

In each string statement, you may use the special '%(fullname)s' construct,
that will be replaced with the real table name including schema, if unknown
to you.  Also, self explained '%(table)s' and '%(schema)s' may be used here.

You would use this extension to handle non elixir sql statements, like
triggers etc.

.. sourcecode:: python

    class Movie(Entity):
        title = Field(Unicode(30), primary_key=True)
        year = Field(Integer)

        perform_ddl('after-create',
                    "insert into %(fullname)s values ('Alien', 1979)")

preload_data is a more specific statement meant to preload data in your
entity table from a list of tuples (of fields values for each row).

.. sourcecode:: python

    class Movie(Entity):
        title = Field(Unicode(30), primary_key=True)
        year = Field(Integer)

        preload_data(('title', 'year'),
                     [(u'Alien', 1979), (u'Star Wars', 1977)])
        preload_data(('year', 'title'),
                     [(1982, u'Blade Runner')])
        preload_data(data=[(u'Batman', 1966)])
'''

from elixir.statements import Statement
from elixir.properties import EntityBuilder
from sqlalchemy import DDL

__all__ = ['perform_ddl', 'preload_data']
__doc_all__ = []

#
# the perform_ddl statement
#
class PerformDDLEntityBuilder(EntityBuilder):
    """Execute raw DDL strings before/after the entity table creation."""

    def __init__(self, entity, when, statement, on=None, context=None):
        self.entity = entity
        self.when = when          # 'before-create' or 'after-create'
        self.statement = statement
        self.on = on              # dialect restriction, passed to DDL
        self.context = context    # extra format args, passed to DDL

    def after_table(self):
        statement = self.statement
        # a callable is evaluated lazily, once the table is known
        if hasattr(statement, '__call__'):
            statement = statement()
        # normalize to a sequence of statements; tuples are now accepted
        # in addition to lists (previously a tuple was passed whole to
        # DDL and failed)
        if not isinstance(statement, (list, tuple)):
            statement = [statement]
        for s in statement:
            ddl = DDL(s, self.on, self.context)
            ddl.execute_at(self.when, self.entity.table)

perform_ddl = Statement(PerformDDLEntityBuilder)

#
# the preload_data statement
#
class PreloadDataEntityBuilder(EntityBuilder):
    """Insert the given rows right after the entity table is created."""

    def __init__(self, entity, columns=None, data=None):
        self.entity = entity
        self.columns = columns    # column names; defaults to all columns
        self.data = data          # list of value tuples, or a callable

    def after_table(self):
        all_columns = [col.name for col in self.entity.table.columns]
        def onload(event, schema_item, connection):
            columns = self.columns
            if columns is None:
                columns = all_columns
            data = self.data
            # a callable lets the data be computed at create time
            if hasattr(data, '__call__'):
                data = data()
            insert = schema_item.insert()
            connection.execute(insert,
                [dict(zip(columns, values)) for values in data])

        self.entity.table.append_ddl_listener('after-create', onload)

preload_data = Statement(PreloadDataEntityBuilder)
'''
A versioning plugin for Elixir.

Entities that are marked as versioned with the `acts_as_versioned` statement
will automatically have a history table created and a timestamp and version
column added to their tables.  In addition, versioned entities are provided
with four new methods: revert, revert_to, compare_with and get_as_of, and
one new attribute: versions.  Entities with compound primary keys are
supported.

The `versions` attribute will contain a list of previous versions of the
instance, in increasing version number order.

The `get_as_of` method will retrieve a previous version of the instance "as
of" a specified datetime.  If the current version is the most recent, it
will be returned.

The `revert` method will rollback the current instance to its previous
version, if possible.  Once reverted, the current instance will be expired
from the session, and you will need to fetch it again to retrieve the now
reverted instance.

The `revert_to` method will rollback the current instance to the specified
version number, if possible.  Once reverted, the current instance will be
expired from the session, and you will need to fetch it again to retrieve
the now reverted instance.

The `compare_with` method will compare the instance with a previous version.
A dictionary will be returned with each field difference as an element in
the dictionary where the key is the field name and the value is a tuple of
the format (current_value, version_value).  Version instances also have a
`compare_with` method so that two versions can be compared.

Also included in the module is an `after_revert` decorator that can be used
to decorate methods on the versioned entity that will be called following
that instance being reverted.

The acts_as_versioned statement also accepts an optional `ignore` argument
that consists of a list of strings, specifying names of fields.  Changes in
those fields will not result in a version increment.  In addition, you can
pass in an optional `check_concurrent` argument, which will use SQLAlchemy's
built-in optimistic concurrency mechanisms.

Note that relationships that are stored in mapping tables will not be
included as part of the versioning process, and will need to be handled
manually.  Only values within the entity's main table will be versioned
into the history table.
'''

from datetime import datetime
import inspect

from sqlalchemy import Table, Column, and_, desc
from sqlalchemy.orm import mapper, MapperExtension, EXT_CONTINUE, \
                           object_session

from elixir import Integer, DateTime
from elixir.statements import Statement
from elixir.properties import EntityBuilder
from elixir.entity import getmembers

__all__ = ['acts_as_versioned', 'after_revert']
__doc_all__ = []

#
# utility functions
#

def get_entity_where(instance):
    """WHERE clause matching *instance*'s primary key in its main table."""
    clauses = []
    for column in instance.table.primary_key.columns:
        instance_value = getattr(instance, column.name)
        clauses.append(column == instance_value)
    return and_(*clauses)


def get_history_where(instance):
    """WHERE clause matching *instance*'s primary key in its history
    table (compound primary keys supported)."""
    clauses = []
    history_columns = instance.__history_table__.primary_key.columns
    for column in instance.table.primary_key.columns:
        instance_value = getattr(instance, column.name)
        history_column = getattr(history_columns, column.name)
        clauses.append(history_column == instance_value)
    return and_(*clauses)


#
# a mapper extension to track versions on insert, update, and delete
#

class VersionedMapperExtension(MapperExtension):
    def before_insert(self, mapper, connection, instance):
        # every row starts its life at version 1
        version_colname, timestamp_colname = \
            instance.__class__.__versioned_column_names__
        setattr(instance, version_colname, 1)
        setattr(instance, timestamp_colname, datetime.now())
        return EXT_CONTINUE

    def before_update(self, mapper, connection, instance):
        old_values = instance.table.select(get_entity_where(instance)) \
                                   .execute().fetchone()

        # SA might've flagged this for an update even though it didn't
        # change.  This occurs when a relation is updated, thus marking
        # this instance for a save/update operation.  We check here against
        # the last version to ensure we really should save this version and
        # update the version data.
        ignored = instance.__class__.__ignored_fields__
        version_colname, timestamp_colname = \
            instance.__class__.__versioned_column_names__
        for key in instance.table.c.keys():
            if key in ignored:
                continue
            if getattr(instance, key) != old_values[key]:
                # the instance was really updated, so we create a new
                # version: archive the old row and bump version/timestamp
                dict_values = dict(old_values.items())
                connection.execute(
                    instance.__class__.__history_table__.insert(),
                    dict_values)
                old_version = getattr(instance, version_colname)
                setattr(instance, version_colname, old_version + 1)
                setattr(instance, timestamp_colname, datetime.now())
                break

        return EXT_CONTINUE

    def before_delete(self, mapper, connection, instance):
        # deleting the row also wipes its history
        connection.execute(instance.__history_table__.delete(
            get_history_where(instance)
        ))
        return EXT_CONTINUE


versioned_mapper_extension = VersionedMapperExtension()


#
# the acts_as_versioned statement
#

class VersionedEntityBuilder(EntityBuilder):
    """Builder attached by the ``acts_as_versioned`` statement."""

    def __init__(self, entity, ignore=None, check_concurrent=False,
                 column_names=None):
        self.entity = entity
        self.add_mapper_extension(versioned_mapper_extension)
        #TODO: we should rather check that the version_id_col isn't set
        # externally
        self.check_concurrent = check_concurrent

        if column_names is None:
            column_names = ['version', 'timestamp']
        entity.__versioned_column_names__ = column_names

        # Changes in these fields will be ignored.
        # BUGFIX: copy the caller's list before extending it -- the
        # original code did ignore.extend(...) on the list object passed
        # in by the caller, mutating it as a side effect.
        if ignore is None:
            ignore = []
        else:
            ignore = list(ignore)
        ignore.extend(column_names)
        entity.__ignored_fields__ = ignore

    def create_non_pk_cols(self):
        # add a version column to the entity, along with a timestamp
        version_colname, timestamp_colname = \
            self.entity.__versioned_column_names__
        #XXX: fail in case the columns already exist?
        #col_names = [col.name for col in self.entity._descriptor.columns]
        #if version_colname not in col_names:
        self.add_table_column(Column(version_colname, Integer))
        #if timestamp_colname not in col_names:
        self.add_table_column(Column(timestamp_colname, DateTime))

        # add a concurrent_version column to the entity, if required
        if self.check_concurrent:
            self.entity._descriptor.version_id_col = 'concurrent_version'

    # we copy columns from the main entity table, so we need it to exist
    # first
    def after_table(self):
        entity = self.entity
        version_colname, timestamp_colname = \
            entity.__versioned_column_names__

        # look for methods decorated with @after_revert
        after_revert_events = []
        for name, func in getmembers(entity, inspect.ismethod):
            if getattr(func, '_elixir_after_revert', False):
                after_revert_events.append(func)

        # create a history table for the entity
        skipped_columns = [version_colname]
        if self.check_concurrent:
            skipped_columns.append('concurrent_version')

        columns = [
            column.copy() for column in entity.table.c
            if column.name not in skipped_columns
        ]
        columns.append(Column(version_colname, Integer, primary_key=True))
        table = Table(entity.table.name + '_history', entity.table.metadata,
            *columns
        )
        entity.__history_table__ = table

        # create an object that represents a version of this entity
        class Version(object):
            pass

        # map the version class to the history table for this entity
        Version.__name__ = entity.__name__ + 'Version'
        Version.__versioned_entity__ = entity
        mapper(Version, entity.__history_table__)

        version_col = getattr(table.c, version_colname)
        timestamp_col = getattr(table.c, timestamp_colname)

        # attach utility methods and properties to the entity
        def get_versions(self):
            v = object_session(self).query(Version) \
                                    .filter(get_history_where(self)) \
                                    .order_by(version_col) \
                                    .all()
            # history contains all the previous records.
            # Add the current one to the list to get all the versions
            v.append(self)
            return v

        def get_as_of(self, dt):
            # if the passed in timestamp is older than our current
            # version's time stamp, then the most recent version is our
            # current version
            if getattr(self, timestamp_colname) < dt:
                return self

            # otherwise, we need to look to the history table to get our
            # older version
            sess = object_session(self)
            query = sess.query(Version) \
                        .filter(and_(get_history_where(self),
                                     timestamp_col <= dt)) \
                        .order_by(desc(timestamp_col)).limit(1)
            return query.first()

        def revert_to(self, to_version):
            if isinstance(to_version, Version):
                to_version = getattr(to_version, version_colname)

            old_version = table.select(and_(
                get_history_where(self),
                version_col == to_version
            )).execute().fetchone()

            # BUGFIX: a missing version used to crash later with an
            # opaque "'NoneType' object has no attribute 'items'"; fail
            # fast with a clear message instead.
            if old_version is None:
                raise ValueError(
                    "version %s not found in history" % to_version)

            entity.table.update(get_entity_where(self)).execute(
                dict(old_version.items())
            )

            table.delete(and_(get_history_where(self),
                              version_col >= to_version)).execute()
            self.expire()
            for event in after_revert_events:
                event(self)

        def revert(self):
            assert getattr(self, version_colname) > 1
            self.revert_to(getattr(self, version_colname) - 1)

        def compare_with(self, version):
            differences = {}
            for column in self.table.c:
                if column.name in (version_colname, 'concurrent_version'):
                    continue
                this = getattr(self, column.name)
                that = getattr(version, column.name)
                if this != that:
                    differences[column.name] = (this, that)
            return differences

        entity.versions = property(get_versions)
        entity.get_as_of = get_as_of
        entity.revert_to = revert_to
        entity.revert = revert
        entity.compare_with = compare_with
        Version.compare_with = compare_with

acts_as_versioned = Statement(VersionedEntityBuilder)


def after_revert(func):
    """
    Decorator for watching for revert events.
    """
    func._elixir_after_revert = True
    return func
'''
This module provides support for defining the fields (columns) of your
entities.  Elixir currently supports two syntaxes to do so: the default
`Attribute-based syntax`_ as well as the has_field_ DSL statement.

Attribute-based syntax
----------------------

Here is a quick example of how to use the object-oriented syntax.

.. sourcecode:: python

    class Person(Entity):
        id = Field(Integer, primary_key=True)
        name = Field(String(50), required=True)
        ssn = Field(String(50), unique=True)
        biography = Field(Text)
        join_date = Field(DateTime, default=datetime.datetime.now)
        photo = Field(Binary, deferred=True)
        _email = Field(String(20), colname='email', synonym='email')

        def _set_email(self, email):
            self._email = email
        def _get_email(self):
            return self._email
        email = property(_get_email, _set_email)

The Field class takes one mandatory argument, which is its type.  Please
refer to the SQLAlchemy documentation for a list of types supported by
SQLAlchemy.

Following that first mandatory argument, fields can take any number of
optional keyword arguments.  All the arguments that are not specifically
processed by Elixir, as documented below, are passed on to the SQLAlchemy
``Column`` object.  The Elixir-specific arguments are:

``required``
    Specify whether or not this field can be set to None (left without a
    value).  Defaults to ``False``, unless the field is a primary key.

``colname``
    Specify a custom name for the column of this field.  By default the
    column will have the same name as the attribute.

``deferred``
    Specify whether this particular column should be fetched by default
    (along with the other columns) when an instance of the entity is
    fetched from the database or rather only later on when this particular
    column is first referenced.  This can be useful when one wants to avoid
    loading a large text or binary field into memory when it's not needed.
    Individual columns can be lazy loaded by themselves (by using
    ``deferred=True``) or placed into groups that lazy-load together (by
    using ``deferred`` = `"group_name"`).

``synonym``
    Specify a synonym name for this field.  The field will also be usable
    under that name in keyword-based Query functions such as filter_by.
    The Synonym class (see the `properties` module) provides a similar
    functionality with an (arguably) nicer syntax, but a limited scope.

has_field
---------

The `has_field` statement allows you to define fields one at a time.  The
first argument is the name of the field, the second is its type.  Following
these, any number of keyword arguments can be specified for additional
behavior.  The following arguments are supported:

``through``
    Specify a relation name to go through.  This field will not exist as a
    column on the database but will be a property which automatically
    proxies values to the ``attribute`` attribute of the object pointed to
    by the relation.  If the ``attribute`` argument is not present, the
    name of the current field will be used.  In an has_field statement, you
    can only proxy through a belongs_to or an has_one relationship.

``attribute``
    Name of the "endpoint" attribute to proxy to.  This should only be used
    in combination with the ``through`` argument.

Here is a quick example of how to use ``has_field``.

.. sourcecode:: python

    class Person(Entity):
        has_field('id', Integer, primary_key=True)
        has_field('name', String(50))
'''
from sqlalchemy import Column
from sqlalchemy.orm import deferred, synonym
from sqlalchemy.ext.associationproxy import association_proxy

from elixir.statements import ClassMutator
from elixir.properties import Property

__doc_all__ = ['Field']


class Field(Property):
    '''
    Represents the definition of a 'field' on an entity, that is a column
    on the table where the entity is stored.
    '''

    def __init__(self, type, *args, **kwargs):
        super(Field, self).__init__()

        # Extract the Elixir-specific options; whatever remains in kwargs
        # is handed over verbatim to the SQLAlchemy Column constructor.
        self.colname = kwargs.pop('colname', None)
        self.synonym = kwargs.pop('synonym', None)
        self.deferred = kwargs.pop('deferred', False)
        if 'required' in kwargs:
            # 'required' is simply the negation of SQLAlchemy's 'nullable'
            kwargs['nullable'] = not kwargs.pop('required')

        self.type = type
        self.primary_key = kwargs.get('primary_key', False)
        self.column = None
        self.property = None
        self.args = args
        self.kwargs = kwargs

    def attach(self, entity, name):
        # The column name defaults to the attribute name unless a 'colname'
        # keyword was given explicitly.
        if self.colname is None:
            self.colname = name
        super(Field, self).attach(entity, name)

    def create_pk_cols(self):
        if self.primary_key:
            self.create_col()

    def create_non_pk_cols(self):
        if not self.primary_key:
            self.create_col()

    def create_col(self):
        self.column = Column(self.colname, self.type,
                             *self.args, **self.kwargs)
        self.add_table_column(self.column)

    def create_properties(self):
        if self.deferred:
            # a string value means "defer as part of that named group"
            group = self.deferred if isinstance(self.deferred, basestring) \
                                  else None
            self.property = deferred(self.column, group=group)
        elif self.name != self.colname:
            # if the property name is different from the column name, we
            # need to add an explicit property (otherwise nothing is needed
            # as it's done automatically by SA)
            self.property = self.column

        if self.property is not None:
            self.add_mapper_property(self.name, self.property)

        if self.synonym:
            self.add_mapper_property(self.synonym, synonym(self.name))


def has_field_handler(entity, name, *args, **kwargs):
    # 'through' turns the field into an association proxy rather than a
    # real column on this entity's table.
    if 'through' in kwargs:
        proxy = association_proxy(kwargs.pop('through'),
                                  kwargs.pop('attribute', name),
                                  **kwargs)
        setattr(entity, name, proxy)
    else:
        Field(*args, **kwargs).attach(entity, name)


has_field = ClassMutator(has_field_handler)
/dev/null +++ b/libs/elixir/options.py @@ -0,0 +1,285 @@ +''' +This module provides support for defining several options on your Elixir +entities. There are three different kinds of options that can be set +up, and for this there are three different statements: using_options_, +using_table_options_ and using_mapper_options_. + +Alternatively, these options can be set on all Elixir entities by modifying +the `options_defaults` dictionary before defining any entity. + +`using_options` +--------------- +The 'using_options' DSL statement allows you to set up some additional +behaviors on your model objects, including table names, ordering, and +more. To specify an option, simply supply the option as a keyword +argument onto the statement, as follows: + +.. sourcecode:: python + + class Person(Entity): + name = Field(Unicode(64)) + + using_options(shortnames=True, order_by='name') + +The list of supported arguments are as follows: + ++---------------------+-------------------------------------------------------+ +| Option Name | Description | ++=====================+=======================================================+ +| ``inheritance`` | Specify the type of inheritance this entity must use. | +| | It can be one of ``single``, ``concrete`` or | +| | ``multi``. Defaults to ``single``. | +| | Note that polymorphic concrete inheritance is | +| | currently not implemented. See: | +| | http://www.sqlalchemy.org/docs/05/mappers.html | +| | #mapping-class-inheritance-hierarchies for an | +| | explanation of the different kinds of inheritances. | ++---------------------+-------------------------------------------------------+ +| ``abstract`` | Set 'abstract'=True to declare abstract entity. | +| | Abstract base classes are useful when you want to put | +| | some common information into a number of other | +| | entities. Abstract entity will not be used to create | +| | any database table. 
Instead, when it is used as a base| +| | class for other entity, its fields will be added to | +| | those of the child class. | ++---------------------+-------------------------------------------------------+ +| ``polymorphic`` | Whether the inheritance should be polymorphic or not. | +| | Defaults to ``True``. The column used to store the | +| | type of each row is named "row_type" by default. You | +| | can change this by passing the desired name for the | +| | column to this argument. | ++---------------------+-------------------------------------------------------+ +| ``identity`` | Specify a custom polymorphic identity. When using | +| | polymorphic inheritance, this value (usually a | +| | string) will represent this particular entity (class) | +| | . It will be used to differentiate it from other | +| | entities (classes) in your inheritance hierarchy when | +| | loading from the database instances of different | +| | entities in that hierarchy at the same time. | +| | This value will be stored by default in the | +| | "row_type" column of the entity's table (see above). | +| | You can either provide a | +| | plain string or a callable. The callable will be | +| | given the entity (ie class) as argument and must | +| | return a value (usually a string) representing the | +| | polymorphic identity of that entity. | +| | By default, this value is automatically generated: it | +| | is the name of the entity lower-cased. | ++---------------------+-------------------------------------------------------+ +| ``metadata`` | Specify a custom MetaData for this entity. | +| | By default, entities uses the global | +| | ``elixir.metadata``. | +| | This option can also be set for all entities of a | +| | module by setting the ``__metadata__`` attribute of | +| | that module. | ++---------------------+-------------------------------------------------------+ +| ``autoload`` | Automatically load column definitions from the | +| | existing database table. 
| ++---------------------+-------------------------------------------------------+ +| ``tablename`` | Specify a custom tablename. You can either provide a | +| | plain string or a callable. The callable will be | +| | given the entity (ie class) as argument and must | +| | return a string representing the name of the table | +| | for that entity. By default, the tablename is | +| | automatically generated: it is a concatenation of the | +| | full module-path to the entity and the entity (class) | +| | name itself. The result is lower-cased and separated | +| | by underscores ("_"), eg.: for an entity named | +| | "MyEntity" in the module "project1.model", the | +| | generated table name will be | +| | "project1_model_myentity". | ++---------------------+-------------------------------------------------------+ +| ``shortnames`` | Specify whether or not the automatically generated | +| | table names include the full module-path | +| | to the entity. If ``shortnames`` is ``True``, only | +| | the entity name is used. Defaults to ``False``. | ++---------------------+-------------------------------------------------------+ +| ``auto_primarykey`` | If given as string, it will represent the | +| | auto-primary-key's column name. If this option | +| | is True, it will allow auto-creation of a primary | +| | key if there's no primary key defined for the | +| | corresponding entity. If this option is False, | +| | it will disallow auto-creation of a primary key. | +| | Defaults to ``True``. | ++---------------------+-------------------------------------------------------+ +| ``version_id_col`` | If this option is True, it will create a version | +| | column automatically using the default name. If given | +| | as string, it will create the column using that name. | +| | This can be used to prevent concurrent modifications | +| | to the entity's table rows (i.e. it will raise an | +| | exception if it happens). Defaults to ``False``. 
| ++---------------------+-------------------------------------------------------+ +| ``order_by`` | How to order select results. Either a string or a | +| | list of strings, composed of the field name, | +| | optionally lead by a minus (for descending order). | ++---------------------+-------------------------------------------------------+ +| ``session`` | Specify a custom contextual session for this entity. | +| | By default, entities uses the global | +| | ``elixir.session``. | +| | This option takes a ``ScopedSession`` object or | +| | ``None``. In the later case your entity will be | +| | mapped using a non-contextual mapper which requires | +| | manual session management, as seen in pure SQLAlchemy.| +| | This option can also be set for all entities of a | +| | module by setting the ``__session__`` attribute of | +| | that module. | ++---------------------+-------------------------------------------------------+ +| ``autosetup`` | DEPRECATED. Specify whether that entity will contain | +| | automatic setup triggers. | +| | That is if this entity will be | +| | automatically setup (along with all other entities | +| | which were already declared) if any of the following | +| | condition happen: some of its attributes are accessed | +| | ('c', 'table', 'mapper' or 'query'), instanciated | +| | (called) or the create_all method of this entity's | +| | metadata is called. Defaults to ``False``. | ++---------------------+-------------------------------------------------------+ +| ``allowcoloverride``| Specify whether it is allowed to override columns. | +| | By default, Elixir forbids you to add a column to an | +| | entity's table which already exist in that table. If | +| | you set this option to ``True`` it will skip that | +| | check. Use with care as it is easy to shoot oneself | +| | in the foot when overriding columns. 
| ++---------------------+-------------------------------------------------------+ + +For examples, please refer to the examples and unit tests. + +`using_table_options` +--------------------- +The 'using_table_options' DSL statement allows you to set up some +additional options on your entity table. It is meant only to handle the +options which are not supported directly by the 'using_options' statement. +By opposition to the 'using_options' statement, these options are passed +directly to the underlying SQLAlchemy Table object (both non-keyword arguments +and keyword arguments) without any processing. + +For further information, please refer to the `SQLAlchemy table's documentation +`_. + +You might also be interested in the section about `constraints +`_. + +`using_mapper_options` +---------------------- +The 'using_mapper_options' DSL statement allows you to set up some +additional options on your entity mapper. It is meant only to handle the +options which are not supported directly by the 'using_options' statement. +By opposition to the 'using_options' statement, these options are passed +directly to the underlying SQLAlchemy mapper (as keyword arguments) +without any processing. + +For further information, please refer to the `SQLAlchemy mapper +function's documentation +`_. + +`using_options_defaults` +------------------------ +The 'using_options_defaults' DSL statement allows you to set up some +default options on a custom base class. These will be used as the default value +for options of all its subclasses. Note that any option not set within the +using_options_defaults (nor specifically on a particular Entity) will use the +global defaults, so you don't have to provide a default value for all options, +but only those you want to change. 
Please also note that this statement does +not work on normal entities, and the normal using_options statement does not +work on base classes (because normal options do not and should not propagate to +the children classes). +''' + +from sqlalchemy import Integer, String + +from elixir.statements import ClassMutator + +__doc_all__ = ['options_defaults'] + +OLD_M2MCOL_NAMEFORMAT = "%(tablename)s_%(key)s%(numifself)s" +ALTERNATE_M2MCOL_NAMEFORMAT = "%(inversename)s_%(key)s" + +def default_m2m_column_formatter(data): + if data['selfref']: + return ALTERNATE_M2MCOL_NAMEFORMAT % data + else: + return OLD_M2MCOL_NAMEFORMAT % data + +NEW_M2MCOL_NAMEFORMAT = default_m2m_column_formatter + +# format constants +FKCOL_NAMEFORMAT = "%(relname)s_%(key)s" +M2MCOL_NAMEFORMAT = NEW_M2MCOL_NAMEFORMAT +CONSTRAINT_NAMEFORMAT = "%(tablename)s_%(colnames)s_fk" +MULTIINHERITANCECOL_NAMEFORMAT = "%(entity)s_%(key)s" + +# other global constants +DEFAULT_AUTO_PRIMARYKEY_NAME = "id" +DEFAULT_AUTO_PRIMARYKEY_TYPE = Integer +DEFAULT_VERSION_ID_COL_NAME = "row_version" +DEFAULT_POLYMORPHIC_COL_NAME = "row_type" +POLYMORPHIC_COL_SIZE = 40 +POLYMORPHIC_COL_TYPE = String(POLYMORPHIC_COL_SIZE) + +# debugging/migration help +MIGRATION_TO_07_AID = False + +# +options_defaults = dict( + abstract=False, + autosetup=False, + inheritance='single', + polymorphic=True, + identity=None, + autoload=False, + tablename=None, + shortnames=False, + auto_primarykey=True, + version_id_col=False, + allowcoloverride=False, + order_by=None, + resolve_root=None, + mapper_options={}, + table_options={} +) + +valid_options = options_defaults.keys() + [ + 'metadata', + 'session', + 'collection' +] + + +def using_options_defaults_handler(entity, **kwargs): + for kwarg in kwargs: + if kwarg not in valid_options: + raise Exception("'%s' is not a valid option for Elixir entities." 
+ % kwarg) + + # We use __dict__ instead of hasattr to not check its presence within the + # parent, and thus update the parent dict instead of creating a local dict. + if not entity.__dict__.get('options_defaults'): + entity.options_defaults = {} + entity.options_defaults.update(kwargs) + + +def using_options_handler(entity, *args, **kwargs): + for kwarg in kwargs: + if kwarg in valid_options: + setattr(entity._descriptor, kwarg, kwargs[kwarg]) + else: + raise Exception("'%s' is not a valid option for Elixir entities." + % kwarg) + + +def using_table_options_handler(entity, *args, **kwargs): + entity._descriptor.table_args.extend(list(args)) + entity._descriptor.table_options.update(kwargs) + + +def using_mapper_options_handler(entity, *args, **kwargs): + entity._descriptor.mapper_options.update(kwargs) + + +using_options_defaults = ClassMutator(using_options_defaults_handler) +using_options = ClassMutator(using_options_handler) +using_table_options = ClassMutator(using_table_options_handler) +using_mapper_options = ClassMutator(using_mapper_options_handler) diff --git a/libs/elixir/properties.py b/libs/elixir/properties.py new file mode 100644 index 0000000..68ff8fa --- /dev/null +++ b/libs/elixir/properties.py @@ -0,0 +1,244 @@ +''' +This module provides support for defining properties on your entities. It both +provides, the `Property` class which acts as a building block for common +properties such as fields and relationships (for those, please consult the +corresponding modules), but also provides some more specialized properties, +such as `ColumnProperty` and `Synonym`. It also provides the GenericProperty +class which allows you to wrap any SQLAlchemy property, and its DSL-syntax +equivalent: has_property_. + +`has_property` +-------------- +The ``has_property`` statement allows you to define properties which rely on +their entity's table (and columns) being defined before they can be declared +themselves. 
The `has_property` statement takes two arguments: first the name of +the property to be defined and second a function (often given as an anonymous +lambda) taking one argument and returning the desired SQLAlchemy property. That +function will be called whenever the entity table is completely defined, and +will be given the .c attribute of the entity as argument (as a way to access +the entity columns). + +Here is a quick example of how to use ``has_property``. + +.. sourcecode:: python + + class OrderLine(Entity): + has_field('quantity', Float) + has_field('unit_price', Float) + has_property('price', + lambda c: column_property( + (c.quantity * c.unit_price).label('price'))) +''' + +from elixir.statements import PropertyStatement +from sqlalchemy.orm import column_property, synonym + +__doc_all__ = ['EntityBuilder', 'Property', 'GenericProperty', + 'ColumnProperty'] + +class EntityBuilder(object): + ''' + Abstract base class for all entity builders. An Entity builder is a class + of objects which can be added to an Entity (usually by using special + properties or statements) to "build" that entity. Building an entity, + meaning to add columns to its "main" table, create other tables, add + properties to its mapper, ... To do so an EntityBuilder must override the + corresponding method(s). This is to ensure the different operations happen + in the correct order (for example, that the table is fully created before + the mapper that use it is defined). + ''' + def create_pk_cols(self): + pass + + def create_non_pk_cols(self): + pass + + def before_table(self): + pass + + def create_tables(self): + ''' + Subclasses may override this method to create tables. + ''' + + def after_table(self): + pass + + def create_properties(self): + ''' + Subclasses may override this method to add properties to the involved + entity. 
+ ''' + + def before_mapper(self): + pass + + def after_mapper(self): + pass + + def finalize(self): + pass + + # helper methods + def add_table_column(self, column): + self.entity._descriptor.add_column(column) + + def add_mapper_property(self, name, prop): + self.entity._descriptor.add_property(name, prop) + + def add_mapper_extension(self, ext): + self.entity._descriptor.add_mapper_extension(ext) + + +class CounterMeta(type): + ''' + A simple meta class which adds a ``_counter`` attribute to the instances of + the classes it is used on. This counter is simply incremented for each new + instance. + ''' + counter = 0 + + def __call__(self, *args, **kwargs): + instance = type.__call__(self, *args, **kwargs) + instance._counter = CounterMeta.counter + CounterMeta.counter += 1 + return instance + + +class Property(EntityBuilder): + ''' + Abstract base class for all properties of an Entity. + ''' + __metaclass__ = CounterMeta + + def __init__(self, *args, **kwargs): + self.entity = None + self.name = None + + def attach(self, entity, name): + """Attach this property to its entity, using 'name' as name. + + Properties will be attached in the order they were declared. + """ + self.entity = entity + self.name = name + + # register this property as a builder + entity._descriptor.builders.append(self) + + def __repr__(self): + return "Property(%s, %s)" % (self.name, self.entity) + + +class GenericProperty(Property): + ''' + Generic catch-all class to wrap an SQLAlchemy property. + + .. sourcecode:: python + + class OrderLine(Entity): + quantity = Field(Float) + unit_price = Field(Numeric) + price = GenericProperty(lambda c: column_property( + (c.quantity * c.unit_price).label('price'))) + ''' + + def __init__(self, prop, *args, **kwargs): + super(GenericProperty, self).__init__(*args, **kwargs) + self.prop = prop + #XXX: move this to Property? 
+ self.args = args + self.kwargs = kwargs + + def create_properties(self): + if hasattr(self.prop, '__call__'): + prop_value = self.prop(self.entity.table.c) + else: + prop_value = self.prop + prop_value = self.evaluate_property(prop_value) + self.add_mapper_property(self.name, prop_value) + + def evaluate_property(self, prop): + if self.args or self.kwargs: + raise Exception('superfluous arguments passed to GenericProperty') + return prop + + +class ColumnProperty(GenericProperty): + ''' + A specialized form of the GenericProperty to generate SQLAlchemy + ``column_property``'s. + + It takes a function (often given as an anonymous lambda) as its first + argument. Other arguments and keyword arguments are forwarded to the + column_property construct. That first-argument function must accept exactly + one argument and must return the desired (scalar-returning) SQLAlchemy + ClauseElement. + + The function will be called whenever the entity table is completely + defined, and will be given + the .c attribute of the table of the entity as argument (as a way to + access the entity columns). The ColumnProperty will first wrap your + ClauseElement in an + "empty" label (ie it will be labelled automatically during queries), + then wrap that in a column_property. + + .. sourcecode:: python + + class OrderLine(Entity): + quantity = Field(Float) + unit_price = Field(Numeric) + price = ColumnProperty(lambda c: c.quantity * c.unit_price, + deferred=True) + + Please look at the `corresponding SQLAlchemy + documentation `_ for details. + ''' + + def evaluate_property(self, prop): + return column_property(prop.label(None), *self.args, **self.kwargs) + + +class Synonym(GenericProperty): + ''' + This class represents a synonym property of another property (column, ...) + of an entity. As opposed to the `synonym` kwarg to the Field class (which + share the same goal), this class can be used to define a synonym of a + property defined in a parent class (of the current class). 
On the other + hand, it cannot define a synonym for the purpose of using a standard python + property in queries. See the Field class for details on that usage. + + .. sourcecode:: python + + class Person(Entity): + name = Field(String(30)) + primary_email = Field(String(100)) + email_address = Synonym('primary_email') + + class User(Person): + user_name = Synonym('name') + password = Field(String(20)) + ''' + + def evaluate_property(self, prop): + return synonym(prop, *self.args, **self.kwargs) + +#class Composite(GenericProperty): +# def __init__(self, prop): +# super(GenericProperty, self).__init__() +# self.prop = prop + +# def evaluate_property(self, prop): +# return composite(prop.label(self.name)) + +#start = Composite(Point, lambda c: (c.x1, c.y1)) + +#mapper(Vertex, vertices, properties={ +# 'start':composite(Point, vertices.c.x1, vertices.c.y1), +# 'end':composite(Point, vertices.c.x2, vertices.c.y2) +#}) + + +has_property = PropertyStatement(GenericProperty) + diff --git a/libs/elixir/py23compat.py b/libs/elixir/py23compat.py new file mode 100644 index 0000000..0d6b1b8 --- /dev/null +++ b/libs/elixir/py23compat.py @@ -0,0 +1,73 @@ +# Some helper functions to get by without Python 2.4 + +# set +try: + set = set +except NameError: + from sets import Set as set + +orig_cmp = cmp +# [].sort +def sort_list(l, cmp=None, key=None, reverse=False): + try: + l.sort(cmp, key, reverse) + except TypeError, e: + if not str(e).startswith('sort expected at most 1 arguments'): + raise + if cmp is None: + cmp = orig_cmp + if key is not None: + # the cmp=cmp parameter is required to get the original comparator + # into the lambda namespace + cmp = lambda self, other, cmp=cmp: cmp(key(self), key(other)) + if reverse: + cmp = lambda self, other, cmp=cmp: -cmp(self,other) + l.sort(cmp) + +# sorted +try: + sorted = sorted +except NameError: + # global name 'sorted' doesn't exist in Python2.3 + # this provides a poor-man's emulation of the sorted built-in method + def sorted(l, 
cmp=None, key=None, reverse=False): + sorted_list = list(l) + sort_list(sorted_list, cmp, key, reverse) + return sorted_list + +# rsplit +try: + ''.rsplit + def rsplit(s, delim, maxsplit): + return s.rsplit(delim, maxsplit) + +except AttributeError: + def rsplit(s, delim, maxsplit): + """Return a list of the words of the string s, scanning s + from the end. To all intents and purposes, the resulting + list of words is the same as returned by split(), except + when the optional third argument maxsplit is explicitly + specified and nonzero. When maxsplit is nonzero, at most + maxsplit number of splits - the rightmost ones - occur, + and the remainder of the string is returned as the first + element of the list (thus, the list will have at most + maxsplit+1 elements). New in version 2.4. + >>> rsplit('foo.bar.baz', '.', 0) + ['foo.bar.baz'] + >>> rsplit('foo.bar.baz', '.', 1) + ['foo.bar', 'baz'] + >>> rsplit('foo.bar.baz', '.', 2) + ['foo', 'bar', 'baz'] + >>> rsplit('foo.bar.baz', '.', 99) + ['foo', 'bar', 'baz'] + """ + assert maxsplit >= 0 + + if maxsplit == 0: return [s] + + # the following lines perform the function, but inefficiently. + # This may be adequate for compatibility purposes + items = s.split(delim) + if maxsplit < len(items): + items[:-maxsplit] = [delim.join(items[:-maxsplit])] + return items diff --git a/libs/elixir/relationships.py b/libs/elixir/relationships.py new file mode 100644 index 0000000..f825120 --- /dev/null +++ b/libs/elixir/relationships.py @@ -0,0 +1,1257 @@ +''' +This module provides support for defining relationships between your Elixir +entities. Elixir currently supports two syntaxes to do so: the default +`Attribute-based syntax`_ which supports the following types of relationships: +ManyToOne_, OneToMany_, OneToOne_ and ManyToMany_, as well as a +`DSL-based syntax`_ which provides the following statements: belongs_to_, +has_many_, has_one_ and has_and_belongs_to_many_. 
+ +====================== +Attribute-based syntax +====================== + +The first argument to all these "normal" relationship classes is the name of +the class (entity) you are relating to. + +Following that first mandatory argument, any number of additional keyword +arguments can be specified for advanced behavior. See each relationship type +for a list of their specific keyword arguments. At this point, we'll just note +that all the arguments that are not specifically processed by Elixir, as +mentioned in the documentation below are passed on to the SQLAlchemy +``relation`` function. So, please refer to the `SQLAlchemy relation function's +documentation `_ for further detail about which +keyword arguments are supported. + +You should keep in mind that the following +keyword arguments are automatically generated by Elixir and should not be used +unless you want to override the value provided by Elixir: ``uselist``, +``remote_side``, ``secondary``, ``primaryjoin`` and ``secondaryjoin``. + +Additionally, if you want a bidirectionnal relationship, you should define the +inverse relationship on the other entity explicitly (as opposed to how +SQLAlchemy's backrefs are defined). In non-ambiguous situations, Elixir will +match relationships together automatically. If there are several relationships +of the same type between two entities, Elixir is not able to determine which +relationship is the inverse of which, so you have to disambiguate the +situation by giving the name of the inverse relationship in the ``inverse`` +keyword argument. + +Here is a detailed explanation of each relation type: + +`ManyToOne` +----------- + +Describes the child's side of a parent-child relationship. For example, +a `Pet` object may belong to its owner, who is a `Person`. This could be +expressed like so: + +.. 
sourcecode:: python + + class Pet(Entity): + owner = ManyToOne('Person') + +Behind the scene, assuming the primary key of the `Person` entity is +an integer column named `id`, the ``ManyToOne`` relationship will +automatically add an integer column named `owner_id` to the entity, with a +foreign key referencing the `id` column of the `Person` entity. + +In addition to the keyword arguments inherited from SQLAlchemy's relation +function, ``ManyToOne`` relationships accept the following optional arguments +which will be directed to the created column: + ++----------------------+------------------------------------------------------+ +| Option Name | Description | ++======================+======================================================+ +| ``colname`` | Specify a custom name for the foreign key column(s). | +| | This argument accepts either a single string or a | +| | list of strings. The number of strings passed must | +| | match the number of primary key columns of the target| +| | entity. If this argument is not used, the name of the| +| | column(s) is generated with the pattern | +| | defined in options.FKCOL_NAMEFORMAT, which is, by | +| | default: "%(relname)s_%(key)s", where relname is the | +| | name of the ManyToOne relationship, and 'key' is the | +| | name (key) of the primary column in the target | +| | entity. That's with, in the above Pet/owner example, | +| | the name of the column would be: "owner_id". | ++----------------------+------------------------------------------------------+ +| ``required`` | Specify whether or not this field can be set to None | +| | (left without a value). Defaults to ``False``, | +| | unless the field is a primary key. | ++----------------------+------------------------------------------------------+ +| ``primary_key`` | Specify whether or not the column(s) created by this | +| | relationship should act as a primary_key. | +| | Defaults to ``False``. 
| ++----------------------+------------------------------------------------------+ +| ``column_kwargs`` | A dictionary holding any other keyword argument you | +| | might want to pass to the Column. | ++----------------------+------------------------------------------------------+ +| ``target_column`` | Name (or list of names) of the target column(s). | +| | If this argument is not specified, the target entity | +| | primary key column(s) are used. | ++----------------------+------------------------------------------------------+ + +The following optional arguments are also supported to customize the +ForeignKeyConstraint that is created: + ++----------------------+------------------------------------------------------+ +| Option Name | Description | ++======================+======================================================+ +| ``use_alter`` | If True, SQLAlchemy will add the constraint in a | +| | second SQL statement (as opposed to within the | +| | create table statement). This permits to define | +| | tables with a circular foreign key dependency | +| | between them. | ++----------------------+------------------------------------------------------+ +| ``ondelete`` | Value for the foreign key constraint ondelete clause.| +| | May be one of: ``cascade``, ``restrict``, | +| | ``set null``, or ``set default``. | ++----------------------+------------------------------------------------------+ +| ``onupdate`` | Value for the foreign key constraint onupdate clause.| +| | May be one of: ``cascade``, ``restrict``, | +| | ``set null``, or ``set default``. | ++----------------------+------------------------------------------------------+ +| ``constraint_kwargs``| A dictionary holding any other keyword argument you | +| | might want to pass to the Constraint. | ++----------------------+------------------------------------------------------+ + +In some cases, you may want to declare the foreign key column explicitly, +instead of letting it be generated automatically. 
There are several reasons for
+ +In addition to keyword arguments inherited from SQLAlchemy, ``OneToMany`` +relationships accept the following optional (keyword) arguments: + ++--------------------+--------------------------------------------------------+ +| Option Name | Description | ++====================+========================================================+ +| ``order_by`` | Specify which field(s) should be used to sort the | +| | results given by accessing the relation field. | +| | Note that this sort order is only applied when loading | +| | objects from the database. Objects appended to the | +| | collection afterwards are not re-sorted in-memory on | +| | the fly. | +| | This argument accepts either a string or a list of | +| | strings, each corresponding to the name of a field in | +| | the target entity. These field names can optionally be | +| | prefixed by a minus (for descending order). | ++--------------------+--------------------------------------------------------+ +| ``filter`` | Specify a filter criterion (as a clause element) for | +| | this relationship. This criterion will be ``and_`` ed | +| | with the normal join criterion (primaryjoin) generated | +| | by Elixir for the relationship. For example: | +| | boston_addresses = | +| | OneToMany('Address', filter=Address.city == 'Boston') | ++--------------------+--------------------------------------------------------+ + +Additionally, Elixir supports an alternate, DSL-based, syntax to define +OneToMany_ relationships, with the has_many_ statement. + + +`OneToOne` +---------- + +Describes the parent's side of a parent-child relationship when there is only +one child. For example, a `Car` object has one gear stick, which is +represented as a `GearStick` object. This could be expressed like so: + +.. 
sourcecode:: python + + class Car(Entity): + gear_stick = OneToOne('GearStick', inverse='car') + + class GearStick(Entity): + car = ManyToOne('Car') + +Note that a ``OneToOne`` relationship **cannot exist** without a corresponding +``ManyToOne`` relationship in the other way. This is because the ``OneToOne`` +relationship needs the foreign_key created by the ``ManyToOne`` relationship. + +Additionally, Elixir supports an alternate, DSL-based, syntax to define +OneToOne_ relationships, with the has_one_ statement. + + +`ManyToMany` +------------ + +Describes a relationship in which one kind of entity can be related to several +objects of the other kind but the objects of that other kind can be related to +several objects of the first kind. For example, an `Article` can have several +tags, but the same `Tag` can be used on several articles. + +.. sourcecode:: python + + class Article(Entity): + tags = ManyToMany('Tag') + + class Tag(Entity): + articles = ManyToMany('Article') + +Behind the scene, the ``ManyToMany`` relationship will automatically create an +intermediate table to host its data. + +Note that you don't necessarily need to define the inverse relationship. In +our example, even though we want tags to be usable on several articles, we +might not be interested in which articles correspond to a particular tag. In +that case, we could have omitted the `Tag` side of the relationship. + +If your ``ManyToMany`` relationship is self-referencial, the entity +containing it is autoloaded (and you don't intend to specify both the +primaryjoin and secondaryjoin arguments manually), you must specify at least +one of either the ``remote_colname`` or ``local_colname`` argument. 
+ +In addition to keyword arguments inherited from SQLAlchemy, ``ManyToMany`` +relationships accept the following optional (keyword) arguments: + ++--------------------+--------------------------------------------------------+ +| Option Name | Description | ++====================+========================================================+ +| ``tablename`` | Specify a custom name for the intermediary table. This | +| | can be used both when the tables needs to be created | +| | and when the table is autoloaded/reflected from the | +| | database. If this argument is not used, a name will be | +| | automatically generated by Elixir depending on the name| +| | of the tables of the two entities of the relationship, | +| | the name of the relationship, and, if present, the name| +| | of its inverse. Even though this argument is optional, | +| | it is wise to use it if you are not sure what are the | +| | exact consequence of using a generated table name. | ++--------------------+--------------------------------------------------------+ +| ``schema`` | Specify a custom schema for the intermediate table. | +| | This can be used both when the tables needs to | +| | be created and when the table is autoloaded/reflected | +| | from the database. | ++--------------------+--------------------------------------------------------+ +| ``remote_colname`` | A string or list of strings specifying the names of | +| | the column(s) in the intermediary table which | +| | reference the "remote"/target entity's table. | ++--------------------+--------------------------------------------------------+ +| ``local_colname`` | A string or list of strings specifying the names of | +| | the column(s) in the intermediary table which | +| | reference the "local"/current entity's table. | ++--------------------+--------------------------------------------------------+ +| ``table`` | Use a manually created table. 
If this argument is | +| | used, Elixir won't generate a table for this | +| | relationship, and use the one given instead. | ++--------------------+--------------------------------------------------------+ +| ``order_by`` | Specify which field(s) should be used to sort the | +| | results given by accessing the relation field. | +| | Note that this sort order is only applied when loading | +| | objects from the database. Objects appended to the | +| | collection afterwards are not re-sorted in-memory on | +| | the fly. | +| | This argument accepts either a string or a list of | +| | strings, each corresponding to the name of a field in | +| | the target entity. These field names can optionally be | +| | prefixed by a minus (for descending order). | ++----------------------+------------------------------------------------------+ +| ``ondelete`` | Value for the foreign key constraint ondelete clause. | +| | May be one of: ``cascade``, ``restrict``, | +| | ``set null``, or ``set default``. | ++--------------------+--------------------------------------------------------+ +| ``onupdate`` | Value for the foreign key constraint onupdate clause. | +| | May be one of: ``cascade``, ``restrict``, | +| | ``set null``, or ``set default``. | ++--------------------+--------------------------------------------------------+ +| ``table_kwargs`` | A dictionary holding any other keyword argument you | +| | might want to pass to the underlying Table object. | ++--------------------+--------------------------------------------------------+ +| ``column_format`` | DEPRECATED. Specify an alternate format string for | +| | naming the | +| | columns in the mapping table. The default value is | +| | defined in ``elixir.options.M2MCOL_NAMEFORMAT``. You | +| | will be passed ``tablename``, ``key``, and ``entity`` | +| | as arguments to the format string. 
| ++--------------------+--------------------------------------------------------+ + + +================ +DSL-based syntax +================ + +The following DSL statements provide an alternative way to define relationships +between your entities. The first argument to all those statements is the name +of the relationship, the second is the 'kind' of object you are relating to +(it is usually given using the ``of_kind`` keyword). + +`belongs_to` +------------ + +The ``belongs_to`` statement is the DSL syntax equivalent to the ManyToOne_ +relationship. As such, it supports all the same arguments as ManyToOne_ +relationships. + +.. sourcecode:: python + + class Pet(Entity): + belongs_to('feeder', of_kind='Person') + belongs_to('owner', of_kind='Person', colname="owner_id") + + +`has_many` +---------- + +The ``has_many`` statement is the DSL syntax equivalent to the OneToMany_ +relationship. As such, it supports all the same arguments as OneToMany_ +relationships. + +.. sourcecode:: python + + class Person(Entity): + belongs_to('parent', of_kind='Person') + has_many('children', of_kind='Person') + +There is also an alternate form of the ``has_many`` relationship that takes +only two keyword arguments: ``through`` and ``via`` in order to encourage a +richer form of many-to-many relationship that is an alternative to the +``has_and_belongs_to_many`` statement. Here is an example: + +.. sourcecode:: python + + class Person(Entity): + has_field('name', Unicode) + has_many('assignments', of_kind='Assignment') + has_many('projects', through='assignments', via='project') + + class Assignment(Entity): + has_field('start_date', DateTime) + belongs_to('person', of_kind='Person') + belongs_to('project', of_kind='Project') + + class Project(Entity): + has_field('title', Unicode) + has_many('assignments', of_kind='Assignment') + +In the above example, a `Person` has many `projects` through the `Assignment` +relationship object, via a `project` attribute. 
+ + +`has_one` +--------- + +The ``has_one`` statement is the DSL syntax equivalent to the OneToOne_ +relationship. As such, it supports all the same arguments as OneToOne_ +relationships. + +.. sourcecode:: python + + class Car(Entity): + has_one('gear_stick', of_kind='GearStick', inverse='car') + + class GearStick(Entity): + belongs_to('car', of_kind='Car') + + +`has_and_belongs_to_many` +------------------------- + +The ``has_and_belongs_to_many`` statement is the DSL syntax equivalent to the +ManyToMany_ relationship. As such, it supports all the same arguments as +ManyToMany_ relationships. + +.. sourcecode:: python + + class Article(Entity): + has_and_belongs_to_many('tags', of_kind='Tag') + + class Tag(Entity): + has_and_belongs_to_many('articles', of_kind='Article') + +''' + +import warnings + +from sqlalchemy import ForeignKeyConstraint, Column, Table, and_ +from sqlalchemy.orm import relation, backref, class_mapper +from sqlalchemy.ext.associationproxy import association_proxy + +import options +from elixir.statements import ClassMutator +from elixir.properties import Property +from elixir.entity import EntityMeta, DEBUG + +__doc_all__ = [] + + +class Relationship(Property): + ''' + Base class for relationships. + ''' + + def __init__(self, of_kind, inverse=None, *args, **kwargs): + super(Relationship, self).__init__() + + self.of_kind = of_kind + self.inverse_name = inverse + + self._target = None + + self.property = None # sqlalchemy property + self.backref = None # sqlalchemy backref + + #TODO: unused for now + self.args = args + self.kwargs = kwargs + + def attach(self, entity, name): + super(Relationship, self).attach(entity, name) + entity._descriptor.relationships.append(self) + + def create_pk_cols(self): + self.create_keys(True) + + def create_non_pk_cols(self): + self.create_keys(False) + + def create_keys(self, pk): + ''' + Subclasses (ie. concrete relationships) may override this method to + create foreign keys. 
+ ''' + + def create_properties(self): + if self.property or self.backref: + return + + kwargs = self.get_prop_kwargs() + if 'order_by' in kwargs: + kwargs['order_by'] = \ + self.target._descriptor.translate_order_by(kwargs['order_by']) + + # transform callable arguments + for arg in ('primaryjoin', 'secondaryjoin', 'remote_side', + 'foreign_keys'): + kwarg = kwargs.get(arg, None) + if hasattr(kwarg, '__call__'): + kwargs[arg] = kwarg() + + # viewonly relationships need to create "standalone" relations (ie + # shouldn't be a backref of another relation). + if self.inverse and not kwargs.get('viewonly', False): + # check if the inverse was already processed (and thus has already + # defined a backref we can use) + if self.inverse.backref: + # let the user override the backref argument + if 'backref' not in kwargs: + kwargs['backref'] = self.inverse.backref + else: + # SQLAlchemy doesn't like when 'secondary' is both defined on + # the relation and the backref + kwargs.pop('secondary', None) + + # define backref for use by the inverse + self.backref = backref(self.name, **kwargs) + return + + self.property = relation(self.target, **kwargs) + self.add_mapper_property(self.name, self.property) + + def target(self): + if not self._target: + if isinstance(self.of_kind, basestring): + collection = self.entity._descriptor.collection + self._target = collection.resolve(self.of_kind, self.entity) + else: + self._target = self.of_kind + return self._target + target = property(target) + + def inverse(self): + if not hasattr(self, '_inverse'): + if self.inverse_name: + desc = self.target._descriptor + inverse = desc.find_relationship(self.inverse_name) + if inverse is None: + raise Exception( + "Couldn't find a relationship named '%s' in " + "entity '%s' or its parent entities." 
+ % (self.inverse_name, self.target.__name__)) + assert self.match_type_of(inverse), \ + "Relationships '%s' in entity '%s' and '%s' in entity " \ + "'%s' cannot be inverse of each other because their " \ + "types do not form a valid combination." % \ + (self.name, self.entity.__name__, + self.inverse_name, self.target.__name__) + else: + check_reverse = not self.kwargs.get('viewonly', False) + if isinstance(self.target, EntityMeta): + inverse = self.target._descriptor.get_inverse_relation( + self, check_reverse=check_reverse) + else: + inverse = None + self._inverse = inverse + if inverse and not self.kwargs.get('viewonly', False): + inverse._inverse = self + + return self._inverse + inverse = property(inverse) + + def match_type_of(self, other): + return False + + def is_inverse(self, other): + # viewonly relationships are not symmetrical: a viewonly relationship + # should have exactly one inverse (a ManyToOne relationship), but that + # inverse shouldn't have the viewonly relationship as its inverse. + return not other.kwargs.get('viewonly', False) and \ + other is not self and \ + self.match_type_of(other) and \ + self.entity == other.target and \ + other.entity == self.target and \ + (self.inverse_name == other.name or not self.inverse_name) and \ + (other.inverse_name == self.name or not other.inverse_name) + + +class ManyToOne(Relationship): + ''' + + ''' + + def __init__(self, of_kind, + column_kwargs=None, + colname=None, required=None, primary_key=None, + field=None, + constraint_kwargs=None, + use_alter=None, ondelete=None, onupdate=None, + target_column=None, + *args, **kwargs): + + # 1) handle column-related args + + # check that the column arguments don't conflict + assert not (field and (column_kwargs or colname)), \ + "ManyToOne can accept the 'field' argument or column " \ + "arguments ('colname' or 'column_kwargs') but not both!" 
+ + if colname and not isinstance(colname, list): + colname = [colname] + self.colname = colname or [] + + column_kwargs = column_kwargs or {} + # kwargs go by default to the relation(), so we need to manually + # extract those targeting the Column + if required is not None: + column_kwargs['nullable'] = not required + if primary_key is not None: + column_kwargs['primary_key'] = primary_key + # by default, created columns will have an index. + column_kwargs.setdefault('index', True) + self.column_kwargs = column_kwargs + + if field and not isinstance(field, list): + field = [field] + self.field = field or [] + + # 2) handle constraint kwargs + constraint_kwargs = constraint_kwargs or {} + if use_alter is not None: + constraint_kwargs['use_alter'] = use_alter + if ondelete is not None: + constraint_kwargs['ondelete'] = ondelete + if onupdate is not None: + constraint_kwargs['onupdate'] = onupdate + self.constraint_kwargs = constraint_kwargs + + # 3) misc arguments + if target_column and not isinstance(target_column, list): + target_column = [target_column] + self.target_column = target_column + + self.foreign_key = [] + self.primaryjoin_clauses = [] + + super(ManyToOne, self).__init__(of_kind, *args, **kwargs) + + def match_type_of(self, other): + return isinstance(other, (OneToMany, OneToOne)) + + def target_table(self): + if isinstance(self.target, EntityMeta): + return self.target._descriptor.table + else: + return class_mapper(self.target).local_table + target_table = property(target_table) + + def create_keys(self, pk): + ''' + Find all primary keys on the target and create foreign keys on the + source accordingly. 
+ ''' + + if self.foreign_key: + return + + if self.column_kwargs.get('primary_key', False) != pk: + return + + source_desc = self.entity._descriptor + if isinstance(self.target, EntityMeta): + # make sure the target has all its pk set up + self.target._descriptor.create_pk_cols() + #XXX: another option, instead of the FakeTable, would be to create an + # EntityDescriptor for the SA class. + target_table = self.target_table + + if source_desc.autoload: + #TODO: allow target_column to be used as an alternative to + # specifying primaryjoin, to be consistent with non-autoloaded + # tables + if self.colname: + if 'primaryjoin' not in self.kwargs: + self.primaryjoin_clauses = \ + _get_join_clauses(self.entity.table, + self.colname, None, + target_table)[0] + if not self.primaryjoin_clauses: + colnames = ', '.join(self.colname) + raise Exception( + "Couldn't find a foreign key constraint in table " + "'%s' using the following columns: %s." + % (self.entity.table.name, colnames)) + if self.field: + raise NotImplementedError( + "'field' argument not allowed on autoloaded table " + "relationships.") + else: + fk_refcols = [] + fk_colnames = [] + + if self.target_column is None: + target_columns = target_table.primary_key.columns + else: + target_columns = [target_table.columns[col] + for col in self.target_column] + + if not target_columns: + raise Exception("No primary key found in target table ('%s') " + "for the '%s' relationship of the '%s' entity." + % (target_table.name, self.name, + self.entity.__name__)) + if self.colname and \ + len(self.colname) != len(target_columns): + raise Exception( + "The number of column names provided in the colname " + "keyword argument of the '%s' relationship of the " + "'%s' entity is not the same as the number of columns " + "of the primary key of '%s'." 
+ % (self.name, self.entity.__name__, + self.target.__name__)) + + for key_num, target_col in enumerate(target_columns): + if self.field: + col = self.field[key_num].column + else: + if self.colname: + colname = self.colname[key_num] + else: + colname = options.FKCOL_NAMEFORMAT % \ + {'relname': self.name, + 'key': target_col.key} + + # We can't add the column to the table directly as the + # table might not be created yet. + col = Column(colname, target_col.type, + **self.column_kwargs) + source_desc.add_column(col) + + # If the column name was specified, and it is the same as + # this property's name, there is going to be a conflict. + # Don't allow this to happen. + if col.key == self.name: + raise ValueError( + "ManyToOne named '%s' in '%s' conficts " + " with the column of the same name. " + "You should probably define the foreign key " + "field manually and use the 'field' " + "argument on the ManyToOne relationship" + % (self.name, self.entity.__name__)) + + # Build the list of local columns which will be part of + # the foreign key + self.foreign_key.append(col) + + # Store the names of those columns + fk_colnames.append(col.key) + + # Build the list of column "paths" the foreign key will + # point to + fk_refcols.append("%s.%s" % \ + (target_table.fullname, target_col.key)) + + # Build up the primary join. This is needed when you have + # several ManyToOne relationships between two objects + self.primaryjoin_clauses.append(col == target_col) + + if 'name' not in self.constraint_kwargs: + # In some databases (at least MySQL) the constraint name needs + # to be unique for the whole database, instead of per table. 
+ fk_name = options.CONSTRAINT_NAMEFORMAT % \ + {'tablename': source_desc.tablename, + 'colnames': '_'.join(fk_colnames)} + self.constraint_kwargs['name'] = fk_name + + source_desc.add_constraint( + ForeignKeyConstraint(fk_colnames, fk_refcols, + **self.constraint_kwargs)) + + def get_prop_kwargs(self): + kwargs = {'uselist': False} + + if self.entity.table is self.target_table: + # this is needed because otherwise SA has no way to know what is + # the direction of the relationship since both columns present in + # the primaryjoin belong to the same table. In other words, it is + # necessary to know if this particular relation + # is the many-to-one side, or the one-to-xxx side. The foreignkey + # doesn't help in this case. + kwargs['remote_side'] = \ + [col for col in self.target_table.primary_key.columns] + + if self.primaryjoin_clauses: + kwargs['primaryjoin'] = and_(*self.primaryjoin_clauses) + + kwargs.update(self.kwargs) + + return kwargs + + +class OneToOne(Relationship): + uselist = False + + def __init__(self, of_kind, filter=None, *args, **kwargs): + self.filter = filter + if filter is not None: + # We set viewonly to True by default for filtered relationships, + # unless manually overridden. + # This is not strictly necessary, as SQLAlchemy allows non viewonly + # relationships with a custom join/filter. The example at: + # SADOCS/05/mappers.html#advdatamapping_relation_customjoin + # is not viewonly. Those relationships can be used as if the extra + # filter wasn't present when inserting. This can lead to a + # confusing behavior (if you insert data which doesn't match the + # extra criterion it'll get inserted anyway but you won't see it + # when you query back the attribute after a round-trip to the + # database). 
+ if 'viewonly' not in kwargs: + kwargs['viewonly'] = True + super(OneToOne, self).__init__(of_kind, *args, **kwargs) + + def match_type_of(self, other): + return isinstance(other, ManyToOne) + + def create_keys(self, pk): + # make sure an inverse relationship exists + if self.inverse is None: + raise Exception( + "Couldn't find any relationship in '%s' which " + "match as inverse of the '%s' relationship " + "defined in the '%s' entity. If you are using " + "inheritance you " + "might need to specify inverse relationships " + "manually by using the 'inverse' argument." + % (self.target, self.name, + self.entity)) + + def get_prop_kwargs(self): + kwargs = {'uselist': self.uselist} + + #TODO: for now, we don't break any test if we remove those 2 lines. + # So, we should either complete the selfref test to prove that they + # are indeed useful, or remove them. It might be they are indeed + # useless because the remote_side is already setup in the other way + # (ManyToOne). + if self.entity.table is self.target.table: + #FIXME: IF this code is of any use, it will probably break for + # autoloaded tables + kwargs['remote_side'] = self.inverse.foreign_key + + # Contrary to ManyToMany relationships, we need to specify the join + # clauses even if this relationship is not self-referencial because + # there could be several ManyToOne from the target class to us. 
+ joinclauses = self.inverse.primaryjoin_clauses + if self.filter: + # We need to make a copy of the joinclauses, to not add the filter + # on the backref + joinclauses = joinclauses[:] + [self.filter(self.target.table.c)] + if joinclauses: + kwargs['primaryjoin'] = and_(*joinclauses) + + kwargs.update(self.kwargs) + + return kwargs + + +class OneToMany(OneToOne): + uselist = True + + +class ManyToMany(Relationship): + uselist = True + + def __init__(self, of_kind, tablename=None, + local_colname=None, remote_colname=None, + ondelete=None, onupdate=None, + table=None, schema=None, + column_format=None, + filter=None, + table_kwargs=None, + *args, **kwargs): + self.user_tablename = tablename + + if local_colname and not isinstance(local_colname, list): + local_colname = [local_colname] + self.local_colname = local_colname or [] + if remote_colname and not isinstance(remote_colname, list): + remote_colname = [remote_colname] + self.remote_colname = remote_colname or [] + + self.ondelete = ondelete + self.onupdate = onupdate + + self.table = table + self.schema = schema + + if column_format: + warnings.warn("The 'column_format' argument on ManyToMany " + "relationships is deprecated. 
Please use the 'local_colname' " + "and/or 'remote_colname' arguments if you want custom " + "column names for this table only, or modify " + "options.M2MCOL_NAMEFORMAT if you want a custom format for " + "all ManyToMany tables", DeprecationWarning, stacklevel=3) + self.column_format = column_format or options.M2MCOL_NAMEFORMAT + if not hasattr(self.column_format, '__call__'): + # we need to store the format in a variable so that the + # closure of the lambda is correct + format = self.column_format + self.column_format = lambda data: format % data + if options.MIGRATION_TO_07_AID: + self.column_format = \ + migration_aid_m2m_column_formatter( + lambda data: options.OLD_M2MCOL_NAMEFORMAT % data, + self.column_format) + + self.filter = filter + if filter is not None: + # We set viewonly to True by default for filtered relationships, + # unless manually overridden. + if 'viewonly' not in kwargs: + kwargs['viewonly'] = True + + self.table_kwargs = table_kwargs or {} + + self.primaryjoin_clauses = [] + self.secondaryjoin_clauses = [] + + super(ManyToMany, self).__init__(of_kind, *args, **kwargs) + + def get_table(self): + warnings.warn("The secondary_table attribute on ManyToMany objects is " + "deprecated. 
You should rather use the table attribute.", + DeprecationWarning, stacklevel=2) + return self.table + secondary_table = property(get_table) + + def match_type_of(self, other): + return isinstance(other, ManyToMany) + + def create_tables(self): + if self.table is not None: + if 'primaryjoin' not in self.kwargs or \ + 'secondaryjoin' not in self.kwargs: + self._build_join_clauses() + assert self.inverse is None or self.inverse.table is None or \ + self.inverse.table is self.table + return + + if self.inverse: + inverse = self.inverse + if inverse.table is not None: + self.table = inverse.table + self.primaryjoin_clauses = inverse.secondaryjoin_clauses + self.secondaryjoin_clauses = inverse.primaryjoin_clauses + return + + assert not inverse.user_tablename or not self.user_tablename or \ + inverse.user_tablename == self.user_tablename + assert not inverse.remote_colname or not self.local_colname or \ + inverse.remote_colname == self.local_colname + assert not inverse.local_colname or not self.remote_colname or \ + inverse.local_colname == self.remote_colname + assert not inverse.schema or not self.schema or \ + inverse.schema == self.schema + assert not inverse.table_kwargs or not self.table_kwargs or \ + inverse.table_kwargs == self.table_kwargs + + self.user_tablename = inverse.user_tablename or self.user_tablename + self.local_colname = inverse.remote_colname or self.local_colname + self.remote_colname = inverse.local_colname or self.remote_colname + self.schema = inverse.schema or self.schema + self.local_colname = inverse.remote_colname or self.local_colname + + # compute table_kwargs + complete_kwargs = options.options_defaults['table_options'].copy() + complete_kwargs.update(self.table_kwargs) + + #needs: table_options['schema'], autoload, tablename, primary_keys, + #entity.__name__, table_fullname + e1_desc = self.entity._descriptor + e2_desc = self.target._descriptor + + e1_schema = e1_desc.table_options.get('schema', None) + e2_schema = 
e2_desc.table_options.get('schema', None) + schema = (self.schema is not None) and self.schema or e1_schema + + assert e1_schema == e2_schema or self.schema, \ + "Schema %r for entity %s differs from schema %r of entity %s." \ + " Consider using the schema-parameter. "\ + % (e1_schema, self.entity.__name__, + e2_schema, self.target.__name__) + + # First, we compute the name of the table. Note that some of the + # intermediary variables are reused later for the constraint + # names. + + # We use the name of the relation for the first entity + # (instead of the name of its primary key), so that we can + # have two many-to-many relations between the same objects + # without having a table name collision. + source_part = "%s_%s" % (e1_desc.tablename, self.name) + + # And we use only the name of the table of the second entity + # when there is no inverse, so that a many-to-many relation + # can be defined without an inverse. + if self.inverse: + target_part = "%s_%s" % (e2_desc.tablename, self.inverse.name) + else: + target_part = e2_desc.tablename + + if self.user_tablename: + tablename = self.user_tablename + else: + # We need to keep the table name consistent (independant of + # whether this relation or its inverse is setup first). + if self.inverse and source_part < target_part: + #XXX: use a different scheme for selfref (to not include the + # table name twice)? + tablename = "%s__%s" % (target_part, source_part) + else: + tablename = "%s__%s" % (source_part, target_part) + + if options.MIGRATION_TO_07_AID: + oldname = (self.inverse and + e1_desc.tablename < e2_desc.tablename) and \ + "%s__%s" % (target_part, source_part) or \ + "%s__%s" % (source_part, target_part) + if oldname != tablename: + warnings.warn( + "The generated table name for the '%s' relationship " + "on the '%s' entity changed from '%s' (the name " + "generated by Elixir 0.6.1 and earlier) to '%s'. 
" + "You should either rename the table in the database " + "to the new name or use the tablename argument on the " + "relationship to force the old name: tablename='%s'!" + % (self.name, self.entity.__name__, oldname, + tablename, oldname)) + + if e1_desc.autoload: + if not e2_desc.autoload: + raise Exception( + "Entity '%s' is autoloaded and its '%s' " + "ManyToMany relationship points to " + "the '%s' entity which is not autoloaded" + % (self.entity.__name__, self.name, + self.target.__name__)) + + self.table = Table(tablename, e1_desc.metadata, autoload=True, + **complete_kwargs) + if 'primaryjoin' not in self.kwargs or \ + 'secondaryjoin' not in self.kwargs: + self._build_join_clauses() + else: + # We pre-compute the names of the foreign key constraints + # pointing to the source (local) entity's table and to the + # target's table + + # In some databases (at least MySQL) the constraint names need + # to be unique for the whole database, instead of per table. + source_fk_name = "%s_fk" % source_part + if self.inverse: + target_fk_name = "%s_fk" % target_part + else: + target_fk_name = "%s_inverse_fk" % source_part + + columns = [] + constraints = [] + + for num, desc, fk_name, rel, inverse, colnames, join_clauses in ( + (0, e1_desc, source_fk_name, self, self.inverse, + self.local_colname, self.primaryjoin_clauses), + (1, e2_desc, target_fk_name, self.inverse, self, + self.remote_colname, self.secondaryjoin_clauses)): + + fk_colnames = [] + fk_refcols = [] + if colnames: + assert len(colnames) == len(desc.primary_keys) + else: + # The data generated here will be fed to the M2M column + # formatter to generate the name of the columns of the + # intermediate table for *one* side of the relationship, + # that is, from the intermediate table to the current + # entity, as stored in the "desc" variable. + data = {# A) relationships info + + # the name of the rel going *from* the entity + # we are currently generating a column pointing + # *to*. 
This is generally *not* what you want to + # use. eg in a "Post" and "Tag" example, with + # relationships named 'tags' and 'posts', when + # creating the columns from the intermediate + # table to the "Post" entity, 'relname' will + # contain 'tags'. + 'relname': rel and rel.name or 'inverse', + + # the name of the inverse relationship. In the + # above example, 'inversename' will contain + # 'posts'. + 'inversename': inverse and inverse.name + or 'inverse', + # is A == B? + 'selfref': e1_desc is e2_desc, + # provided for backward compatibility, DO NOT USE! + 'num': num, + # provided for backward compatibility, DO NOT USE! + 'numifself': e1_desc is e2_desc and str(num + 1) + or '', + # B) target information (from the perspective of + # the intermediate table) + 'target': desc.entity, + 'entity': desc.entity.__name__.lower(), + 'tablename': desc.tablename, + + # C) current (intermediate) table name + 'current_table': tablename + } + colnames = [] + for pk_col in desc.primary_keys: + data.update(key=pk_col.key) + colnames.append(self.column_format(data)) + + for pk_col, colname in zip(desc.primary_keys, colnames): + col = Column(colname, pk_col.type, primary_key=True) + columns.append(col) + + # Build the list of local columns which will be part + # of the foreign key. 
+ fk_colnames.append(colname) + + # Build the list of column "paths" the foreign key will + # point to + target_path = "%s.%s" % (desc.table_fullname, pk_col.key) + fk_refcols.append(target_path) + + # Build join clauses (in case we have a self-ref) + if self.entity is self.target: + join_clauses.append(col == pk_col) + + onupdate = rel and rel.onupdate + ondelete = rel and rel.ondelete + + #FIXME: fk_name is misleading + constraints.append( + ForeignKeyConstraint(fk_colnames, fk_refcols, + name=fk_name, onupdate=onupdate, + ondelete=ondelete)) + + args = columns + constraints + + self.table = Table(tablename, e1_desc.metadata, + schema=schema, *args, **complete_kwargs) + if DEBUG: + print self.table.repr2() + + def _build_join_clauses(self): + # In the case we have a self-reference, we need to build join clauses + if self.entity is self.target: + if not self.local_colname and not self.remote_colname: + raise Exception( + "Self-referential ManyToMany " + "relationships in autoloaded entities need to have at " + "least one of either 'local_colname' or 'remote_colname' " + "argument specified. The '%s' relationship in the '%s' " + "entity doesn't have either." 
+ % (self.name, self.entity.__name__)) + + self.primaryjoin_clauses, self.secondaryjoin_clauses = \ + _get_join_clauses(self.table, + self.local_colname, self.remote_colname, + self.entity.table) + + def get_prop_kwargs(self): + kwargs = {'secondary': self.table, + 'uselist': self.uselist} + + if self.filter: + # we need to make a copy of the joinclauses + secondaryjoin_clauses = self.secondaryjoin_clauses[:] + \ + [self.filter(self.target.table.c)] + else: + secondaryjoin_clauses = self.secondaryjoin_clauses + + if self.target is self.entity or self.filter: + kwargs['primaryjoin'] = and_(*self.primaryjoin_clauses) + kwargs['secondaryjoin'] = and_(*secondaryjoin_clauses) + + kwargs.update(self.kwargs) + + return kwargs + + def is_inverse(self, other): + return super(ManyToMany, self).is_inverse(other) and \ + (self.user_tablename == other.user_tablename or + (not self.user_tablename and not other.user_tablename)) + + +def migration_aid_m2m_column_formatter(oldformatter, newformatter): + def debug_formatter(data): + old_name = oldformatter(data) + new_name = newformatter(data) + if new_name != old_name: + complete_data = data.copy() + complete_data.update(old_name=old_name, + new_name=new_name, + targetname=data['target'].__name__) + # Specifying a stacklevel is useless in this case as the name + # generation is triggered by setup_all(), not by the declaration + # of the offending relationship. + warnings.warn("The '%(old_name)s' column in the " + "'%(current_table)s' table, used as the " + "intermediate table for the '%(relname)s' " + "relationship on the '%(targetname)s' entity " + "was renamed to '%(new_name)s'." 
+ % complete_data) + return new_name + return debug_formatter + + +def _get_join_clauses(local_table, local_cols1, local_cols2, target_table): + primary_join, secondary_join = [], [] + cols1 = local_cols1[:] + cols1.sort() + cols1 = tuple(cols1) + + if local_cols2 is not None: + cols2 = local_cols2[:] + cols2.sort() + cols2 = tuple(cols2) + else: + cols2 = None + + # Build a map of fk constraints pointing to the correct table. + # The map is indexed on the local col names. + constraint_map = {} + for constraint in local_table.constraints: + if isinstance(constraint, ForeignKeyConstraint): + use_constraint = True + fk_colnames = [] + + # if all columns point to the correct table, we use the constraint + #TODO: check that it contains as many columns as the pk of the + #target entity, or even that it points to the actual pk columns + for fk in constraint.elements: + if fk.references(target_table): + # local column key + fk_colnames.append(fk.parent.key) + else: + use_constraint = False + if use_constraint: + fk_colnames.sort() + constraint_map[tuple(fk_colnames)] = constraint + + # Either the fk column names match explicitely with the columns given for + # one of the joins (primary or secondary), or we assume the current + # columns match because the columns for this join were not given and we + # know the other join is either not used (is None) or has an explicit + # match. + +#TODO: rewrite this. Even with the comment, I don't even understand it myself. 
+ for cols, constraint in constraint_map.iteritems(): + if cols == cols1 or (cols != cols2 and + not cols1 and (cols2 in constraint_map or + cols2 is None)): + join = primary_join + elif cols == cols2 or (cols2 == () and cols1 in constraint_map): + join = secondary_join + else: + continue + for fk in constraint.elements: + join.append(fk.parent == fk.column) + return primary_join, secondary_join + + +def rel_mutator_handler(target): + def handler(entity, name, of_kind=None, through=None, via=None, + *args, **kwargs): + if through and via: + setattr(entity, name, + association_proxy(through, via, **kwargs)) + return + elif through or via: + raise Exception("'through' and 'via' relationship keyword " + "arguments should be used in combination.") + rel = target(of_kind, *args, **kwargs) + rel.attach(entity, name) + return handler + + +belongs_to = ClassMutator(rel_mutator_handler(ManyToOne)) +has_one = ClassMutator(rel_mutator_handler(OneToOne)) +has_many = ClassMutator(rel_mutator_handler(OneToMany)) +has_and_belongs_to_many = ClassMutator(rel_mutator_handler(ManyToMany)) diff --git a/libs/elixir/statements.py b/libs/elixir/statements.py new file mode 100644 index 0000000..c21bf30 --- /dev/null +++ b/libs/elixir/statements.py @@ -0,0 +1,59 @@ +import sys + +MUTATORS = '__elixir_mutators__' + +class ClassMutator(object): + ''' + DSL-style syntax + + A ``ClassMutator`` object represents a DSL term. + ''' + + def __init__(self, handler): + ''' + Create a new ClassMutator, using the `handler` callable to process it + when the time will come. + ''' + self.handler = handler + + # called when a mutator (eg. 
"has_field(...)") is parsed + def __call__(self, *args, **kwargs): + # self in this case is the "generic" mutator (eg "has_field") + + # jam this mutator into the class's mutator list + class_locals = sys._getframe(1).f_locals + mutators = class_locals.setdefault(MUTATORS, []) + mutators.append((self, args, kwargs)) + + def process(self, entity, *args, **kwargs): + ''' + Process one mutator. This version simply calls the handler callable, + but another mutator (sub)class could do more processing. + ''' + self.handler(entity, *args, **kwargs) + + +#TODO: move this to the super class (to be created here) of EntityMeta +def process_mutators(entity): + ''' + Apply all mutators of the given entity. That is, loop over all mutators + in the class's mutator list and process them. + ''' + # we don't use getattr here to not inherit from the parent mutators + # inadvertantly if the current entity hasn't defined any mutator. + mutators = entity.__dict__.get(MUTATORS, []) + for mutator, args, kwargs in mutators: + mutator.process(entity, *args, **kwargs) + +class Statement(ClassMutator): + + def process(self, entity, *args, **kwargs): + builder = self.handler(entity, *args, **kwargs) + entity._descriptor.builders.append(builder) + +class PropertyStatement(ClassMutator): + + def process(self, entity, name, *args, **kwargs): + prop = self.handler(*args, **kwargs) + prop.attach(entity, name) + diff --git a/libs/flask/__init__.py b/libs/flask/__init__.py new file mode 100644 index 0000000..ee8508b --- /dev/null +++ b/libs/flask/__init__.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +""" + flask + ~~~~~ + + A microframework based on Werkzeug. It's extensively documented + and follows best practice patterns. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +# utilities we import from Werkzeug and Jinja2 that are unused +# in the module but are exported as public interface. 
+from werkzeug import abort, redirect +from jinja2 import Markup, escape + +from .app import Flask, Request, Response +from .config import Config +from .helpers import url_for, jsonify, json_available, flash, \ + send_file, send_from_directory, get_flashed_messages, \ + get_template_attribute, make_response +from .globals import current_app, g, request, session, _request_ctx_stack +from .module import Module +from .templating import render_template, render_template_string +from .session import Session + +# the signals +from .signals import signals_available, template_rendered, request_started, \ + request_finished, got_request_exception + +# only import json if it's available +if json_available: + from .helpers import json diff --git a/libs/flask/app.py b/libs/flask/app.py new file mode 100644 index 0000000..066215b --- /dev/null +++ b/libs/flask/app.py @@ -0,0 +1,965 @@ +# -*- coding: utf-8 -*- +""" + flask.app + ~~~~~~~~~ + + This module implements the central WSGI application object. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. 
+""" + +from __future__ import with_statement + +from threading import Lock +from datetime import timedelta, datetime +from itertools import chain + +from jinja2 import Environment + +from werkzeug import ImmutableDict +from werkzeug.routing import Map, Rule +from werkzeug.exceptions import HTTPException, InternalServerError, \ + MethodNotAllowed + +from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \ + _tojson_filter, _endpoint_from_view_func +from .wrappers import Request, Response +from .config import ConfigAttribute, Config +from .ctx import _RequestContext +from .globals import _request_ctx_stack, request +from .session import Session, _NullSession +from .module import _ModuleSetupState +from .templating import _DispatchingJinjaLoader, \ + _default_template_ctx_processor +from .signals import request_started, request_finished, got_request_exception + +# a lock used for logger initialization +_logger_lock = Lock() + + +class Flask(_PackageBoundObject): + """The flask object implements a WSGI application and acts as the central + object. It is passed the name of the module or package of the + application. Once it is created it will act as a central registry for + the view functions, the URL rules, template configuration and much more. + + The name of the package is used to resolve resources from inside the + package or the folder the module is contained in depending on if the + package parameter resolves to an actual python package (a folder with + an `__init__.py` file inside) or a standard module (just a `.py` file). + + For more information about resource loading, see :func:`open_resource`. + + Usually you create a :class:`Flask` instance in your main module or + in the `__init__.py` file of your package like this:: + + from flask import Flask + app = Flask(__name__) + + .. admonition:: About the First Parameter + + The idea of the first parameter is to give Flask an idea what + belongs to your application. 
This name is used to find resources + on the file system, can be used by extensions to improve debugging + information and a lot more. + + So it's important what you provide there. If you are using a single + module, `__name__` is always the correct value. If you however are + using a package, it's usually recommended to hardcode the name of + your package there. + + For example if your application is defined in `yourapplication/app.py` + you should create it with one of the two versions below:: + + app = Flask('yourapplication') + app = Flask(__name__.split('.')[0]) + + Why is that? The application will work even with `__name__`, thanks + to how resources are looked up. However it will make debugging more + painful. Certain extensions can make assumptions based on the + import name of your application. For example the Flask-SQLAlchemy + extension will look for the code in your application that triggered + an SQL query in debug mode. If the import name is not properly set + up, that debugging information is lost. (For example it would only + pick up SQL queries in `yourapplicaiton.app` and not + `yourapplication.views.frontend`) + + .. versionadded:: 0.5 + The `static_path` parameter was added. + + :param import_name: the name of the application package + :param static_path: can be used to specify a different path for the + static files on the web. Defaults to ``/static``. + This does not affect the folder the files are served + *from*. + """ + + #: The class that is used for request objects. See :class:`~flask.Request` + #: for more information. + request_class = Request + + #: The class that is used for response objects. See + #: :class:`~flask.Response` for more information. + response_class = Response + + #: Path for the static files. If you don't want to use static files + #: you can set this value to `None` in which case no URL rule is added + #: and the development server will no longer serve any static files. 
+ #: + #: This is the default used for application and modules unless a + #: different value is passed to the constructor. + static_path = '/static' + + #: The debug flag. Set this to `True` to enable debugging of the + #: application. In debug mode the debugger will kick in when an unhandled + #: exception occurs and the integrated server will automatically reload + #: the application if changes in the code are detected. + #: + #: This attribute can also be configured from the config with the `DEBUG` + #: configuration key. Defaults to `False`. + debug = ConfigAttribute('DEBUG') + + #: The testing flag. Set this to `True` to enable the test mode of + #: Flask extensions (and in the future probably also Flask itself). + #: For example this might activate unittest helpers that have an + #: additional runtime cost which should not be enabled by default. + #: + #: This attribute can also be configured from the config with the + #: `TESTING` configuration key. Defaults to `False`. + testing = ConfigAttribute('TESTING') + + #: If a secret key is set, cryptographic components can use this to + #: sign cookies and other things. Set this to a complex random value + #: when you want to use the secure cookie for instance. + #: + #: This attribute can also be configured from the config with the + #: `SECRET_KEY` configuration key. Defaults to `None`. + secret_key = ConfigAttribute('SECRET_KEY') + + #: The secure cookie uses this for the name of the session cookie. + #: + #: This attribute can also be configured from the config with the + #: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'`` + session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME') + + #: A :class:`~datetime.timedelta` which is used to set the expiration + #: date of a permanent session. The default is 31 days which makes a + #: permanent session survive for roughly one month.
+ #: + #: This attribute can also be configured from the config with the + #: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to + #: ``timedelta(days=31)`` + permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME') + + #: Enable this if you want to use the X-Sendfile feature. Keep in + #: mind that the server has to support this. This only affects files + #: sent with the :func:`send_file` method. + #: + #: .. versionadded:: 0.2 + #: + #: This attribute can also be configured from the config with the + #: `USE_X_SENDFILE` configuration key. Defaults to `False`. + use_x_sendfile = ConfigAttribute('USE_X_SENDFILE') + + #: The name of the logger to use. By default the logger name is the + #: package name passed to the constructor. + #: + #: .. versionadded:: 0.4 + logger_name = ConfigAttribute('LOGGER_NAME') + + #: The logging format used for the debug logger. This is only used when + #: the application is in debug mode, otherwise the attached logging + #: handler does the formatting. + #: + #: .. versionadded:: 0.3 + debug_log_format = ( + '-' * 80 + '\n' + + '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' + + '%(message)s\n' + + '-' * 80 + ) + + #: Options that are passed directly to the Jinja2 environment. + jinja_options = ImmutableDict( + extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'] + ) + + #: Default configuration parameters. + default_config = ImmutableDict({ + 'DEBUG': False, + 'TESTING': False, + 'PROPAGATE_EXCEPTIONS': None, + 'SECRET_KEY': None, + 'SESSION_COOKIE_NAME': 'session', + 'PERMANENT_SESSION_LIFETIME': timedelta(days=31), + 'USE_X_SENDFILE': False, + 'LOGGER_NAME': None, + 'SERVER_NAME': None, + 'MAX_CONTENT_LENGTH': None + }) + + #: the test client that is used with when `test_client` is used. + #: + #: .. 
versionadded:: 0.7 + test_client_class = None + + def __init__(self, import_name, static_path=None): + _PackageBoundObject.__init__(self, import_name) + if static_path is not None: + self.static_path = static_path + + #: The configuration dictionary as :class:`Config`. This behaves + #: exactly like a regular dictionary but supports additional methods + #: to load a config from files. + self.config = Config(self.root_path, self.default_config) + + #: Prepare the deferred setup of the logger. + self._logger = None + self.logger_name = self.import_name + + #: A dictionary of all view functions registered. The keys will + #: be function names which are also used to generate URLs and + #: the values are the function objects themselves. + #: To register a view function, use the :meth:`route` decorator. + self.view_functions = {} + + #: A dictionary of all registered error handlers. The key is + #: be the error code as integer, the value the function that + #: should handle that error. + #: To register a error handler, use the :meth:`errorhandler` + #: decorator. + self.error_handlers = {} + + #: A dictionary with lists of functions that should be called at the + #: beginning of the request. The key of the dictionary is the name of + #: the module this function is active for, `None` for all requests. + #: This can for example be used to open database connections or + #: getting hold of the currently logged in user. To register a + #: function here, use the :meth:`before_request` decorator. + self.before_request_funcs = {} + + #: A dictionary with lists of functions that should be called after + #: each request. The key of the dictionary is the name of the module + #: this function is active for, `None` for all requests. This can for + #: example be used to open database connections or getting hold of the + #: currently logged in user. To register a function here, use the + #: :meth:`after_request` decorator. 
+ self.after_request_funcs = {} + + #: A dictionary with list of functions that are called without argument + #: to populate the template context. The key of the dictionary is the + #: name of the module this function is active for, `None` for all + #: requests. Each returns a dictionary that the template context is + #: updated with. To register a function here, use the + #: :meth:`context_processor` decorator. + self.template_context_processors = { + None: [_default_template_ctx_processor] + } + + #: all the loaded modules in a dictionary by name. + #: + #: .. versionadded:: 0.5 + self.modules = {} + + #: a place where extensions can store application specific state. For + #: example this is where an extension could store database engines and + #: similar things. For backwards compatibility extensions should register + #: themselves like this:: + #: + #: if not hasattr(app, 'extensions'): + #: app.extensions = {} + #: app.extensions['extensionname'] = SomeObject() + #: + #: The key must match the name of the `flaskext` module. For example in + #: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be + #: ``'foo'``. + #: + #: .. versionadded:: 0.7 + self.extensions = {} + + #: The :class:`~werkzeug.routing.Map` for this instance. You can use + #: this to change the routing converters after the class was created + #: but before any routes are connected. Example:: + #: + #: from werkzeug.routing import BaseConverter + #: + #: class ListConverter(BaseConverter): + #: def to_python(self, value): + #: return value.split(',') + #: def to_url(self, values): + #: return ','.join(BaseConverter.to_url(value) + #: for value in values) + #: + #: app = Flask(__name__) + #: app.url_map.converters['list'] = ListConverter + self.url_map = Map() + + # register the static folder for the application. Do that even + # if the folder does not exist. 
First of all it might be created + # while the server is running (usually happens during development) + # but also because google appengine stores static files somewhere + # else when mapped with the .yml file. + self.add_url_rule(self.static_path + '/', + endpoint='static', + view_func=self.send_static_file) + + #: The Jinja2 environment. It is created from the + #: :attr:`jinja_options`. + self.jinja_env = self.create_jinja_environment() + self.init_jinja_globals() + + @property + def propagate_exceptions(self): + """Returns the value of the `PROPAGATE_EXCEPTIONS` configuration + value in case it's set, otherwise a sensible default is returned. + + .. versionadded:: 0.7 + """ + rv = self.config['PROPAGATE_EXCEPTIONS'] + if rv is not None: + return rv + return self.testing or self.debug + + @property + def logger(self): + """A :class:`logging.Logger` object for this application. The + default configuration is to log to stderr if the application is + in debug mode. This logger can be used to (surprise) log messages. + Here some examples:: + + app.logger.debug('A value for debugging') + app.logger.warning('A warning ocurred (%d apples)', 42) + app.logger.error('An error occoured') + + .. versionadded:: 0.3 + """ + if self._logger and self._logger.name == self.logger_name: + return self._logger + with _logger_lock: + if self._logger and self._logger.name == self.logger_name: + return self._logger + from flask.logging import create_logger + self._logger = rv = create_logger(self) + return rv + + def create_jinja_environment(self): + """Creates the Jinja2 environment based on :attr:`jinja_options` + and :meth:`select_jinja_autoescape`. + + .. 
versionadded:: 0.5 + """ + options = dict(self.jinja_options) + if 'autoescape' not in options: + options['autoescape'] = self.select_jinja_autoescape + return Environment(loader=_DispatchingJinjaLoader(self), **options) + + def init_jinja_globals(self): + """Called directly after the environment was created to inject + some defaults (like `url_for`, `get_flashed_messages` and the + `tojson` filter. + + .. versionadded:: 0.5 + """ + self.jinja_env.globals.update( + url_for=url_for, + get_flashed_messages=get_flashed_messages + ) + self.jinja_env.filters['tojson'] = _tojson_filter + + def select_jinja_autoescape(self, filename): + """Returns `True` if autoescaping should be active for the given + template name. + + .. versionadded:: 0.5 + """ + if filename is None: + return False + return filename.endswith(('.html', '.htm', '.xml', '.xhtml')) + + def update_template_context(self, context): + """Update the template context with some commonly used variables. + This injects request, session, config and g into the template + context as well as everything template context processors want + to inject. Note that the as of Flask 0.6, the original values + in the context will not be overriden if a context processor + decides to return a value with the same key. + + :param context: the context as a dictionary that is updated in place + to add extra variables. + """ + funcs = self.template_context_processors[None] + mod = _request_ctx_stack.top.request.module + if mod is not None and mod in self.template_context_processors: + funcs = chain(funcs, self.template_context_processors[mod]) + orig_ctx = context.copy() + for func in funcs: + context.update(func()) + # make sure the original values win. This makes it possible to + # easier add new variables in context processors without breaking + # existing views. + context.update(orig_ctx) + + def run(self, host='127.0.0.1', port=5000, **options): + """Runs the application on a local development server. 
If the + :attr:`debug` flag is set the server will automatically reload + for code changes and show a debugger in case an exception happened. + + If you want to run the application in debug mode, but disable the + code execution on the interactive debugger, you can pass + ``use_evalex=False`` as parameter. This will keep the debugger's + traceback screen active, but disable code execution. + + .. admonition:: Keep in Mind + + Flask will suppress any server error with a generic error page + unless it is in debug mode. As such to enable just the + interactive debugger without the code reloading, you have to + invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. + Setting ``use_debugger`` to `True` without being in debug mode + won't catch any exceptions because there won't be any to + catch. + + :param host: the hostname to listen on. set this to ``'0.0.0.0'`` + to have the server available externally as well. + :param port: the port of the webserver + :param options: the options to be forwarded to the underlying + Werkzeug server. See :func:`werkzeug.run_simple` + for more information. + """ + from werkzeug import run_simple + if 'debug' in options: + self.debug = options.pop('debug') + options.setdefault('use_reloader', self.debug) + options.setdefault('use_debugger', self.debug) + return run_simple(host, port, self, **options) + + def test_client(self, use_cookies=True): + """Creates a test client for this application. For information + about unit testing head over to :ref:`testing`. + + The test client can be used in a `with` block to defer the closing down + of the context until the end of the `with` block. This is useful if + you want to access the context locals for testing:: + + with app.test_client() as c: + rv = c.get('/?vodka=42') + assert request.args['vodka'] == '42' + + .. versionchanged:: 0.4 + added support for `with` block usage for the client. + + .. 
versionadded:: 0.7 + The `use_cookies` parameter was added as well as the ability + to override the client to be used by setting the + :attr:`test_client_class` attribute. + """ + cls = self.test_client_class + if cls is None: + from flask.testing import FlaskClient as cls + return cls(self, self.response_class, use_cookies=use_cookies) + + def open_session(self, request): + """Creates or opens a new session. Default implementation stores all + session data in a signed cookie. This requires that the + :attr:`secret_key` is set. + + :param request: an instance of :attr:`request_class`. + """ + key = self.secret_key + if key is not None: + return Session.load_cookie(request, self.session_cookie_name, + secret_key=key) + + def save_session(self, session, response): + """Saves the session if it needs updates. For the default + implementation, check :meth:`open_session`. + + :param session: the session to be saved (a + :class:`~werkzeug.contrib.securecookie.SecureCookie` + object) + :param response: an instance of :attr:`response_class` + """ + expires = domain = None + if session.permanent: + expires = datetime.utcnow() + self.permanent_session_lifetime + if self.config['SERVER_NAME'] is not None: + domain = '.' + self.config['SERVER_NAME'] + session.save_cookie(response, self.session_cookie_name, + expires=expires, httponly=True, domain=domain) + + def register_module(self, module, **options): + """Registers a module with this application. The keyword argument + of this function are the same as the ones for the constructor of the + :class:`Module` class and will override the values of the module if + provided. + """ + options.setdefault('url_prefix', module.url_prefix) + options.setdefault('subdomain', module.subdomain) + self.view_functions.update(module.view_functions) + state = _ModuleSetupState(self, **options) + for func in module._register_events: + func(state) + + def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + """Connects a URL rule. 
Works exactly like the :meth:`route` + decorator. If a view_func is provided it will be registered with the + endpoint. + + Basically this example:: + + @app.route('/') + def index(): + pass + + Is equivalent to the following:: + + def index(): + pass + app.add_url_rule('/', 'index', index) + + If the view_func is not provided you will need to connect the endpoint + to a view function like so:: + + app.view_functions['index'] = index + + .. versionchanged:: 0.2 + `view_func` parameter added. + + .. versionchanged:: 0.6 + `OPTIONS` is added automatically as method. + + :param rule: the URL rule as string + :param endpoint: the endpoint for the registered URL rule. Flask + itself assumes the name of the view function as + endpoint + :param view_func: the function to call when serving a request to the + provided endpoint + :param options: the options to be forwarded to the underlying + :class:`~werkzeug.routing.Rule` object. A change + to Werkzeug is handling of method options. methods + is a list of methods this rule should be limited + to (`GET`, `POST` etc.). By default a rule + just listens for `GET` (and implicitly `HEAD`). + Starting with Flask 0.6, `OPTIONS` is implicitly + added and handled by the standard request handling. + """ + if endpoint is None: + endpoint = _endpoint_from_view_func(view_func) + options['endpoint'] = endpoint + methods = options.pop('methods', ('GET',)) + provide_automatic_options = False + if 'OPTIONS' not in methods: + methods = tuple(methods) + ('OPTIONS',) + provide_automatic_options = True + rule = Rule(rule, methods=methods, **options) + rule.provide_automatic_options = provide_automatic_options + self.url_map.add(rule) + if view_func is not None: + self.view_functions[endpoint] = view_func + + def route(self, rule, **options): + """A decorator that is used to register a view function for a + given URL rule. 
Example:: + + @app.route('/') + def index(): + return 'Hello World' + + Variables parts in the route can be specified with angular + brackets (``/user/``). By default a variable part + in the URL accepts any string without a slash however a different + converter can be specified as well by using ````. + + Variable parts are passed to the view function as keyword + arguments. + + The following converters are possible: + + =========== =========================================== + `int` accepts integers + `float` like `int` but for floating point values + `path` like the default but also accepts slashes + =========== =========================================== + + Here some examples:: + + @app.route('/') + def index(): + pass + + @app.route('/') + def show_user(username): + pass + + @app.route('/post/') + def show_post(post_id): + pass + + An important detail to keep in mind is how Flask deals with trailing + slashes. The idea is to keep each URL unique so the following rules + apply: + + 1. If a rule ends with a slash and is requested without a slash + by the user, the user is automatically redirected to the same + page with a trailing slash attached. + 2. If a rule does not end with a trailing slash and the user request + the page with a trailing slash, a 404 not found is raised. + + This is consistent with how web servers deal with static files. This + also makes it possible to use relative link targets safely. + + The :meth:`route` decorator accepts a couple of other arguments + as well: + + :param rule: the URL rule as string + :param methods: a list of methods this rule should be limited + to (`GET`, `POST` etc.). By default a rule + just listens for `GET` (and implicitly `HEAD`). + Starting with Flask 0.6, `OPTIONS` is implicitly + added and handled by the standard request handling. + :param subdomain: specifies the rule for the subdomain in case + subdomain matching is in use. 
+ :param strict_slashes: can be used to disable the strict slashes + setting for this rule. See above. + :param options: other options to be forwarded to the underlying + :class:`~werkzeug.routing.Rule` object. + """ + def decorator(f): + self.add_url_rule(rule, None, f, **options) + return f + return decorator + + + def endpoint(self, endpoint): + """A decorator to register a function as an endpoint. + Example:: + + @app.endpoint('example.endpoint') + def example(): + return "example" + + :param endpoint: the name of the endpoint + """ + def decorator(f): + self.view_functions[endpoint] = f + return f + return decorator + + def errorhandler(self, code): + """A decorator that is used to register a function give a given + error code. Example:: + + @app.errorhandler(404) + def page_not_found(error): + return 'This page does not exist', 404 + + You can also register a function as error handler without using + the :meth:`errorhandler` decorator. The following example is + equivalent to the one above:: + + def page_not_found(error): + return 'This page does not exist', 404 + app.error_handlers[404] = page_not_found + + :param code: the code as integer for the handler + """ + def decorator(f): + self.error_handlers[code] = f + return f + return decorator + + def template_filter(self, name=None): + """A decorator that is used to register custom template filter. + You can specify a name for the filter, otherwise the function + name will be used. Example:: + + @app.template_filter() + def reverse(s): + return s[::-1] + + :param name: the optional name of the filter, otherwise the + function name will be used. 
+ """ + def decorator(f): + self.jinja_env.filters[name or f.__name__] = f + return f + return decorator + + def before_request(self, f): + """Registers a function to run before each request.""" + self.before_request_funcs.setdefault(None, []).append(f) + return f + + def after_request(self, f): + """Register a function to be run after each request.""" + self.after_request_funcs.setdefault(None, []).append(f) + return f + + def context_processor(self, f): + """Registers a template context processor function.""" + self.template_context_processors[None].append(f) + return f + + def handle_http_exception(self, e): + """Handles an HTTP exception. By default this will invoke the + registered error handlers and fall back to returning the + exception as response. + + .. versionadded: 0.3 + """ + handler = self.error_handlers.get(e.code) + if handler is None: + return e + return handler(e) + + def handle_exception(self, e): + """Default exception handling that kicks in when an exception + occurs that is not caught. In debug mode the exception will + be re-raised immediately, otherwise it is logged and the handler + for a 500 internal server error is used. If no such handler + exists, a default 500 internal server error message is displayed. + + .. versionadded: 0.3 + """ + got_request_exception.send(self, exception=e) + handler = self.error_handlers.get(500) + if self.propagate_exceptions: + raise + self.logger.exception('Exception on %s [%s]' % ( + request.path, + request.method + )) + if handler is None: + return InternalServerError() + return handler(e) + + def dispatch_request(self): + """Does the request dispatching. Matches the URL and returns the + return value of the view or error handler. This does not have to + be a response object. In order to convert the return value to a + proper response object, call :func:`make_response`.
+ """ + req = _request_ctx_stack.top.request + try: + if req.routing_exception is not None: + raise req.routing_exception + rule = req.url_rule + # if we provide automatic options for this URL and the + # request came with the OPTIONS method, reply automatically + if getattr(rule, 'provide_automatic_options', False) \ + and req.method == 'OPTIONS': + return self.make_default_options_response() + # otherwise dispatch to the handler for that endpoint + return self.view_functions[rule.endpoint](**req.view_args) + except HTTPException, e: + return self.handle_http_exception(e) + + def make_default_options_response(self): + """This method is called to create the default `OPTIONS` response. + This can be changed through subclassing to change the default + behaviour of `OPTIONS` responses. + + .. versionadded:: 0.7 + """ + # This would be nicer in Werkzeug 0.7, which however currently + # is not released. Werkzeug 0.7 provides a method called + # allowed_methods() that returns all methods that are valid for + # a given path. + methods = [] + try: + _request_ctx_stack.top.url_adapter.match(method='--') + except MethodNotAllowed, e: + methods = e.valid_methods + except HTTPException, e: + pass + rv = self.response_class() + rv.allow.update(methods) + return rv + + def make_response(self, rv): + """Converts the return value from a view function to a real + response object that is an instance of :attr:`response_class`. + + The following types are allowed for `rv`: + + .. 
tabularcolumns:: |p{3.5cm}|p{9.5cm}| + + ======================= =========================================== + :attr:`response_class` the object is returned unchanged + :class:`str` a response object is created with the + string as body + :class:`unicode` a response object is created with the + string encoded to utf-8 as body + :class:`tuple` the response object is created with the + contents of the tuple as arguments + a WSGI function the function is called as WSGI application + and buffered as response object + ======================= =========================================== + + :param rv: the return value from the view function + """ + if rv is None: + raise ValueError('View function did not return a response') + if isinstance(rv, self.response_class): + return rv + if isinstance(rv, basestring): + return self.response_class(rv) + if isinstance(rv, tuple): + return self.response_class(*rv) + return self.response_class.force_type(rv, request.environ) + + def create_url_adapter(self, request): + """Creates a URL adapter for the given request. The URL adapter + is created at a point where the request context is not yet set up + so the request is passed explicitly. + + .. versionadded:: 0.6 + """ + return self.url_map.bind_to_environ(request.environ, + server_name=self.config['SERVER_NAME']) + + def preprocess_request(self): + """Called before the actual request dispatching and will + call every as :meth:`before_request` decorated function. + If any of these function returns a value it's handled as + if it was the return value from the view and further + request handling is stopped. 
    def process_response(self, response):
        """Can be overridden in order to modify the response object
        before it's sent to the WSGI server.  By default this will
        call all the :meth:`after_request` decorated functions.

        .. versionchanged:: 0.5
           As of Flask 0.5 the functions registered for after request
           execution are called in reverse order of registration.

        :param response: a :attr:`response_class` object.
        :return: a new response object or the same, has to be an
                 instance of :attr:`response_class`.
        """
        ctx = _request_ctx_stack.top
        mod = ctx.request.module
        # a _NullSession means sessions are disabled -- nothing to save
        if not isinstance(ctx.session, _NullSession):
            self.save_session(ctx.session, response)
        funcs = ()
        # module-specific handlers run first, application-wide handlers
        # (stored under the None key) afterwards; each group runs in
        # reverse order of registration
        if mod and mod in self.after_request_funcs:
            funcs = reversed(self.after_request_funcs[mod])
        if None in self.after_request_funcs:
            funcs = chain(funcs, reversed(self.after_request_funcs[None]))
        for handler in funcs:
            # every handler may return a replacement response object
            response = handler(response)
        return response
The example above is + doing exactly the same as this code:: + + ctx = app.request_context(environ) + ctx.push() + try: + do_something_with(request) + finally: + ctx.pop() + + The big advantage of this approach is that you can use it without + the try/finally statement in a shell for interactive testing: + + >>> ctx = app.test_request_context() + >>> ctx.bind() + >>> request.path + u'/' + >>> ctx.unbind() + + .. versionchanged:: 0.3 + Added support for non-with statement usage and `with` statement + is now passed the ctx object. + + :param environ: a WSGI environment + """ + return _RequestContext(self, environ) + + def test_request_context(self, *args, **kwargs): + """Creates a WSGI environment from the given values (see + :func:`werkzeug.create_environ` for more information, this + function accepts the same arguments). + """ + from werkzeug import create_environ + environ_overrides = kwargs.setdefault('environ_overrides', {}) + if self.config.get('SERVER_NAME'): + server_name = self.config.get('SERVER_NAME') + if ':' not in server_name: + http_host, http_port = server_name, '80' + else: + http_host, http_port = server_name.split(':', 1) + + environ_overrides.setdefault('SERVER_NAME', server_name) + environ_overrides.setdefault('HTTP_HOST', server_name) + environ_overrides.setdefault('SERVER_PORT', http_port) + return self.request_context(create_environ(*args, **kwargs)) + + def wsgi_app(self, environ, start_response): + """The actual WSGI application. This is not implemented in + `__call__` so that middlewares can be applied without losing a + reference to the class. So instead of doing this:: + + app = MyMiddleware(app) + + It's a better idea to do this instead:: + + app.wsgi_app = MyMiddleware(app.wsgi_app) + + Then you still have the original application object around and + can continue to call methods on it. + + .. versionchanged:: 0.4 + The :meth:`after_request` functions are now called even if an + error handler took over request processing. 
class ConfigAttribute(object):
    """Descriptor that proxies an attribute to a key in ``obj.config``.

    Accessing the attribute on the class returns the descriptor itself
    so introspection keeps working; accessing it on an instance looks
    the key up in the instance's ``config`` mapping, and assignment
    writes straight through to that mapping.
    """

    def __init__(self, name):
        # the config key this attribute forwards to
        self.__name__ = name

    def __get__(self, obj, type=None):
        # class-level access hands back the descriptor itself
        return self if obj is None else obj.config[self.__name__]

    def __set__(self, obj, value):
        obj.config[self.__name__] = value
    def __init__(self, root_path, defaults=None):
        """Initialize the config mapping.

        :param root_path: base directory against which relative config
                          filenames (e.g. for :meth:`from_pyfile`) are
                          resolved.
        :param defaults: an optional dictionary of default values used
                         to pre-populate the mapping.
        """
        dict.__init__(self, defaults or {})
        # remembered so the file-loading helpers can resolve relative paths
        self.root_path = root_path
    def from_pyfile(self, filename):
        """Updates the values in the config from a Python file.  This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.

        :param filename: the filename of the config.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :raises IOError: if the file cannot be read; the error message is
                         rewritten to point at the configuration file.
        """
        # relative filenames are resolved against the application root;
        # os.path.join leaves absolute filenames untouched
        filename = os.path.join(self.root_path, filename)
        # execute the file inside a throwaway module namespace so that
        # from_object() can pick up the uppercase names afterwards
        d = imp.new_module('config')
        d.__file__ = filename
        try:
            execfile(filename, d.__dict__)
        except IOError, e:
            # re-raise with a friendlier message but the original type
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
class _RequestGlobals(object):
    """Plain attribute namespace; the per-request ``g`` object is an
    instance of this class."""
    pass
    def __init__(self, app, environ):
        """Bind the WSGI *environ* to *app* and pre-compute request state.

        :param app: the Flask application this context belongs to.
        :param environ: the WSGI environment of the incoming request.
        """
        self.app = app
        self.request = app.request_class(environ)
        self.url_adapter = app.create_url_adapter(self.request)
        self.session = app.open_session(self.request)
        # a null session stands in when the app cannot open a real one
        if self.session is None:
            self.session = _NullSession()
        self.g = _RequestGlobals()
        # lazily filled in by get_flashed_messages()
        self.flashes = None

        try:
            url_rule, self.request.view_args = \
                self.url_adapter.match(return_rule=True)
            self.request.url_rule = url_rule
        except HTTPException, e:
            # routing errors are not raised here; they are stored on the
            # request so that dispatching can re-raise them and the
            # normal error handling machinery applies
            self.request.routing_exception = e
def _lookup_object(name):
    """Fetch attribute *name* from the topmost request context.

    The proxies defined below rely on this raising a
    :exc:`RuntimeError` when no request context has been pushed.
    """
    ctx = _request_ctx_stack.top
    if ctx is not None:
        return getattr(ctx, name)
    raise RuntimeError('working outside of request context')
+if not json_available or '\\/' not in json.dumps('/'): + + def _tojson_filter(*args, **kwargs): + if __debug__: + _assert_have_json() + return json.dumps(*args, **kwargs).replace('/', '\\/') +else: + _tojson_filter = json.dumps + + +# what separators does this operating system provide that are not a slash? +# this is used by the send_from_directory function to ensure that nobody is +# able to access files from outside the filesystem. +_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] + if sep not in (None, '/')) + + +def _endpoint_from_view_func(view_func): + """Internal helper that returns the default endpoint for a given + function. This always is the function name. + """ + assert view_func is not None, 'expected view func if endpoint ' \ + 'is not provided.' + return view_func.__name__ + + +def jsonify(*args, **kwargs): + """Creates a :class:`~flask.Response` with the JSON representation of + the given arguments with an `application/json` mimetype. The arguments + to this function are the same as to the :class:`dict` constructor. + + Example usage:: + + @app.route('/_get_current_user') + def get_current_user(): + return jsonify(username=g.user.username, + email=g.user.email, + id=g.user.id) + + This will send a JSON response like this to the browser:: + + { + "username": "admin", + "email": "admin@localhost", + "id": 42 + } + + This requires Python 2.6 or an installed version of simplejson. For + security reasons only objects are supported toplevel. For more + information about this, have a look at :ref:`json-security`. + + .. versionadded:: 0.2 + """ + if __debug__: + _assert_have_json() + return current_app.response_class(json.dumps(dict(*args, **kwargs), + indent=None if request.is_xhr else 2), mimetype='application/json') + + +def make_response(*args): + """Sometimes it is necessary to set additional headers in a view. 
def url_for(endpoint, **values):
    """Generates a URL to the given endpoint with the method provided.
    The endpoint is relative to the active module if modules are in use.

    Here are some examples:

    ==================== ======================= =============================
    Active Module        Target Endpoint         Target Function
    ==================== ======================= =============================
    `None`               ``'index'``             `index` of the application
    `None`               ``'.index'``            `index` of the application
    ``'admin'``          ``'index'``             `index` of the `admin` module
    any                  ``'.index'``            `index` of the application
    any                  ``'admin.index'``       `index` of the `admin` module
    ==================== ======================= =============================

    Variable arguments that are unknown to the target endpoint are appended
    to the generated URL as query arguments.

    For more information, head over to the :ref:`Quickstart <url-building>`.

    :param endpoint: the endpoint of the URL (name of the function)
    :param values: the variable arguments of the URL rule
    :param _external: if set to `True`, an absolute URL is generated.
    """
    ctx = _request_ctx_stack.top
    if '.' not in endpoint:
        # unqualified endpoints are resolved against the active module,
        # if there is one
        mod = ctx.request.module
        if mod is not None:
            endpoint = mod + '.' + endpoint
    elif endpoint.startswith('.'):
        # a leading dot explicitly targets the application level
        endpoint = endpoint[1:]
    # _external is an option for this function, not a URL variable
    external = values.pop('_external', False)
    return ctx.url_adapter.build(endpoint, values, force_external=external)
def flash(message, category='message'):
    """Flashes a message to the next request.  In order to remove the
    flashed message from the session and to display it to the user,
    the template has to call :func:`get_flashed_messages`.

    .. versionchanged:: 0.3
       `category` parameter added.

    :param message: the message to be flashed.
    :param category: the category for the message.  The following values
                     are recommended: ``'message'`` for any kind of message,
                     ``'error'`` for errors, ``'info'`` for information
                     messages and ``'warning'`` for warnings.  However any
                     kind of string can be used as category.
    """
    # the message survives until the next request by riding in the session
    session.setdefault('_flashes', []).append((category, message))

def send_file(filename_or_fp, mimetype=None, as_attachment=False,
              attachment_filename=None, add_etags=True,
              cache_timeout=60 * 60 * 12, conditional=False):
    """Sends the contents of a file to the client.  This will use the
    most efficient method available and configured.  By default it will
    try to use the WSGI server's file_wrapper support.  Alternatively
    you can set the application's :attr:`~Flask.use_x_sendfile` attribute
    to ``True`` to directly emit an `X-Sendfile` header.  This however
    requires support of the underlying webserver for `X-Sendfile`.

    By default it will try to guess the mimetype for you, but you can
    also explicitly provide one.  For extra security you probably want
    to send certain files as attachment (HTML for instance).  The mimetype
    guessing requires a `filename` or an `attachment_filename` to be
    provided.

    Please never pass filenames to this function from user sources without
    checking them first.  Something like this is usually sufficient to
    avoid security problems::

        if '..' in filename or filename.startswith('/'):
            abort(404)

    .. versionadded:: 0.2

    .. versionadded:: 0.5
       The `add_etags`, `cache_timeout` and `conditional` parameters were
       added.  The default behaviour is now to attach etags.

    .. versionchanged:: 0.7
       mimetype guessing and etag support for file objects was
       deprecated because it was unreliable.  Pass a filename if you are
       able to, otherwise attach an etag yourself.  This functionality
       will be removed in Flask 1.0

    :param filename_or_fp: the filename of the file to send.  This is
                           relative to the :attr:`~Flask.root_path` if a
                           relative path is specified.
                           Alternatively a file object might be provided
                           in which case `X-Sendfile` might not work and
                           fall back to the traditional method.  Make sure
                           that the file pointer is positioned at the start
                           of data to send before calling :func:`send_file`.
    :param mimetype: the mimetype of the file if provided, otherwise
                     auto detection happens.
    :param as_attachment: set to `True` if you want to send this file with
                          a ``Content-Disposition: attachment`` header.
    :param attachment_filename: the filename for the attachment if it
                                differs from the file's filename.
    :param add_etags: set to `False` to disable attaching of etags.
    :param conditional: set to `True` to enable conditional responses.
    :param cache_timeout: the timeout in seconds for the headers.
    """
    mtime = None
    if isinstance(filename_or_fp, basestring):
        filename = filename_or_fp
        file = None
    else:
        from warnings import warn
        file = filename_or_fp
        filename = getattr(file, 'name', None)

        # XXX: this behaviour is now deprecated because it was unreliable.
        # removed in Flask 1.0
        if not attachment_filename and not mimetype \
           and isinstance(filename, basestring):
            # BUGFIX: this warning previously claimed the behaviour is
            # "not deprecated" (it is) and referred to a non-existent
            # `attach_filename` parameter (it is `attachment_filename`).
            warn(DeprecationWarning('The filename support for file objects '
                'passed to send_file is now deprecated.  Pass an '
                'attachment_filename if you want mimetypes to be guessed.'),
                stacklevel=2)
        if add_etags:
            warn(DeprecationWarning('In future flask releases etags will no '
                'longer be generated for file objects passed to the send_file '
                'function because this behaviour was unreliable.  Pass '
                'filenames instead if possible, otherwise attach an etag '
                'yourself based on another value'), stacklevel=2)

    if filename is not None:
        if not os.path.isabs(filename):
            filename = os.path.join(current_app.root_path, filename)
    if mimetype is None and (filename or attachment_filename):
        mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'

    headers = Headers()
    if as_attachment:
        if attachment_filename is None:
            if filename is None:
                raise TypeError('filename unavailable, required for '
                                'sending as attachment')
            attachment_filename = os.path.basename(filename)
        headers.add('Content-Disposition', 'attachment',
                    filename=attachment_filename)

    if current_app.use_x_sendfile and filename:
        # the webserver serves the file itself; close our handle so it
        # does not leak
        if file is not None:
            file.close()
        headers['X-Sendfile'] = filename
        data = None
    else:
        if file is None:
            file = open(filename, 'rb')
            mtime = os.path.getmtime(filename)
        data = wrap_file(request.environ, file)

    rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
                                    direct_passthrough=True)

    # if we know the file modification date, we can store it as
    # the time of the last modification.
    if mtime is not None:
        rv.last_modified = int(mtime)

    rv.cache_control.public = True
    if cache_timeout:
        rv.cache_control.max_age = cache_timeout
        rv.expires = int(time() + cache_timeout)

    if add_etags and filename is not None:
        rv.set_etag('flask-%s-%s-%s' % (
            os.path.getmtime(filename),
            os.path.getsize(filename),
            adler32(filename) & 0xffffffff
        ))
        if conditional:
            rv = rv.make_conditional(request)
            # make sure we don't send x-sendfile for servers that
            # ignore the 304 status code for x-sendfile.
            if rv.status_code == 304:
                rv.headers.pop('x-sendfile', None)
    return rv
This + is a secure way to quickly expose static files from an upload folder + or something similar. + + Example usage:: + + @app.route('/uploads/') + def download_file(filename): + return send_from_directory(app.config['UPLOAD_FOLDER'], + filename, as_attachment=True) + + .. admonition:: Sending files and Performance + + It is strongly recommended to activate either `X-Sendfile` support in + your webserver or (if no authentication happens) to tell the webserver + to serve files for the given path on its own without calling into the + web application for improved performance. + + .. versionadded:: 0.5 + + :param directory: the directory where all the files are stored. + :param filename: the filename relative to that directory to + download. + :param options: optional keyword arguments that are directly + forwarded to :func:`send_file`. + """ + filename = posixpath.normpath(filename) + for sep in _os_alt_seps: + if sep in filename: + raise NotFound() + if os.path.isabs(filename) or filename.startswith('../'): + raise NotFound() + filename = os.path.join(directory, filename) + if not os.path.isfile(filename): + raise NotFound() + return send_file(filename, conditional=True, **options) + + +def _get_package_path(name): + """Returns the path to a package or cwd if that cannot be found.""" + try: + return os.path.abspath(os.path.dirname(sys.modules[name].__file__)) + except (KeyError, AttributeError): + return os.getcwd() + + +class _PackageBoundObject(object): + + def __init__(self, import_name): + #: The name of the package or module. Do not change this once + #: it was set by the constructor. + self.import_name = import_name + + #: Where is the app root located? + self.root_path = _get_package_path(self.import_name) + + @property + def has_static_folder(self): + """This is `True` if the package bound object's container has a + folder named ``'static'``. + + .. 
def create_logger(app):
    """Create the logger for the given application.

    The returned logger works like a regular :class:`logging.Logger`
    but reports an effective level of ``DEBUG`` whenever the
    application's ``debug`` flag is set, and its stream handler only
    emits records while the application is in debug mode.  Any handlers
    previously attached to a logger of the same name are removed first.

    :param app: the application; its ``debug``, ``debug_log_format``
                and ``logger_name`` attributes are consulted.
    :return: a :class:`logging.Logger` instance bound to *app*.
    """

    class DebugLogger(Logger):
        # debug mode forces the effective level down to DEBUG without
        # touching the configured level
        def getEffectiveLevel(self):
            if app.debug:
                return DEBUG
            return Logger.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        # drop records entirely unless the application is debugging
        def emit(self, record):
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # just in case that was not a new logger, get rid of all the handlers
    # already attached to it.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
+ if state.app.root_path == module.root_path: + return + path = static_path + if path is None: + path = state.app.static_path + if state.url_prefix: + path = state.url_prefix + path + state.app.add_url_rule(path + '/', + endpoint='%s.static' % module.name, + view_func=module.send_static_file, + subdomain=state.subdomain) + return _register + + +class _ModuleSetupState(object): + + def __init__(self, app, url_prefix=None, subdomain=None): + self.app = app + self.url_prefix = url_prefix + self.subdomain = subdomain + + +class Module(_PackageBoundObject): + """Container object that enables pluggable applications. A module can + be used to organize larger applications. They represent blueprints that, + in combination with a :class:`Flask` object are used to create a large + application. + + A module is like an application bound to an `import_name`. Multiple + modules can share the same import names, but in that case a `name` has + to be provided to keep them apart. If different import names are used, + the rightmost part of the import name is used as name. + + Here's an example structure for a larger application:: + + /myapplication + /__init__.py + /views + /__init__.py + /admin.py + /frontend.py + + The `myapplication/__init__.py` can look like this:: + + from flask import Flask + from myapplication.views.admin import admin + from myapplication.views.frontend import frontend + + app = Flask(__name__) + app.register_module(admin, url_prefix='/admin') + app.register_module(frontend) + + And here's an example view module (`myapplication/views/admin.py`):: + + from flask import Module + + admin = Module(__name__) + + @admin.route('/') + def index(): + pass + + @admin.route('/login') + def login(): + pass + + For a gentle introduction into modules, checkout the + :ref:`working-with-modules` section. + + .. versionadded:: 0.5 + The `static_path` parameter was added and it's now possible for + modules to refer to their own templates and static files. 
See + :ref:`modules-and-resources` for more information. + + .. versionadded:: 0.6 + The `subdomain` parameter was added. + + :param import_name: the name of the Python package or module + implementing this :class:`Module`. + :param name: the internal short name for the module. Unless specified + the rightmost part of the import name + :param url_prefix: an optional string that is used to prefix all the + URL rules of this module. This can also be specified + when registering the module with the application. + :param subdomain: used to set the subdomain setting for URL rules that + do not have a subdomain setting set. + :param static_path: can be used to specify a different path for the + static files on the web. Defaults to ``/static``. + This does not affect the folder the files are served + *from*. + """ + + def __init__(self, import_name, name=None, url_prefix=None, + static_path=None, subdomain=None): + if name is None: + assert '.' in import_name, 'name required if package name ' \ + 'does not point to a submodule' + name = import_name.rsplit('.', 1)[1] + _PackageBoundObject.__init__(self, import_name) + self.name = name + self.url_prefix = url_prefix + self.subdomain = subdomain + self.view_functions = {} + self._register_events = [_register_module(self, static_path)] + + def route(self, rule, **options): + """Like :meth:`Flask.route` but for a module. The endpoint for the + :func:`url_for` function is prefixed with the name of the module. + """ + def decorator(f): + self.add_url_rule(rule, f.__name__, f, **options) + return f + return decorator + + def add_url_rule(self, rule, endpoint=None, view_func=None, **options): + """Like :meth:`Flask.add_url_rule` but for a module. The endpoint for + the :func:`url_for` function is prefixed with the name of the module. + + .. versionchanged:: 0.6 + The `endpoint` argument is now optional and will default to the + function name to consistent with the function of the same name + on the application object. 
+ """ + def register_rule(state): + the_rule = rule + if state.url_prefix: + the_rule = state.url_prefix + rule + options.setdefault('subdomain', state.subdomain) + the_endpoint = endpoint + if the_endpoint is None: + the_endpoint = _endpoint_from_view_func(view_func) + state.app.add_url_rule(the_rule, '%s.%s' % (self.name, + the_endpoint), + view_func, **options) + self._record(register_rule) + + def endpoint(self, endpoint): + """Like :meth:`Flask.endpoint` but for a module.""" + def decorator(f): + self.view_functions[endpoint] = f + return f + return decorator + + def before_request(self, f): + """Like :meth:`Flask.before_request` but for a module. This function + is only executed before each request that is handled by a function of + that module. + """ + self._record(lambda s: s.app.before_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def before_app_request(self, f): + """Like :meth:`Flask.before_request`. Such a function is executed + before each request, even if outside of a module. + """ + self._record(lambda s: s.app.before_request_funcs + .setdefault(None, []).append(f)) + return f + + def after_request(self, f): + """Like :meth:`Flask.after_request` but for a module. This function + is only executed after each request that is handled by a function of + that module. + """ + self._record(lambda s: s.app.after_request_funcs + .setdefault(self.name, []).append(f)) + return f + + def after_app_request(self, f): + """Like :meth:`Flask.after_request` but for a module. Such a function + is executed after each request, even if outside of the module. + """ + self._record(lambda s: s.app.after_request_funcs + .setdefault(None, []).append(f)) + return f + + def context_processor(self, f): + """Like :meth:`Flask.context_processor` but for a module. This + function is only executed for requests handled by a module. 
+ """ + self._record(lambda s: s.app.template_context_processors + .setdefault(self.name, []).append(f)) + return f + + def app_context_processor(self, f): + """Like :meth:`Flask.context_processor` but for a module. Such a + function is executed each request, even if outside of the module. + """ + self._record(lambda s: s.app.template_context_processors + .setdefault(None, []).append(f)) + return f + + def app_errorhandler(self, code): + """Like :meth:`Flask.errorhandler` but for a module. This + handler is used for all requests, even if outside of the module. + + .. versionadded:: 0.4 + """ + def decorator(f): + self._record(lambda s: s.app.errorhandler(code)(f)) + return f + return decorator + + def _record(self, func): + self._register_events.append(func) diff --git a/libs/flask/session.py b/libs/flask/session.py new file mode 100644 index 0000000..df2d877 --- /dev/null +++ b/libs/flask/session.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +""" + flask.session + ~~~~~~~~~~~~~ + + Implements cookie based sessions based on Werkzeug's secure cookie + system. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +from werkzeug.contrib.securecookie import SecureCookie + + +class Session(SecureCookie): + """Expands the session with support for switching between permanent + and non-permanent sessions. + """ + + def _get_permanent(self): + return self.get('_permanent', False) + + def _set_permanent(self, value): + self['_permanent'] = bool(value) + + permanent = property(_get_permanent, _set_permanent) + del _get_permanent, _set_permanent + + +class _NullSession(Session): + """Class used to generate nicer error messages if sessions are not + available. Will still allow read-only access to the empty session + but fail on setting. + """ + + def _fail(self, *args, **kwargs): + raise RuntimeError('the session is unavailable because no secret ' + 'key was set. 
Set the secret_key on the ' + 'application to something unique and secret.') + __setitem__ = __delitem__ = clear = pop = popitem = \ + update = setdefault = _fail + del _fail diff --git a/libs/flask/signals.py b/libs/flask/signals.py new file mode 100644 index 0000000..22447c7 --- /dev/null +++ b/libs/flask/signals.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +""" + flask.signals + ~~~~~~~~~~~~~ + + Implements signals based on blinker if available, otherwise + falls silently back to a noop + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +signals_available = False +try: + from blinker import Namespace + signals_available = True +except ImportError: + class Namespace(object): + def signal(self, name, doc=None): + return _FakeSignal(name, doc) + + class _FakeSignal(object): + """If blinker is unavailable, create a fake class with the same + interface that allows sending of signals but will fail with an + error on anything else. Instead of doing anything on send, it + will just ignore the arguments and do nothing instead. + """ + + def __init__(self, name, doc=None): + self.name = name + self.__doc__ = doc + def _fail(self, *args, **kwargs): + raise RuntimeError('signalling support is unavailable ' + 'because the blinker library is ' + 'not installed.') + send = lambda *a, **kw: None + connect = disconnect = has_receivers_for = receivers_for = \ + temporarily_connected_to = _fail + del _fail + +# the namespace for code signals. If you are not flask code, do +# not put signals in here. Create your own namespace instead. +_signals = Namespace() + + +# core signals. 
For usage examples grep the sourcecode or consult +# the API documentation in docs/api.rst as well as docs/signals.rst +template_rendered = _signals.signal('template-rendered') +request_started = _signals.signal('request-started') +request_finished = _signals.signal('request-finished') +got_request_exception = _signals.signal('got-request-exception') diff --git a/libs/flask/templating.py b/libs/flask/templating.py new file mode 100644 index 0000000..4db03b7 --- /dev/null +++ b/libs/flask/templating.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +""" + flask.templating + ~~~~~~~~~~~~~~~~ + + Implements the bridge to Jinja2. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +import posixpath +from jinja2 import BaseLoader, TemplateNotFound + +from .globals import _request_ctx_stack +from .signals import template_rendered + + +def _default_template_ctx_processor(): + """Default template context processor. Injects `request`, + `session` and `g`. + """ + reqctx = _request_ctx_stack.top + return dict( + config=reqctx.app.config, + request=reqctx.request, + session=reqctx.session, + g=reqctx.g + ) + + +class _DispatchingJinjaLoader(BaseLoader): + """A loader that looks for templates in the application and all + the module folders. 
+ """ + + def __init__(self, app): + self.app = app + + def get_source(self, environment, template): + template = posixpath.normpath(template) + if template.startswith('../'): + raise TemplateNotFound(template) + loader = None + try: + module, name = template.split('/', 1) + loader = self.app.modules[module].jinja_loader + except (ValueError, KeyError): + pass + # if there was a module and it has a loader, try this first + if loader is not None: + try: + return loader.get_source(environment, name) + except TemplateNotFound: + pass + # fall back to application loader if module failed + return self.app.jinja_loader.get_source(environment, template) + + def list_templates(self): + result = self.app.jinja_loader.list_templates() + for name, module in self.app.modules.iteritems(): + if module.jinja_loader is not None: + for template in module.jinja_loader.list_templates(): + result.append('%s/%s' % (name, template)) + return result + + +def _render(template, context, app): + """Renders the template and fires the signal""" + rv = template.render(context) + template_rendered.send(app, template=template, context=context) + return rv + + +def render_template(template_name, **context): + """Renders a template from the template folder with the given + context. + + :param template_name: the name of the template to be rendered + :param context: the variables that should be available in the + context of the template. + """ + ctx = _request_ctx_stack.top + ctx.app.update_template_context(context) + return _render(ctx.app.jinja_env.get_template(template_name), + context, ctx.app) + + +def render_template_string(source, **context): + """Renders a template from the given template source string + with the given context. + + :param template_name: the sourcecode of the template to be + rendered + :param context: the variables that should be available in the + context of the template. 
+ """ + ctx = _request_ctx_stack.top + ctx.app.update_template_context(context) + return _render(ctx.app.jinja_env.from_string(source), + context, ctx.app) diff --git a/libs/flask/testing.py b/libs/flask/testing.py new file mode 100644 index 0000000..8423733 --- /dev/null +++ b/libs/flask/testing.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +""" + flask.testing + ~~~~~~~~~~~~~ + + Implements test support helpers. This module is lazily imported + and usually not used in production environments. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +from werkzeug import Client, EnvironBuilder +from flask import _request_ctx_stack + + +class FlaskClient(Client): + """Works like a regular Werkzeug test client but has some + knowledge about how Flask works to defer the cleanup of the + request context stack to the end of a with body when used + in a with statement. + """ + + preserve_context = context_preserved = False + + def open(self, *args, **kwargs): + if self.context_preserved: + _request_ctx_stack.pop() + self.context_preserved = False + kwargs.setdefault('environ_overrides', {}) \ + ['flask._preserve_context'] = self.preserve_context + + as_tuple = kwargs.pop('as_tuple', False) + buffered = kwargs.pop('buffered', False) + follow_redirects = kwargs.pop('follow_redirects', False) + + builder = EnvironBuilder(*args, **kwargs) + + if self.application.config.get('SERVER_NAME'): + server_name = self.application.config.get('SERVER_NAME') + if ':' not in server_name: + http_host, http_port = server_name, None + else: + http_host, http_port = server_name.split(':', 1) + if builder.base_url == 'http://localhost/': + # Default Generated Base URL + if http_port != None: + builder.host = http_host + ':' + http_port + else: + builder.host = http_host + old = _request_ctx_stack.top + try: + return Client.open(self, builder, + as_tuple=as_tuple, + buffered=buffered, + follow_redirects=follow_redirects) + finally: + 
self.context_preserved = _request_ctx_stack.top is not old + + def __enter__(self): + self.preserve_context = True + return self + + def __exit__(self, exc_type, exc_value, tb): + self.preserve_context = False + if self.context_preserved: + _request_ctx_stack.pop() diff --git a/libs/flask/wrappers.py b/libs/flask/wrappers.py new file mode 100644 index 0000000..4db1e78 --- /dev/null +++ b/libs/flask/wrappers.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +""" + flask.wrappers + ~~~~~~~~~~~~~~ + + Implements the WSGI wrappers (request and response). + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" + +from werkzeug import Request as RequestBase, Response as ResponseBase, \ + cached_property + +from .helpers import json, _assert_have_json +from .globals import _request_ctx_stack + + +class Request(RequestBase): + """The request object used by default in Flask. Remembers the + matched endpoint and view arguments. + + It is what ends up as :class:`~flask.request`. If you want to replace + the request object used you can subclass this and set + :attr:`~flask.Flask.request_class` to your subclass. + """ + + #: the internal URL rule that matched the request. This can be + #: useful to inspect which methods are allowed for the URL from + #: a before/after handler (``request.url_rule.methods``) etc. + #: + #: .. versionadded:: 0.6 + url_rule = None + + #: a dict of view arguments that matched the request. If an exception + #: happened when matching, this will be `None`. + view_args = None + + #: if matching the URL failed, this is the exception that will be + #: raised / was raised as part of the request handling. This is + #: usually a :exc:`~werkzeug.exceptions.NotFound` exception or + #: something similar. 
+ routing_exception = None + + @property + def max_content_length(self): + """Read-only view of the `MAX_CONTENT_LENGTH` config key.""" + ctx = _request_ctx_stack.top + if ctx is not None: + return ctx.app.config['MAX_CONTENT_LENGTH'] + + @property + def endpoint(self): + """The endpoint that matched the request. This in combination with + :attr:`view_args` can be used to reconstruct the same or a + modified URL. If an exception happened when matching, this will + be `None`. + """ + if self.url_rule is not None: + return self.url_rule.endpoint + + @property + def module(self): + """The name of the current module""" + if self.url_rule and '.' in self.url_rule.endpoint: + return self.url_rule.endpoint.rsplit('.', 1)[0] + + @cached_property + def json(self): + """If the mimetype is `application/json` this will contain the + parsed JSON data. + """ + if __debug__: + _assert_have_json() + if self.mimetype == 'application/json': + return json.loads(self.data) + + +class Response(ResponseBase): + """The response object that is used by default in Flask. Works like the + response object from Werkzeug but is set to have an HTML mimetype by + default. Quite often you don't have to create this object yourself because + :meth:`~flask.Flask.make_response` will take care of that for you. + + If you want to replace the response object used you can subclass this and + set :attr:`~flask.Flask.response_class` to your subclass. 
+ """ + default_mimetype = 'text/html' diff --git a/libs/getmeta.py b/libs/getmeta.py new file mode 100644 index 0000000..4913a11 --- /dev/null +++ b/libs/getmeta.py @@ -0,0 +1,206 @@ +from hachoir_parser import createParser +from hachoir_metadata import extractMetadata +from hachoir_core.cmd_line import unicodeFilename + +import datetime +import json +import sys +import re + + +def getMetadata(filename): + filename, realname = unicodeFilename(filename), filename + parser = createParser(filename, realname) + try: + metadata = extractMetadata(parser) + except: + return None + + if metadata is not None: + metadata = metadata.exportPlaintext() + return metadata + return None + +def parseMetadata(meta, jsonsafe=True): + ''' + Return a dict of section headings like 'Video stream' or 'Audio stream'. Each key will have a list of dicts. + This supports multiple video/audio/subtitle/whatever streams per stream type. Each element in the list of streams + will he a dict with keys like 'Image height' and 'Compression'...anything that hachoir is able to extract. + + An example output: + {'Audio stream': [{u'Channel': u'6', + u'Compression': u'A_AC3', + u'Sample rate': u'48.0 kHz'}], + u'Common': [{u'Creation date': u'2008-03-20 09:09:43', + u'Duration': u'1 hour 40 min 6 sec', + u'Endianness': u'Big endian', + u'MIME type': u'video/x-matroska', + u'Producer': u'libebml v0.7.7 + libmatroska v0.8.1'}], + 'Video stream': [{u'Compression': u'V_MPEG4/ISO/AVC', + u'Image height': u'688 pixels', + u'Image width': u'1280 pixels', + u'Language': u'English'}]} + ''' + if not meta: + return + sections = {} + what = [] + for line in meta: + #if line doesn't start with "- " it is a section heading + if line[:2] != "- ": + section = line.strip(":").lower() + + #lets collapse multiple stream headings into one... 
+ search = re.search(r'#\d+\Z', section) + if search: + section = re.sub(search.group(), '', section).strip() + + if section not in sections: + sections[section] = [dict()] + else: + sections[section].append(dict()) + else: + #This isn't a section heading, so we put it in the last section heading we found. + #meta always starts out with a section heading so 'section' will always be defined + i = line.find(":") + key = line[2:i].lower() + value = _parseValue(section, key, line[i+2:]) + + if value is None: + value = line[i+2:] + + if jsonsafe: + try: + v = json.dumps(value) + except TypeError: + value = str(value) + + sections[section][-1][key] = value + + + + return sections + +def _parseValue(section, key, value, jsonsafe = True): + ''' + Tediously check all the types that we know about (checked over 7k videos to find these) + and convert them to python native types. + + If jsonsafe is True, we'll make json-unfriendly types like datetime into json-friendly. + ''' + + date_search = re.search("\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d", value) + + if key == 'bit rate': + ret = _parseBitRate(value.lower()) + elif key == 'bits/sample' or key == 'bits/pixel': + try: + bits = int(value.split()[0]) + ret = bits + except: + ret = None + elif key == 'channel': + if value == 'stereo': + ret = 2 + elif value == 'mono': + ret = 1 + else: + try: + channels = int(value) + ret = channels + except: + ret = None + elif key == 'compression': + ret = _parseCompression(value) + elif key == 'compression rate': + try: + ret = float(value.split('x')[0]) + except: + ret = None + elif key == 'duration': + try: + ret = _parseDuration(value) + except: + ret = None + elif key == 'sample rate': + try: + ret = float(value.split()[0]) * 1000 + except: + ret = None + elif key == 'frame rate': + try: + ret = float(value.split()[0]) + except: + pass + elif key == 'image height' or key == 'image width': + pixels = re.match("(?P\d{1,4}) pixel", value) + if pixels: + ret = int(pixels.group('pixels')) + else: 
+ ret = None + elif date_search: + try: + ret = datetime.datetime.strptime(date_search.group(), "%Y-%m-%d %H:%M:%S") + except: + ret = None + else: + #If it's something we don't know about... + ret = None + + return ret + +def _parseDuration(value): + t = re.search(r"((?P\d+) hour(s|))? ?((?P\d+) min)? ?((?P\d+) sec)? ?((?P\d+) ms)?", value) + if t: + hour = 0 if not t.group('hour') else int(t.group('hour')) + min = 0 if not t.group('min') else int(t.group('min')) + sec = 0 if not t.group('sec') else int(t.group('sec')) + ms = 0 if not t.group('ms') else int(t.group('ms')) + return datetime.timedelta(hours = hour, minutes = min, seconds = sec, milliseconds = ms) + +def _parseCompression(value): + codecs = { + 'v_mpeg4/iso/avc': 'AVC', + 'x264': 'AVC', + 'divx': 'divx', + 'xvid': 'xvid', + 'v_ms/vfw/fourcc': 'vfw', + 'vorbis': 'vorbis', + 'xvid': 'xvid', + 'mpeg layer 3': 'mp3', + 'a_dts': 'DTS', + 'a_aac': 'AAC', + 'a_truehd': 'TRUEHD', + 'microsoft mpeg': 'MPEG', + 'ac3': 'AC3', + 'wvc1': 'WVC1', + 'pulse code modulation': 'PCM', + 'pcm': 'PCM', + 'windows media audio': 'WMA', + 'windows media video': 'WMV', + 's_text/ascii': 'ASCII', + 's_text/utf8': 'UTF8', + 's_text/ssa': 'SSA', + 's_text/ass': 'ASS' + } + for codec in codecs: + if codec in value.lower(): + return codecs[codec] + + +def _parseBitRate(value): + try: + bitrate = float(value.split()[0]) + except: + return None + + if 'kbit' in value.lower(): + multi = 1000 + elif 'mbit' in value.lower(): + multi = 1000 * 1000 + else: + return None + + return bitrate * multi + +print json.dumps(parseMetadata(getMetadata(sys.argv[1]))) \ No newline at end of file diff --git a/libs/hachoir_core/__init__.py b/libs/hachoir_core/__init__.py new file mode 100644 index 0000000..df1988f --- /dev/null +++ b/libs/hachoir_core/__init__.py @@ -0,0 +1,2 @@ +from hachoir_core.version import VERSION as __version__, PACKAGE, WEBSITE, LICENSE + diff --git a/libs/hachoir_core/benchmark.py b/libs/hachoir_core/benchmark.py new file 
mode 100644 index 0000000..f823cfa --- /dev/null +++ b/libs/hachoir_core/benchmark.py @@ -0,0 +1,210 @@ +from hachoir_core.tools import humanDurationNanosec +from hachoir_core.i18n import _ +from math import floor +from time import time + +class BenchmarkError(Exception): + """ + Error during benchmark, use str(err) to format it as string. + """ + def __init__(self, message): + Exception.__init__(self, + "Benchmark internal error: %s" % message) + +class BenchmarkStat: + """ + Benchmark statistics. This class automatically computes minimum value, + maximum value and sum of all values. + + Methods: + - append(value): append a value + - getMin(): minimum value + - getMax(): maximum value + - getSum(): sum of all values + - __len__(): get number of elements + - __nonzero__(): isn't empty? + """ + def __init__(self): + self._values = [] + + def append(self, value): + self._values.append(value) + try: + self._min = min(self._min, value) + self._max = max(self._max, value) + self._sum += value + except AttributeError: + self._min = value + self._max = value + self._sum = value + + def __len__(self): + return len(self._values) + + def __nonzero__(self): + return bool(self._values) + + def getMin(self): + return self._min + + def getMax(self): + return self._max + + def getSum(self): + return self._sum + +class Benchmark: + def __init__(self, max_time=5.0, + min_count=5, max_count=None, progress_time=1.0): + """ + Constructor: + - max_time: Maximum wanted duration of the whole benchmark + (default: 5 seconds, minimum: 1 second). + - min_count: Minimum number of function calls to get good statistics + (defaut: 5, minimum: 1). + - progress_time: Time between each "progress" message + (default: 1 second, minimum: 250 ms). + - max_count: Maximum number of function calls (default: no limit). + - verbose: Is verbose? (default: False) + - disable_gc: Disable garbage collector? 
(default: False) + """ + self.max_time = max(max_time, 1.0) + self.min_count = max(min_count, 1) + self.max_count = max_count + self.progress_time = max(progress_time, 0.25) + self.verbose = False + self.disable_gc = False + + def formatTime(self, value): + """ + Format a time delta to string: use humanDurationNanosec() + """ + return humanDurationNanosec(value * 1000000000) + + def displayStat(self, stat): + """ + Display statistics to stdout: + - best time (minimum) + - average time (arithmetic average) + - worst time (maximum) + - total time (sum) + + Use arithmetic avertage instead of geometric average because + geometric fails if any value is zero (returns zero) and also + because floating point multiplication lose precision with many + values. + """ + average = stat.getSum() / len(stat) + values = (stat.getMin(), average, stat.getMax(), stat.getSum()) + values = tuple(self.formatTime(value) for value in values) + print _("Benchmark: best=%s average=%s worst=%s total=%s") \ + % values + + def _runOnce(self, func, args, kw): + before = time() + func(*args, **kw) + after = time() + return after - before + + def _run(self, func, args, kw): + """ + Call func(*args, **kw) as many times as needed to get + good statistics. Algorithm: + - call the function once + - compute needed number of calls + - and then call function N times + + To compute number of calls, parameters are: + - time of first function call + - minimum number of calls (min_count attribute) + - maximum test time (max_time attribute) + + Notice: The function will approximate number of calls. + """ + # First call of the benchmark + stat = BenchmarkStat() + diff = self._runOnce(func, args, kw) + best = diff + stat.append(diff) + total_time = diff + + # Compute needed number of calls + count = int(floor(self.max_time / diff)) + count = max(count, self.min_count) + if self.max_count: + count = min(count, self.max_count) + + # Not other call? 
Just exit + if count == 1: + return stat + estimate = diff * count + if self.verbose: + print _("Run benchmark: %s calls (estimate: %s)") \ + % (count, self.formatTime(estimate)) + + display_progress = self.verbose and (1.0 <= estimate) + total_count = 1 + while total_count < count: + # Run benchmark and display each result + if display_progress: + print _("Result %s/%s: %s (best: %s)") % \ + (total_count, count, + self.formatTime(diff), self.formatTime(best)) + part = count - total_count + + # Will takes more than one second? + average = total_time / total_count + if self.progress_time < part * average: + part = max( int(self.progress_time / average), 1) + for index in xrange(part): + diff = self._runOnce(func, args, kw) + stat.append(diff) + total_time += diff + best = min(diff, best) + total_count += part + if display_progress: + print _("Result %s/%s: %s (best: %s)") % \ + (count, count, + self.formatTime(diff), self.formatTime(best)) + return stat + + def validateStat(self, stat): + """ + Check statistics and raise a BenchmarkError if they are invalid. + Example of tests: reject empty stat, reject stat with only nul values. + """ + if not stat: + raise BenchmarkError("empty statistics") + if not stat.getSum(): + raise BenchmarkError("nul statistics") + + def run(self, func, *args, **kw): + """ + Run function func(*args, **kw), validate statistics, + and display the result on stdout. + + Disable garbage collector if asked too. 
+ """ + + # Disable garbarge collector is needed and if it does exist + # (Jython 2.2 don't have it for example) + if self.disable_gc: + try: + import gc + except ImportError: + self.disable_gc = False + if self.disable_gc: + gc_enabled = gc.isenabled() + gc.disable() + else: + gc_enabled = False + + # Run the benchmark + stat = self._run(func, args, kw) + if gc_enabled: + gc.enable() + + # Validate and display stats + self.validateStat(stat) + self.displayStat(stat) + diff --git a/libs/hachoir_core/bits.py b/libs/hachoir_core/bits.py new file mode 100644 index 0000000..d5b31a0 --- /dev/null +++ b/libs/hachoir_core/bits.py @@ -0,0 +1,277 @@ +""" +Utilities to convert integers and binary strings to binary (number), binary +string, number, hexadecimal, etc. +""" + +from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN +from hachoir_core.compatibility import reversed +from itertools import chain, repeat +from struct import calcsize, unpack, error as struct_error + +def swap16(value): + """ + Swap byte between big and little endian of a 16 bits integer. + + >>> "%x" % swap16(0x1234) + '3412' + """ + return (value & 0xFF) << 8 | (value >> 8) + +def swap32(value): + """ + Swap byte between big and little endian of a 32 bits integer. + + >>> "%x" % swap32(0x12345678) + '78563412' + """ + value = long(value) + return ((value & 0x000000FFL) << 24) \ + | ((value & 0x0000FF00L) << 8) \ + | ((value & 0x00FF0000L) >> 8) \ + | ((value & 0xFF000000L) >> 24) + +def bin2long(text, endian): + """ + Convert binary number written in a string into an integer. + Skip characters differents than "0" and "1". 
+ + >>> bin2long("110", BIG_ENDIAN) + 6 + >>> bin2long("110", LITTLE_ENDIAN) + 3 + >>> bin2long("11 00", LITTLE_ENDIAN) + 3 + """ + assert endian in (LITTLE_ENDIAN, BIG_ENDIAN) + bits = [ (ord(character)-ord("0")) \ + for character in text if character in "01" ] + assert len(bits) != 0 + if endian is not BIG_ENDIAN: + bits = reversed(bits) + value = 0 + for bit in bits: + value *= 2 + value += bit + return value + +def str2hex(value, prefix="", glue=u"", format="%02X"): + r""" + Convert binary string in hexadecimal (base 16). + + >>> str2hex("ABC") + u'414243' + >>> str2hex("\xF0\xAF", glue=" ") + u'F0 AF' + >>> str2hex("ABC", prefix="0x") + u'0x414243' + >>> str2hex("ABC", format=r"\x%02X") + u'\\x41\\x42\\x43' + """ + if isinstance(glue, str): + glue = unicode(glue) + if 0 < len(prefix): + text = [prefix] + else: + text = [] + for character in value: + text.append(format % ord(character)) + return glue.join(text) + +def countBits(value): + """ + Count number of bits needed to store a (positive) integer number. + + >>> countBits(0) + 1 + >>> countBits(1000) + 10 + >>> countBits(44100) + 16 + >>> countBits(18446744073709551615) + 64 + """ + assert 0 <= value + count = 1 + bits = 1 + while (1 << bits) <= value: + count += bits + value >>= bits + bits <<= 1 + while 2 <= value: + if bits != 1: + bits >>= 1 + else: + bits -= 1 + while (1 << bits) <= value: + count += bits + value >>= bits + return count + +def byte2bin(number, classic_mode=True): + """ + Convert a byte (integer in 0..255 range) to a binary string. + If classic_mode is true (default value), reverse bits. + + >>> byte2bin(10) + '00001010' + >>> byte2bin(10, False) + '01010000' + """ + text = "" + for i in range(0, 8): + if classic_mode: + mask = 1 << (7-i) + else: + mask = 1 << i + if (number & mask) == mask: + text += "1" + else: + text += "0" + return text + +def long2raw(value, endian, size=None): + r""" + Convert a number (positive and not nul) to a raw string. 
+ If size is given, add nul bytes to fill to size bytes. + + >>> long2raw(0x1219, BIG_ENDIAN) + '\x12\x19' + >>> long2raw(0x1219, BIG_ENDIAN, 4) # 32 bits + '\x00\x00\x12\x19' + >>> long2raw(0x1219, LITTLE_ENDIAN, 4) # 32 bits + '\x19\x12\x00\x00' + """ + assert (not size and 0 < value) or (0 <= value) + assert endian in (LITTLE_ENDIAN, BIG_ENDIAN) + text = [] + while (value != 0 or text == ""): + byte = value % 256 + text.append( chr(byte) ) + value >>= 8 + if size: + need = max(size - len(text), 0) + else: + need = 0 + if need: + if endian is BIG_ENDIAN: + text = chain(repeat("\0", need), reversed(text)) + else: + text = chain(text, repeat("\0", need)) + else: + if endian is BIG_ENDIAN: + text = reversed(text) + return "".join(text) + +def long2bin(size, value, endian, classic_mode=False): + """ + Convert a number into bits (in a string): + - size: size in bits of the number + - value: positive (or nul) number + - endian: BIG_ENDIAN (most important bit first) + or LITTLE_ENDIAN (least important bit first) + - classic_mode (default: False): reverse each packet of 8 bits + + >>> long2bin(16, 1+4 + (1+8)*256, BIG_ENDIAN) + '10100000 10010000' + >>> long2bin(16, 1+4 + (1+8)*256, BIG_ENDIAN, True) + '00000101 00001001' + >>> long2bin(16, 1+4 + (1+8)*256, LITTLE_ENDIAN) + '00001001 00000101' + >>> long2bin(16, 1+4 + (1+8)*256, LITTLE_ENDIAN, True) + '10010000 10100000' + """ + text = "" + assert endian in (LITTLE_ENDIAN, BIG_ENDIAN) + assert 0 <= value + for index in xrange(size): + if (value & 1) == 1: + text += "1" + else: + text += "0" + value >>= 1 + if endian is LITTLE_ENDIAN: + text = text[::-1] + result = "" + while len(text) != 0: + if len(result) != 0: + result += " " + if classic_mode: + result += text[7::-1] + else: + result += text[:8] + text = text[8:] + return result + +def str2bin(value, classic_mode=True): + r""" + Convert binary string to binary numbers. + If classic_mode is true (default value), reverse bits. 
def _createStructFormat():
    """
    Create a dictionnary (endian, size_byte) => struct format used
    by str2long() to convert raw data to positive integer.
    """
    # B/H/I/L/Q cover the common unsigned integer sizes. When two codes
    # have the same size on a platform (e.g. I and L), the later one
    # simply overwrites the slot with an equivalent format string.
    format = {
        BIG_ENDIAN: {},
        LITTLE_ENDIAN: {},
    }
    for struct_format in "BHILQ":
        try:
            size = calcsize(struct_format)
            format[BIG_ENDIAN][size] = '>%s' % struct_format
            format[LITTLE_ENDIAN][size] = '<%s' % struct_format
        except struct_error:
            pass
    return format
# Precomputed once at import time; read-only afterwards.
_struct_format = _createStructFormat()

def str2long(data, endian):
    r"""
    Convert a raw data (type 'str') into a long integer.

    >>> chr(str2long('*', BIG_ENDIAN))
    '*'
    >>> str2long("\x00\x01\x02\x03", BIG_ENDIAN) == 0x10203
    True
    >>> str2long("\x2a\x10", LITTLE_ENDIAN) == 0x102a
    True
    >>> str2long("\xff\x14\x2a\x10", BIG_ENDIAN) == 0xff142a10
    True
    >>> str2long("\x00\x01\x02\x03", LITTLE_ENDIAN) == 0x3020100
    True
    >>> str2long("\xff\x14\x2a\x10\xab\x00\xd9\x0e", BIG_ENDIAN) == 0xff142a10ab00d90e
    True
    >>> str2long("\xff\xff\xff\xff\xff\xff\xff\xff", BIG_ENDIAN) == (2**64-1)
    True
    """
    assert 1 <= len(data) <= 32   # arbitrary limit: 256 bits
    # Fast path: let the struct module decode sizes it knows (1/2/4/8).
    try:
        return unpack(_struct_format[endian][len(data)], data)[0]
    except KeyError:
        pass

    # Slow path for any other length: accumulate bytes manually,
    # least-significant byte first.
    assert endian in (BIG_ENDIAN, LITTLE_ENDIAN)
    shift = 0
    value = 0
    if endian is BIG_ENDIAN:
        data = reversed(data)
    for character in data:
        byte = ord(character)
        value += (byte << shift)
        shift += 8
    return value
def getHachoirOptions(parser):
    """
    Create an option group (type optparse.OptionGroup) of Hachoir
    library options.
    """
    def setLogFilename(*args):
        # optparse callback signature is (option, opt_str, value, parser);
        # args[2] is the value passed to --log.
        log.setFilename(args[2])

    common = OptionGroup(parser, _("Hachoir library"), \
        "Configure Hachoir library")
    common.add_option("--verbose", help=_("Verbose mode"),
        default=False, action="store_true")
    common.add_option("--log", help=_("Write log in a file"),
        type="string", action="callback", callback=setLogFilename)
    common.add_option("--quiet", help=_("Quiet mode (don't display warning)"),
        default=False, action="store_true")
    common.add_option("--debug", help=_("Debug mode"),
        default=False, action="store_true")
    return common

def configureHachoir(option):
    """
    Configure Hachoir global flags using "option" (value from optparse).
    Only enables flags; never resets one that is already set.
    """
    if option.quiet:
        config.quiet = True
    if option.verbose:
        config.verbose = True
    if option.debug:
        config.debug = True

def unicodeFilename(filename, charset=None):
    """
    Decode a byte-string filename to unicode using *charset* (defaults to
    the terminal charset); fall back to a printable approximation when the
    bytes are not valid in that charset.
    """
    if not charset:
        charset = getTerminalCharset()
    try:
        return unicode(filename, charset)
    except UnicodeDecodeError:
        return makePrintable(filename, charset, to_unicode=True)
+ +>>> int(True), int(False) # int value +(1, 0) +>>> int(False | True) # and binary operator +1 +>>> int(True & False) # or binary operator +0 +>>> int(not(True) == False) # not binary operator +1 + +Warning: on Python smaller than 2.3, True and False are aliases to +number 1 and 0. So "print True" will displays 1 and not True. + + +any() function +============== + +any() returns True if at least one items is True, or False otherwise. + +>>> any([False, True]) +True +>>> any([True, True]) +True +>>> any([False, False]) +False + + +all() function +============== + +all() returns True if all items are True, or False otherwise. +This function is just apply binary and operator (&) on all values. + +>>> all([True, True]) +True +>>> all([False, True]) +False +>>> all([False, False]) +False + + +has_yield boolean +================= + +has_yield: boolean which indicatese if the interpreter supports yield keyword. +yield keyworkd is available since Python 2.0. + + +has_yield boolean +================= + +has_slice: boolean which indicates if the interpreter supports slices with step +argument or not. slice with step is available since Python 2.3. + + +reversed() and sorted() function +================================ + +reversed() and sorted() function has been introduced in Python 2.4. +It's should returns a generator, but this module it may be a list. 
def isinstance20(a, typea):
    """
    isinstance() replacement with Python 2.0 behaviour: the second
    argument must be a single class or type (tuples are rejected).

    Returns True if *a* is an instance of *typea* or of a subclass.
    """
    if type(typea) != type(type):
        raise TypeError("TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types")
    # Bug fix: the previous code returned ``type(typea) != typea``, which
    # compared the *type argument* against its own metaclass — True for any
    # ordinary class, whatever the first argument was. Compare the actual
    # object's type instead (subclasses count as instances).
    return issubclass(type(a), typea)
# --- sorted() from Python 2.4 ---
try:
    from __builtin__ import sorted
except ImportError:
    def sorted(data):
        """
        Return a sorted copy of *data* (shim for Python < 2.4).
        The input sequence is not modified.
        """
        sorted_data = copy.copy(data)
        sorted_data.sort()
        # Bug fix: the previous code returned the function object ``sorted``
        # itself instead of the sorted copy.
        return sorted_data
+""" + +from hachoir_core.error import HachoirError +from hachoir_core.i18n import _ + +class UniqKeyError(HachoirError): + """ + Error raised when a value is set whereas the key already exist in a + dictionnary. + """ + pass + +class Dict(object): + """ + This class works like classic Python dict() but has an important method: + __iter__() which allow to iterate into the dictionnary _values_ (and not + keys like Python's dict does). + """ + def __init__(self, values=None): + self._index = {} # key => index + self._key_list = [] # index => key + self._value_list = [] # index => value + if values: + for key, value in values: + self.append(key,value) + + def _getValues(self): + return self._value_list + values = property(_getValues) + + def index(self, key): + """ + Search a value by its key and returns its index + Returns None if the key doesn't exist. + + >>> d=Dict( (("two", "deux"), ("one", "un")) ) + >>> d.index("two") + 0 + >>> d.index("one") + 1 + >>> d.index("three") is None + True + """ + return self._index.get(key) + + def __getitem__(self, key): + """ + Get item with specified key. + To get a value by it's index, use mydict.values[index] + + >>> d=Dict( (("two", "deux"), ("one", "un")) ) + >>> d["one"] + 'un' + """ + return self._value_list[self._index[key]] + + def __setitem__(self, key, value): + self._value_list[self._index[key]] = value + + def append(self, key, value): + """ + Append new value + """ + if key in self._index: + raise UniqKeyError(_("Key '%s' already exists") % key) + self._index[key] = len(self._value_list) + self._key_list.append(key) + self._value_list.append(value) + + def __len__(self): + return len(self._value_list) + + def __contains__(self, key): + return key in self._index + + def __iter__(self): + return iter(self._value_list) + + def iteritems(self): + """ + Create a generator to iterate on: (key, value). + + >>> d=Dict( (("two", "deux"), ("one", "un")) ) + >>> for key, value in d.iteritems(): + ... 
print "%r: %r" % (key, value) + ... + 'two': 'deux' + 'one': 'un' + """ + for index in xrange(len(self)): + yield (self._key_list[index], self._value_list[index]) + + def itervalues(self): + """ + Create an iterator on values + """ + return iter(self._value_list) + + def iterkeys(self): + """ + Create an iterator on keys + """ + return iter(self._key_list) + + def replace(self, oldkey, newkey, new_value): + """ + Replace an existing value with another one + + >>> d=Dict( (("two", "deux"), ("one", "un")) ) + >>> d.replace("one", "three", 3) + >>> d + {'two': 'deux', 'three': 3} + + You can also use the classic form: + + >>> d['three'] = 4 + >>> d + {'two': 'deux', 'three': 4} + """ + index = self._index[oldkey] + self._value_list[index] = new_value + if oldkey != newkey: + del self._index[oldkey] + self._index[newkey] = index + self._key_list[index] = newkey + + def __delitem__(self, index): + """ + Delete item at position index. May raise IndexError. + + >>> d=Dict( ((6, 'six'), (9, 'neuf'), (4, 'quatre')) ) + >>> del d[1] + >>> d + {6: 'six', 4: 'quatre'} + """ + if index < 0: + index += len(self._value_list) + if not (0 <= index < len(self._value_list)): + raise IndexError(_("list assignment index out of range (%s/%s)") + % (index, len(self._value_list))) + del self._value_list[index] + del self._key_list[index] + + # First loop which may alter self._index + for key, item_index in self._index.iteritems(): + if item_index == index: + del self._index[key] + break + + # Second loop update indexes + for key, item_index in self._index.iteritems(): + if index < item_index: + self._index[key] -= 1 + + def insert(self, index, key, value): + """ + Insert an item at specified position index. 
+ + >>> d=Dict( ((6, 'six'), (9, 'neuf'), (4, 'quatre')) ) + >>> d.insert(1, '40', 'quarante') + >>> d + {6: 'six', '40': 'quarante', 9: 'neuf', 4: 'quatre'} + """ + if key in self: + raise UniqKeyError(_("Insert error: key '%s' ready exists") % key) + _index = index + if index < 0: + index += len(self._value_list) + if not(0 <= index <= len(self._value_list)): + raise IndexError(_("Insert error: index '%s' is invalid") % _index) + for item_key, item_index in self._index.iteritems(): + if item_index >= index: + self._index[item_key] += 1 + self._index[key] = index + self._key_list.insert(index, key) + self._value_list.insert(index, value) + + def __repr__(self): + items = ( "%r: %r" % (key, value) for key, value in self.iteritems() ) + return "{%s}" % ", ".join(items) + diff --git a/libs/hachoir_core/endian.py b/libs/hachoir_core/endian.py new file mode 100644 index 0000000..5f6ae88 --- /dev/null +++ b/libs/hachoir_core/endian.py @@ -0,0 +1,15 @@ +""" +Constant values about endian. +""" + +from hachoir_core.i18n import _ + +BIG_ENDIAN = "ABCD" +LITTLE_ENDIAN = "DCBA" +NETWORK_ENDIAN = BIG_ENDIAN + +endian_name = { + BIG_ENDIAN: _("Big endian"), + LITTLE_ENDIAN: _("Little endian"), +} + diff --git a/libs/hachoir_core/error.py b/libs/hachoir_core/error.py new file mode 100644 index 0000000..9ec6b57 --- /dev/null +++ b/libs/hachoir_core/error.py @@ -0,0 +1,45 @@ +""" +Functions to display an error (error, warning or information) message. +""" + +from hachoir_core.log import log +from hachoir_core.tools import makePrintable +import sys, traceback + +def getBacktrace(empty="Empty backtrace."): + """ + Try to get backtrace as string. + Returns "Error while trying to get backtrace" on failure. + """ + try: + info = sys.exc_info() + trace = traceback.format_exception(*info) + sys.exc_clear() + if trace[0] != "None\n": + return "".join(trace) + except: + # No i18n here (imagine if i18n function calls error...) 
class EventHandler(object):
    """
    Registry mapping event names to lists of handler callables,
    with a dispatch method to invoke them.
    """

    def __init__(self):
        # event name -> list of handler callables, in registration order
        self.handlers = {}

    def connect(self, event_name, handler):
        """
        Connect an event handler to an event. Append it to handlers list.
        """
        self.handlers.setdefault(event_name, []).append(handler)

    def raiseEvent(self, event_name, *args):
        """
        Raise an event: call each handler registered for this event_name.
        Unknown event names are silently ignored.
        """
        for handler in self.handlers.get(event_name, ()):
            handler(*args)
TimestampWin64, + DateTimeMSDOS32, TimeDateMSDOS32, TimedeltaWin64) + +# Special Field classes +from hachoir_core.field.link import Link, Fragment + +available_types = ( + Bit, Bits, RawBits, + Bytes, RawBytes, + SubFile, + Character, + Int8, Int16, Int24, Int32, Int64, + UInt8, UInt16, UInt24, UInt32, UInt64, + String, CString, UnixLine, + PascalString8, PascalString16, PascalString32, + Float32, Float64, + PaddingBits, PaddingBytes, + NullBits, NullBytes, + TimestampUnix32, TimestampMac32, TimestampWin64, + DateTimeMSDOS32, TimeDateMSDOS32, +# GenericInteger, GenericString, +) + diff --git a/libs/hachoir_core/field/basic_field_set.py b/libs/hachoir_core/field/basic_field_set.py new file mode 100644 index 0000000..c044124 --- /dev/null +++ b/libs/hachoir_core/field/basic_field_set.py @@ -0,0 +1,147 @@ +from hachoir_core.field import Field, FieldError +from hachoir_core.stream import InputStream +from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN +from hachoir_core.event_handler import EventHandler + +class ParserError(FieldError): + """ + Error raised by a field set. + + @see: L{FieldError} + """ + pass + +class MatchError(FieldError): + """ + Error raised by a field set when the stream content doesn't + match to file format. 
+ + @see: L{FieldError} + """ + pass + +class BasicFieldSet(Field): + _event_handler = None + is_field_set = True + endian = None + + def __init__(self, parent, name, stream, description, size): + # Sanity checks (preconditions) + assert not parent or issubclass(parent.__class__, BasicFieldSet) + assert issubclass(stream.__class__, InputStream) + + # Set field set size + if size is None and self.static_size: + assert isinstance(self.static_size, (int, long)) + size = self.static_size + + # Set Field attributes + self._parent = parent + self._name = name + self._size = size + self._description = description + self.stream = stream + self._field_array_count = {} + + # Set endian + if not self.endian: + assert parent and parent.endian + self.endian = parent.endian + + if parent: + # This field set is one of the root leafs + self._address = parent.nextFieldAddress() + self.root = parent.root + assert id(self.stream) == id(parent.stream) + else: + # This field set is the root + self._address = 0 + self.root = self + self._global_event_handler = None + + # Sanity checks (post-conditions) + assert self.endian in (BIG_ENDIAN, LITTLE_ENDIAN) + if (self._size is not None) and (self._size <= 0): + raise ParserError("Invalid parser '%s' size: %s" % (self.path, self._size)) + + def reset(self): + self._field_array_count = {} + + def createValue(self): + return None + + def connectEvent(self, event_name, handler, local=True): + assert event_name in ( + # Callback prototype: def f(field) + # Called when new value is already set + "field-value-changed", + + # Callback prototype: def f(field) + # Called when field size is already set + "field-resized", + + # A new field has been inserted in the field set + # Callback prototype: def f(index, new_field) + "field-inserted", + + # Callback prototype: def f(old_field, new_field) + # Called when new field is already in field set + "field-replaced", + + # Callback prototype: def f(field, new_value) + # Called to ask to set new value + 
"set-field-value" + ), "Event name %r is invalid" % event_name + if local: + if self._event_handler is None: + self._event_handler = EventHandler() + self._event_handler.connect(event_name, handler) + else: + if self.root._global_event_handler is None: + self.root._global_event_handler = EventHandler() + self.root._global_event_handler.connect(event_name, handler) + + def raiseEvent(self, event_name, *args): + # Transfer event to local listeners + if self._event_handler is not None: + self._event_handler.raiseEvent(event_name, *args) + + # Transfer event to global listeners + if self.root._global_event_handler is not None: + self.root._global_event_handler.raiseEvent(event_name, *args) + + def setUniqueFieldName(self, field): + key = field._name[:-2] + try: + self._field_array_count[key] += 1 + except KeyError: + self._field_array_count[key] = 0 + field._name = key + "[%u]" % self._field_array_count[key] + + def readFirstFields(self, number): + """ + Read first number fields if they are not read yet. + + Returns number of new added fields. + """ + number = number - self.current_length + if 0 < number: + return self.readMoreFields(number) + else: + return 0 + + def createFields(self): + raise NotImplementedError() + def __iter__(self): + raise NotImplementedError() + def __len__(self): + raise NotImplementedError() + def getField(self, key, const=True): + raise NotImplementedError() + def nextFieldAddress(self): + raise NotImplementedError() + def getFieldIndex(self, field): + raise NotImplementedError() + def readMoreFields(self, number): + raise NotImplementedError() + diff --git a/libs/hachoir_core/field/bit_field.py b/libs/hachoir_core/field/bit_field.py new file mode 100644 index 0000000..8fae3c7 --- /dev/null +++ b/libs/hachoir_core/field/bit_field.py @@ -0,0 +1,68 @@ +""" +Bit sized classes: +- Bit: Single bit, value is False or True ; +- Bits: Integer with a size in bits ; +- RawBits: unknown content with a size in bits. 
class RawBits(Field):
    """
    Unknown content with a size in bits.
    """
    # Static size is the constructor's ``size`` argument (args[1]).
    static_size = staticmethod(lambda *args, **kw: args[1])

    def __init__(self, parent, name, size, description=None):
        """
        Constructor: see L{Field.__init__} for parameter description
        """
        Field.__init__(self, parent, name, size, description)

    def hasValue(self):
        # Bits read from the stream always yield a value.
        return True

    def createValue(self):
        # Read this field's bits from the parent stream, using the
        # parent's endianness.
        return self._parent.stream.readBits(
            self.absolute_address, self._size, self._parent.endian)

    def createDisplay(self):
        # Show the integer value for short fields, a placeholder for long ones.
        if self._size < config.max_bit_length:
            return unicode(self.value)
        else:
            # NOTE(review): gettext is applied to the already-formatted
            # string, so the translation lookup can never match a catalog
            # entry; _("<%s size=%u>") % (...) was probably intended — confirm.
            return _("<%s size=%u>" %
                (self.__class__.__name__, self._size))
    # Raw display is identical to the human display for raw bits.
    createRawDisplay = createDisplay

class Bits(RawBits):
    """
    Positive integer with a size in bits

    @see: L{Bit}
    @see: L{RawBits}
    """
    pass

class Bit(RawBits):
    """
    Single bit: value can be False or True, and size is exactly one bit.

    @see: L{Bits}
    """
    # A Bit always occupies exactly one bit.
    static_size = 1

    def __init__(self, parent, name, description=None):
        """
        Constructor: see L{Field.__init__} for parameter description
        """
        RawBits.__init__(self, parent, name, 1, description=description)

    def createValue(self):
        # True if the single bit read from the stream equals 1.
        return 1 == self._parent.stream.readBits(
            self.absolute_address, 1, self._parent.endian)

    def createRawDisplay(self):
        # Display the bit as u"0"/u"1" rather than False/True.
        return unicode(int(self.value))
# Arbitrary upper bound on a byte field length (in bytes).
MAX_LENGTH = (2**64)

class RawBytes(Field):
    """
    Byte vector of unknown content

    @see: L{Bytes}
    """
    # Static size in bits is 8 times the constructor's ``length`` argument.
    static_size = staticmethod(lambda *args, **kw: args[1]*8)

    def __init__(self, parent, name, length, description="Raw data"):
        assert issubclass(parent.__class__, Field)
        if not(0 < length <= MAX_LENGTH):
            raise FieldError("Invalid RawBytes length (%s)!" % length)
        Field.__init__(self, parent, name, length*8, description)
        # Cache for the (possibly truncated) bytes used by the display.
        self._display = None

    def _createDisplay(self, human):
        """
        Build the display string. *human* selects printable text (True)
        or a quoted hexadecimal-escape form (False). At most
        config.max_byte_length bytes are shown; longer content gets a
        "(...)" marker.
        """
        max_bytes = config.max_byte_length
        # If _getValue has been replaced by the caching lambda (see
        # Field._getValue), the full value was already read: slice it
        # instead of touching the stream again.
        if type(self._getValue) is type(lambda: None):
            display = self.value[:max_bytes]
        else:
            if self._display is None:
                address = self.absolute_address
                # NOTE: Python 2 integer division (self._size is in bits).
                length = min(self._size / 8, max_bytes)
                self._display = self._parent.stream.readBytes(address, length)
            display = self._display
        truncated = (8 * len(display) < self._size)
        if human:
            if truncated:
                display += "(...)"
            return makePrintable(display, "latin-1", quote='"', to_unicode=True)
        else:
            display = str2hex(display, format=r"\x%02x")
            if truncated:
                return '"%s(...)"' % display
            else:
                return '"%s"' % display

    def createDisplay(self):
        return self._createDisplay(True)

    def createRawDisplay(self):
        return self._createDisplay(False)

    def hasValue(self):
        return True

    def createValue(self):
        assert (self._size % 8) == 0
        # Drop the truncated display cache: the full value supersedes it.
        if self._display:
            self._display = None
        return self._parent.stream.readBytes(
            self.absolute_address, self._size / 8)
class Character(Bits):
    """
    A 8-bit character using ASCII charset for display attribute.
    """
    # A character always occupies exactly 8 bits.
    static_size = 8

    def __init__(self, parent, name, description=None):
        Bits.__init__(self, parent, name, 8, description=description)

    def createValue(self):
        # Read one byte from the stream and return it as a 1-char string.
        return chr(self._parent.stream.readBits(
            self.absolute_address, 8, BIG_ENDIAN))

    def createRawDisplay(self):
        # Raw display: the byte's integer ordinal as a unicode string.
        return unicode(Bits.createValue(self))

    def createDisplay(self):
        # Human display: printable ASCII, quoted with single quotes.
        return makePrintable(self.value, "ASCII", quote="'", to_unicode=True)
+ """ + display = field.createDisplay + if key_func: + def createDisplay(): + try: + key = key_func(field.value) + return enum[key] + except LookupError: + return display() + else: + def createDisplay(): + try: + return enum[field.value] + except LookupError: + return display() + field.createDisplay = createDisplay + field.getEnum = lambda: enum + return field + diff --git a/libs/hachoir_core/field/fake_array.py b/libs/hachoir_core/field/fake_array.py new file mode 100644 index 0000000..5535caf --- /dev/null +++ b/libs/hachoir_core/field/fake_array.py @@ -0,0 +1,81 @@ +import itertools +from hachoir_core.field import MissingField + +class FakeArray: + """ + Simulate an array for GenericFieldSet.array(): fielset.array("item")[0] is + equivalent to fielset.array("item[0]"). + + It's possible to iterate over the items using:: + + for element in fieldset.array("item"): + ... + + And to get array size using len(fieldset.array("item")). + """ + def __init__(self, fieldset, name): + pos = name.rfind("/") + if pos != -1: + self.fieldset = fieldset[name[:pos]] + self.name = name[pos+1:] + else: + self.fieldset = fieldset + self.name = name + self._format = "%s[%%u]" % self.name + self._cache = {} + self._known_size = False + self._max_index = -1 + + def __nonzero__(self): + "Is the array empty or not?" + if self._cache: + return True + else: + return (0 in self) + + def __len__(self): + "Number of fields in the array" + total = self._max_index+1 + if not self._known_size: + for index in itertools.count(total): + try: + field = self[index] + total += 1 + except MissingField: + break + return total + + def __contains__(self, index): + try: + field = self[index] + return True + except MissingField: + return False + + def __getitem__(self, index): + """ + Get a field of the array. Returns a field, or raise MissingField + exception if the field doesn't exist. 
+ """ + try: + value = self._cache[index] + except KeyError: + try: + value = self.fieldset[self._format % index] + except MissingField: + self._known_size = True + raise + self._cache[index] = value + self._max_index = max(index, self._max_index) + return value + + def __iter__(self): + """ + Iterate in the fields in their index order: field[0], field[1], ... + """ + for index in itertools.count(0): + try: + yield self[index] + except MissingField: + raise StopIteration() + diff --git a/libs/hachoir_core/field/field.py b/libs/hachoir_core/field/field.py new file mode 100644 index 0000000..cc59e9c --- /dev/null +++ b/libs/hachoir_core/field/field.py @@ -0,0 +1,262 @@ +""" +Parent of all (field) classes in Hachoir: Field. +""" + +from hachoir_core.compatibility import reversed +from hachoir_core.stream import InputFieldStream +from hachoir_core.error import HachoirError, HACHOIR_ERRORS +from hachoir_core.log import Logger +from hachoir_core.i18n import _ +from hachoir_core.tools import makePrintable +from weakref import ref as weakref_ref + +class FieldError(HachoirError): + """ + Error raised by a L{Field}. + + @see: L{HachoirError} + """ + pass + +def joinPath(path, name): + if path != "/": + return "/".join((path, name)) + else: + return "/%s" % name + +class MissingField(KeyError, FieldError): + def __init__(self, field, key): + KeyError.__init__(self) + self.field = field + self.key = key + + def __str__(self): + return 'Can\'t get field "%s" from %s' % (self.key, self.field.path) + + def __unicode__(self): + return u'Can\'t get field "%s" from %s' % (self.key, self.field.path) + +class Field(Logger): + # static size can have two differents value: None (no static size), an + # integer (number of bits), or a function which returns an integer. + # + # This function receives exactly the same arguments than the constructor + # except the first one (one). 
Example of function: + # static_size = staticmethod(lambda *args, **kw: args[1]) + static_size = None + + # Indicate if this field contains other fields (is a field set) or not + is_field_set = False + + def __init__(self, parent, name, size=None, description=None): + """ + Set default class attributes, set right address if None address is + given. + + @param parent: Parent field of this field + @type parent: L{Field}|None + @param name: Name of the field, have to be unique in parent. If it ends + with "[]", end will be replaced with "[new_id]" (eg. "raw[]" + becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.) + @type name: str + @param size: Size of the field in bit (can be None, so it + will be computed later) + @type size: int|None + @param address: Address in bit relative to the parent absolute address + @type address: int|None + @param description: Optional string description + @type description: str|None + """ + assert issubclass(parent.__class__, Field) + assert (size is None) or (0 <= size) + self._parent = parent + if not name: + raise ValueError("empty field name") + self._name = name + self._address = parent.nextFieldAddress() + self._size = size + self._description = description + + def _logger(self): + return self.path + + def createDescription(self): + return "" + def _getDescription(self): + if self._description is None: + try: + self._description = self.createDescription() + if isinstance(self._description, str): + self._description = makePrintable( + self._description, "ISO-8859-1", to_unicode=True) + except HACHOIR_ERRORS, err: + self.error("Error getting description: " + unicode(err)) + self._description = "" + return self._description + description = property(_getDescription, + doc="Description of the field (string)") + + def __str__(self): + return self.display + def __unicode__(self): + return self.display + def __repr__(self): + return "<%s path=%r, address=%s, size=%s>" % ( + self.__class__.__name__, self.path, self._address, 
self._size) + + def hasValue(self): + return self._getValue() is not None + def createValue(self): + raise NotImplementedError() + def _getValue(self): + try: + value = self.createValue() + except HACHOIR_ERRORS, err: + self.error(_("Unable to create value: %s") % unicode(err)) + value = None + self._getValue = lambda: value + return value + value = property(lambda self: self._getValue(), doc="Value of field") + + def _getParent(self): + return self._parent + parent = property(_getParent, doc="Parent of this field") + + def createDisplay(self): + return unicode(self.value) + def _getDisplay(self): + if not hasattr(self, "_Field__display"): + try: + self.__display = self.createDisplay() + except HACHOIR_ERRORS, err: + self.error("Unable to create display: %s" % err) + self.__display = u"" + return self.__display + display = property(lambda self: self._getDisplay(), + doc="Short (unicode) string which represents field content") + + def createRawDisplay(self): + value = self.value + if isinstance(value, str): + return makePrintable(value, "ASCII", to_unicode=True) + else: + return unicode(value) + def _getRawDisplay(self): + if not hasattr(self, "_Field__raw_display"): + try: + self.__raw_display = self.createRawDisplay() + except HACHOIR_ERRORS, err: + self.error("Unable to create raw display: %s" % err) + self.__raw_display = u"" + return self.__raw_display + raw_display = property(lambda self: self._getRawDisplay(), + doc="(Unicode) string which represents raw field content") + + def _getName(self): + return self._name + name = property(_getName, + doc="Field name (unique in its parent field set list)") + + def _getIndex(self): + if not self._parent: + return None + return self._parent.getFieldIndex(self) + index = property(_getIndex) + + def _getPath(self): + if not self._parent: + return '/' + names = [] + field = self + while field is not None: + names.append(field._name) + field = field._parent + names[-1] = '' + return '/'.join(reversed(names)) + path = 
property(_getPath, + doc="Full path of the field starting at root field") + + def _getAddress(self): + return self._address + address = property(_getAddress, + doc="Relative address in bit to parent address") + + def _getAbsoluteAddress(self): + address = self._address + current = self._parent + while current: + address += current._address + current = current._parent + return address + absolute_address = property(_getAbsoluteAddress, + doc="Absolute address (from stream beginning) in bit") + + def _getSize(self): + return self._size + size = property(_getSize, doc="Content size in bit") + + def _getField(self, name, const): + if name.strip("."): + return None + field = self + for index in xrange(1, len(name)): + field = field._parent + if field is None: + break + return field + + def getField(self, key, const=True): + if key: + if key[0] == "/": + if self._parent: + current = self._parent.root + else: + current = self + if len(key) == 1: + return current + key = key[1:] + else: + current = self + for part in key.split("/"): + field = current._getField(part, const) + if field is None: + raise MissingField(current, part) + current = field + return current + raise KeyError("Key must not be an empty string!") + + def __getitem__(self, key): + return self.getField(key, False) + + def __contains__(self, key): + try: + return self.getField(key, False) is not None + except FieldError: + return False + + def _createInputStream(self, **args): + assert self._parent + return InputFieldStream(self, **args) + def getSubIStream(self): + if hasattr(self, "_sub_istream"): + stream = self._sub_istream() + else: + stream = None + if stream is None: + stream = self._createInputStream() + self._sub_istream = weakref_ref(stream) + return stream + def setSubIStream(self, createInputStream): + cis = self._createInputStream + self._createInputStream = lambda **args: createInputStream(cis, **args) + + def __nonzero__(self): + """ + Method called by code like "if field: (...)". 
+ Always returns True + """ + return True + + def getFieldType(self): + return self.__class__.__name__ + diff --git a/libs/hachoir_core/field/field_set.py b/libs/hachoir_core/field/field_set.py new file mode 100644 index 0000000..92b5192 --- /dev/null +++ b/libs/hachoir_core/field/field_set.py @@ -0,0 +1,7 @@ +from hachoir_core.field import BasicFieldSet, GenericFieldSet + +class FieldSet(GenericFieldSet): + def __init__(self, parent, name, *args, **kw): + assert issubclass(parent.__class__, BasicFieldSet) + GenericFieldSet.__init__(self, parent, name, parent.stream, *args, **kw) + diff --git a/libs/hachoir_core/field/float.py b/libs/hachoir_core/field/float.py new file mode 100644 index 0000000..025b57d --- /dev/null +++ b/libs/hachoir_core/field/float.py @@ -0,0 +1,99 @@ +from hachoir_core.field import Bit, Bits, FieldSet +from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN +import struct + +# Make sure that we use right struct types +assert struct.calcsize("f") == 4 +assert struct.calcsize("d") == 8 +assert struct.unpack("d", "\xc0\0\0\0\0\0\0\0")[0] == -2.0 + +class FloatMantissa(Bits): + def createValue(self): + value = Bits.createValue(self) + return 1 + float(value) / (2 ** self.size) + + def createRawDisplay(self): + return unicode(Bits.createValue(self)) + +class FloatExponent(Bits): + def __init__(self, parent, name, size): + Bits.__init__(self, parent, name, size) + self.bias = 2 ** (size-1) - 1 + + def createValue(self): + return Bits.createValue(self) - self.bias + + def createRawDisplay(self): + return unicode(self.value + self.bias) + +def floatFactory(name, format, mantissa_bits, exponent_bits, doc): + size = 1 + mantissa_bits + exponent_bits + + class Float(FieldSet): + static_size = size + __doc__ = doc + + def __init__(self, parent, name, description=None): + assert parent.endian in (BIG_ENDIAN, LITTLE_ENDIAN) + FieldSet.__init__(self, parent, name, description, size) + if format: + if self._parent.endian == BIG_ENDIAN: + self.struct_format 
= ">"+format + else: + self.struct_format = "<"+format + else: + self.struct_format = None + + def createValue(self): + """ + Create float value: use struct.unpack() when it's possible + (32 and 64-bit float) or compute it with : + mantissa * (2.0 ** exponent) + + This computation may raise an OverflowError. + """ + if self.struct_format: + raw = self._parent.stream.readBytes( + self.absolute_address, self._size//8) + try: + return struct.unpack(self.struct_format, raw)[0] + except struct.error, err: + raise ValueError("[%s] conversion error: %s" % + (self.__class__.__name__, err)) + else: + try: + value = self["mantissa"].value * (2.0 ** float(self["exponent"].value)) + if self["negative"].value: + return -(value) + else: + return value + except OverflowError: + raise ValueError("[%s] floating point overflow" % + self.__class__.__name__) + + def createFields(self): + yield Bit(self, "negative") + yield FloatExponent(self, "exponent", exponent_bits) + if 64 <= mantissa_bits: + yield Bit(self, "one") + yield FloatMantissa(self, "mantissa", mantissa_bits-1) + else: + yield FloatMantissa(self, "mantissa", mantissa_bits) + + cls = Float + cls.__name__ = name + return cls + +# 32-bit float (standard: IEEE 754/854) +Float32 = floatFactory("Float32", "f", 23, 8, + "Floating point number: format IEEE 754 int 32 bit") + +# 64-bit float (standard: IEEE 754/854) +Float64 = floatFactory("Float64", "d", 52, 11, + "Floating point number: format IEEE 754 in 64 bit") + +# 80-bit float (standard: IEEE 754/854) +Float80 = floatFactory("Float80", None, 64, 15, + "Floating point number: format IEEE 754 in 80 bit") + diff --git a/libs/hachoir_core/field/generic_field_set.py b/libs/hachoir_core/field/generic_field_set.py new file mode 100644 index 0000000..a3b5eb7 --- /dev/null +++ b/libs/hachoir_core/field/generic_field_set.py @@ -0,0 +1,532 @@ +from hachoir_core.field import (MissingField, BasicFieldSet, Field, ParserError, + createRawField, createNullField, createPaddingField, 
FakeArray) +from hachoir_core.dict import Dict, UniqKeyError +from hachoir_core.error import HACHOIR_ERRORS +from hachoir_core.tools import lowerBound +import hachoir_core.config as config + +class GenericFieldSet(BasicFieldSet): + """ + Ordered list of fields. Use operator [] to access fields using their + name (field names are unique in a field set, but not in the whole + document). + + Class attributes: + - endian: Bytes order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}). Optional if the + field set has a parent ; + - static_size: (optional) Size of FieldSet in bits. This attribute should + be used in parser of constant size. + + Instance attributes/methods: + - _fields: Ordered dictionnary of all fields, may be incomplete + because feeded when a field is requested ; + - stream: Input stream used to feed fields' value + - root: The root of all field sets ; + - __len__(): Number of fields, may need to create field set ; + - __getitem__(): Get an field by it's name or it's path. + + And attributes inherited from Field class: + - parent: Parent field (may be None if it's the root) ; + - name: Field name (unique in parent field set) ; + - value: The field set ; + - address: Field address (in bits) relative to parent ; + - description: A string describing the content (can be None) ; + - size: Size of field set in bits, may need to create field set. + + Event handling: + - "connectEvent": Connect an handler to an event ; + - "raiseEvent": Raise an event. + + To implement a new field set, you need to: + - create a class which inherite from FieldSet ; + - write createFields() method using lines like: + yield Class(self, "name", ...) ; + - and maybe set endian and static_size class attributes. + """ + + _current_size = 0 + + def __init__(self, parent, name, stream, description=None, size=None): + """ + Constructor + @param parent: Parent field set, None for root parser + @param name: Name of the field, have to be unique in parent. 
If it ends + with "[]", end will be replaced with "[new_id]" (eg. "raw[]" + becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.) + @type name: str + @param stream: Input stream from which data are read + @type stream: L{InputStream} + @param description: Optional string description + @type description: str|None + @param size: Size in bits. If it's None, size will be computed. You + can also set size with class attribute static_size + """ + BasicFieldSet.__init__(self, parent, name, stream, description, size) + self._fields = Dict() + self._field_generator = self.createFields() + self._array_cache = {} + self.__is_feeding = False + + def array(self, key): + try: + return self._array_cache[key] + except KeyError: + array = FakeArray(self, key) + self._array_cache[key] = array + return self._array_cache[key] + + def reset(self): + """ + Reset a field set: + * clear fields ; + * restart field generator ; + * set current size to zero ; + * clear field array count. + + But keep: name, value, description and size. + """ + BasicFieldSet.reset(self) + self._fields = Dict() + self._field_generator = self.createFields() + self._current_size = 0 + self._array_cache = {} + + def __str__(self): + return '<%s path=%s, current_size=%s, current length=%s>' % \ + (self.__class__.__name__, self.path, self._current_size, len(self._fields)) + + def __len__(self): + """ + Returns number of fields, may need to create all fields + if it's not done yet. 
+ """ + if self._field_generator is not None: + self._feedAll() + return len(self._fields) + + def _getCurrentLength(self): + return len(self._fields) + current_length = property(_getCurrentLength) + + def _getSize(self): + if self._size is None: + self._feedAll() + return self._size + size = property(_getSize, doc="Size in bits, may create all fields to get size") + + def _getCurrentSize(self): + assert not(self.done) + return self._current_size + current_size = property(_getCurrentSize) + + eof = property(lambda self: self._checkSize(self._current_size + 1, True) < 0) + + def _checkSize(self, size, strict): + field = self + while field._size is None: + if not field._parent: + assert self.stream.size is None + if not strict: + return None + if self.stream.sizeGe(size): + return 0 + break + size += field._address + field = field._parent + return field._size - size + + autofix = property(lambda self: self.root.autofix) + + def _addField(self, field): + """ + Add a field to the field set: + * add it into _fields + * update _current_size + + May raise a StopIteration() on error + """ + if not issubclass(field.__class__, Field): + raise ParserError("Field type (%s) is not a subclass of 'Field'!" 
+ % field.__class__.__name__) + assert isinstance(field._name, str) + if field._name.endswith("[]"): + self.setUniqueFieldName(field) + if config.debug: + self.info("[+] DBG: _addField(%s)" % field.name) + + # required for the msoffice parser + if field._address != self._current_size: + self.warning("Fix address of %s to %s (was %s)" % + (field.path, self._current_size, field._address)) + field._address = self._current_size + + ask_stop = False + # Compute field size and check that there is enough place for it + self.__is_feeding = True + try: + field_size = field.size + except HACHOIR_ERRORS, err: + if field.is_field_set and field.current_length and field.eof: + self.warning("Error when getting size of '%s': %s" % (field.name, err)) + field._stopFeeding() + ask_stop = True + else: + self.warning("Error when getting size of '%s': delete it" % field.name) + self.__is_feeding = False + raise + self.__is_feeding = False + + # No more place? + dsize = self._checkSize(field._address + field.size, False) + if (dsize is not None and dsize < 0) or (field.is_field_set and field.size <= 0): + if self.autofix and self._current_size: + self._fixFieldSize(field, field.size + dsize) + else: + raise ParserError("Field %s is too large!" 
% field.path) + + self._current_size += field.size + try: + self._fields.append(field._name, field) + except UniqKeyError, err: + self.warning("Duplicate field name " + unicode(err)) + field._name += "[]" + self.setUniqueFieldName(field) + self._fields.append(field._name, field) + if ask_stop: + raise StopIteration() + + def _fixFieldSize(self, field, new_size): + if new_size > 0: + if field.is_field_set and 0 < field.size: + field._truncate(new_size) + return + + # Don't add the field <=> delete item + if self._size is None: + self._size = self._current_size + new_size + self.warning("[Autofix] Delete '%s' (too large)" % field.path) + raise StopIteration() + + def _getField(self, name, const): + field = Field._getField(self, name, const) + if field is None: + if name in self._fields: + field = self._fields[name] + elif self._field_generator is not None and not const: + field = self._feedUntil(name) + return field + + def getField(self, key, const=True): + if isinstance(key, (int, long)): + if key < 0: + raise KeyError("Key must be positive!") + if not const: + self.readFirstFields(key+1) + if len(self._fields.values) <= key: + raise MissingField(self, key) + return self._fields.values[key] + return Field.getField(self, key, const) + + def _truncate(self, size): + assert size > 0 + if size < self._current_size: + self._size = size + while True: + field = self._fields.values[-1] + if field._address < size: + break + del self._fields[-1] + self._current_size = field._address + size -= field._address + if size < field._size: + if field.is_field_set: + field._truncate(size) + else: + del self._fields[-1] + field = createRawField(self, size, "raw[]") + self._fields.append(field._name, field) + self._current_size = self._size + else: + assert size < self._size or self._size is None + self._size = size + if self._size == self._current_size: + self._field_generator = None + + def _deleteField(self, index): + field = self._fields.values[index] + size = field.size + 
self._current_size -= size + del self._fields[index] + return field + + def _fixLastField(self): + """ + Try to fix last field when we know current field set size. + Returns new added field if any, or None. + """ + assert self._size is not None + + # Stop parser + message = ["stop parser"] + self._field_generator = None + + # If last field is too big, delete it + while self._size < self._current_size: + field = self._deleteField(len(self._fields)-1) + message.append("delete field %s" % field.path) + assert self._current_size <= self._size + + # If field size current is smaller: add a raw field + size = self._size - self._current_size + if size: + field = createRawField(self, size, "raw[]") + message.append("add padding") + self._current_size += field.size + self._fields.append(field._name, field) + else: + field = None + message = ", ".join(message) + self.warning("[Autofix] Fix parser error: " + message) + assert self._current_size == self._size + return field + + def _stopFeeding(self): + new_field = None + if self._size is None: + if self._parent: + self._size = self._current_size + elif self._size != self._current_size: + if self.autofix: + new_field = self._fixLastField() + else: + raise ParserError("Invalid parser \"%s\" size!" % self.path) + self._field_generator = None + return new_field + + def _fixFeedError(self, exception): + """ + Try to fix a feeding error. Returns False if error can't be fixed, + otherwise returns new field if any, or None. 
+ """ + if self._size is None or not self.autofix: + return False + self.warning(unicode(exception)) + return self._fixLastField() + + def _feedUntil(self, field_name): + """ + Return the field if it was found, None else + """ + if self.__is_feeding \ + or (self._field_generator and self._field_generator.gi_running): + self.warning("Unable to get %s (and generator is already running)" + % field_name) + return None + try: + while True: + field = self._field_generator.next() + self._addField(field) + if field.name == field_name: + return field + except HACHOIR_ERRORS, err: + if self._fixFeedError(err) is False: + raise + except StopIteration: + self._stopFeeding() + return None + + def readMoreFields(self, number): + """ + Read more number fields, or do nothing if parsing is done. + + Returns number of new added fields. + """ + if self._field_generator is None: + return 0 + oldlen = len(self._fields) + try: + for index in xrange(number): + self._addField( self._field_generator.next() ) + except HACHOIR_ERRORS, err: + if self._fixFeedError(err) is False: + raise + except StopIteration: + self._stopFeeding() + return len(self._fields) - oldlen + + def _feedAll(self): + if self._field_generator is None: + return + try: + while True: + field = self._field_generator.next() + self._addField(field) + except HACHOIR_ERRORS, err: + if self._fixFeedError(err) is False: + raise + except StopIteration: + self._stopFeeding() + + def __iter__(self): + """ + Create a generator to iterate on each field, may create new + fields when needed + """ + try: + done = 0 + while True: + if done == len(self._fields): + if self._field_generator is None: + break + self._addField( self._field_generator.next() ) + for field in self._fields.values[done:]: + yield field + done += 1 + except HACHOIR_ERRORS, err: + field = self._fixFeedError(err) + if isinstance(field, Field): + yield field + elif hasattr(field, '__iter__'): + for f in field: + yield f + elif field is False: + raise + except 
StopIteration: + field = self._stopFeeding() + if isinstance(field, Field): + yield field + elif hasattr(field, '__iter__'): + for f in field: + yield f + + def _isDone(self): + return (self._field_generator is None) + done = property(_isDone, doc="Boolean to know if parsing is done or not") + + # + # FieldSet_SeekUtility + # + def seekBit(self, address, name="padding[]", + description=None, relative=True, null=False): + """ + Create a field to seek to specified address, + or None if it's not needed. + + May raise an (ParserError) exception if address is invalid. + """ + if relative: + nbits = address - self._current_size + else: + nbits = address - (self.absolute_address + self._current_size) + if nbits < 0: + raise ParserError("Seek error, unable to go back!") + if 0 < nbits: + if null: + return createNullField(self, nbits, name, description) + else: + return createPaddingField(self, nbits, name, description) + else: + return None + + def seekByte(self, address, name="padding[]", description=None, relative=True, null=False): + """ + Same as seekBit(), but with address in byte. + """ + return self.seekBit(address * 8, name, description, relative, null=null) + + # + # RandomAccessFieldSet + # + def replaceField(self, name, new_fields): + # TODO: Check in self and not self.field + # Problem is that "generator is already executing" + if name not in self._fields: + raise ParserError("Unable to replace %s: field doesn't exist!" % name) + assert 1 <= len(new_fields) + old_field = self[name] + total_size = sum( (field.size for field in new_fields) ) + if old_field.size != total_size: + raise ParserError("Unable to replace %s: " + "new field(s) hasn't same size (%u bits instead of %u bits)!" 
+ % (name, total_size, old_field.size)) + field = new_fields[0] + if field._name.endswith("[]"): + self.setUniqueFieldName(field) + field._address = old_field.address + if field.name != name and field.name in self._fields: + raise ParserError( + "Unable to replace %s: name \"%s\" is already used!" + % (name, field.name)) + self._fields.replace(name, field.name, field) + self.raiseEvent("field-replaced", old_field, field) + if 1 < len(new_fields): + index = self._fields.index(new_fields[0].name)+1 + address = field.address + field.size + for field in new_fields[1:]: + if field._name.endswith("[]"): + self.setUniqueFieldName(field) + field._address = address + if field.name in self._fields: + raise ParserError( + "Unable to replace %s: name \"%s\" is already used!" + % (name, field.name)) + self._fields.insert(index, field.name, field) + self.raiseEvent("field-inserted", index, field) + index += 1 + address += field.size + + def getFieldByAddress(self, address, feed=True): + """ + Only search in existing fields + """ + if feed and self._field_generator is not None: + self._feedAll() + if address < self._current_size: + i = lowerBound(self._fields.values, lambda x: x.address + x.size <= address) + if i is not None: + return self._fields.values[i] + return None + + def writeFieldsIn(self, old_field, address, new_fields): + """ + Can only write in existing fields (address < self._current_size) + """ + + # Check size + total_size = sum( field.size for field in new_fields ) + if old_field.size < total_size: + raise ParserError( \ + "Unable to write fields at address %s " \ + "(too big)!" % (address)) + + # Need padding before? + replace = [] + size = address - old_field.address + assert 0 <= size + if 0 < size: + padding = createPaddingField(self, size) + padding._address = old_field.address + replace.append(padding) + + # Set fields address + for field in new_fields: + field._address = address + address += field.size + replace.append(field) + + # Need padding after? 
+ size = (old_field.address + old_field.size) - address + assert 0 <= size + if 0 < size: + padding = createPaddingField(self, size) + padding._address = address + replace.append(padding) + + self.replaceField(old_field.name, replace) + + def nextFieldAddress(self): + return self._current_size + + def getFieldIndex(self, field): + return self._fields.index(field._name) + diff --git a/libs/hachoir_core/field/helper.py b/libs/hachoir_core/field/helper.py new file mode 100644 index 0000000..ba44f68 --- /dev/null +++ b/libs/hachoir_core/field/helper.py @@ -0,0 +1,57 @@ +from hachoir_core.field import (FieldError, + RawBits, RawBytes, + PaddingBits, PaddingBytes, + NullBits, NullBytes, + GenericString, GenericInteger) +from hachoir_core.stream import FileOutputStream + +def createRawField(parent, size, name="raw[]", description=None): + if size <= 0: + raise FieldError("Unable to create raw field of %s bits" % size) + if (size % 8) == 0: + return RawBytes(parent, name, size/8, description) + else: + return RawBits(parent, name, size, description) + +def createPaddingField(parent, nbits, name="padding[]", description=None): + if nbits <= 0: + raise FieldError("Unable to create padding of %s bits" % nbits) + if (nbits % 8) == 0: + return PaddingBytes(parent, name, nbits/8, description) + else: + return PaddingBits(parent, name, nbits, description) + +def createNullField(parent, nbits, name="padding[]", description=None): + if nbits <= 0: + raise FieldError("Unable to create null padding of %s bits" % nbits) + if (nbits % 8) == 0: + return NullBytes(parent, name, nbits/8, description) + else: + return NullBits(parent, name, nbits, description) + +def isString(field): + return issubclass(field.__class__, GenericString) + +def isInteger(field): + return issubclass(field.__class__, GenericInteger) + +def writeIntoFile(fieldset, filename): + output = FileOutputStream(filename) + fieldset.writeInto(output) + +def createOrphanField(fieldset, address, field_cls, *args, **kw): + 
""" + Create an orphan field at specified address: + field_cls(fieldset, *args, **kw) + + The field uses the fieldset properties but it isn't added to the + field set. + """ + save_size = fieldset._current_size + try: + fieldset._current_size = address + field = field_cls(fieldset, *args, **kw) + finally: + fieldset._current_size = save_size + return field + diff --git a/libs/hachoir_core/field/integer.py b/libs/hachoir_core/field/integer.py new file mode 100644 index 0000000..763f1d2 --- /dev/null +++ b/libs/hachoir_core/field/integer.py @@ -0,0 +1,44 @@ +""" +Integer field classes: +- UInt8, UInt16, UInt24, UInt32, UInt64: unsigned integer of 8, 16, 32, 64 bits ; +- Int8, Int16, Int24, Int32, Int64: signed integer of 8, 16, 32, 64 bits. +""" + +from hachoir_core.field import Bits, FieldError + +class GenericInteger(Bits): + """ + Generic integer class used to generate other classes. + """ + def __init__(self, parent, name, signed, size, description=None): + if not (8 <= size <= 256): + raise FieldError("Invalid integer size (%s): have to be in 8..256" % size) + Bits.__init__(self, parent, name, size, description) + self.signed = signed + + def createValue(self): + return self._parent.stream.readInteger( + self.absolute_address, self.signed, self._size, self._parent.endian) + +def integerFactory(name, is_signed, size, doc): + class Integer(GenericInteger): + __doc__ = doc + static_size = size + def __init__(self, parent, name, description=None): + GenericInteger.__init__(self, parent, name, is_signed, size, description) + cls = Integer + cls.__name__ = name + return cls + +UInt8 = integerFactory("UInt8", False, 8, "Unsigned integer of 8 bits") +UInt16 = integerFactory("UInt16", False, 16, "Unsigned integer of 16 bits") +UInt24 = integerFactory("UInt24", False, 24, "Unsigned integer of 24 bits") +UInt32 = integerFactory("UInt32", False, 32, "Unsigned integer of 32 bits") +UInt64 = integerFactory("UInt64", False, 64, "Unsigned integer of 64 bits") + +Int8 = 
integerFactory("Int8", True, 8, "Signed integer of 8 bits") +Int16 = integerFactory("Int16", True, 16, "Signed integer of 16 bits") +Int24 = integerFactory("Int24", True, 24, "Signed integer of 24 bits") +Int32 = integerFactory("Int32", True, 32, "Signed integer of 32 bits") +Int64 = integerFactory("Int64", True, 64, "Signed integer of 64 bits") + diff --git a/libs/hachoir_core/field/link.py b/libs/hachoir_core/field/link.py new file mode 100644 index 0000000..b331c3b --- /dev/null +++ b/libs/hachoir_core/field/link.py @@ -0,0 +1,109 @@ +from hachoir_core.field import Field, FieldSet, ParserError, Bytes, MissingField +from hachoir_core.stream import FragmentedStream + + +class Link(Field): + def __init__(self, parent, name, *args, **kw): + Field.__init__(self, parent, name, 0, *args, **kw) + + def hasValue(self): + return True + + def createValue(self): + return self._parent[self.display] + + def createDisplay(self): + value = self.value + if value is None: + return "<%s>" % MissingField.__name__ + return value.path + + def _getField(self, name, const): + target = self.value + assert self != target + return target._getField(name, const) + + +class Fragments: + def __init__(self, first): + self.first = first + + def __iter__(self): + fragment = self.first + while fragment is not None: + data = fragment.getData() + yield data and data.size + fragment = fragment.next + + +class Fragment(FieldSet): + _first = None + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._field_generator = self._createFields(self._field_generator) + if self.__class__.createFields == Fragment.createFields: + self._getData = lambda: self + + def getData(self): + try: + return self._getData() + except MissingField, e: + self.error(str(e)) + return None + + def setLinks(self, first, next=None): + self._first = first or self + self._next = next + self._feedLinks = lambda: self + return self + + def _feedLinks(self): + while self._first is None and 
self.readMoreFields(1): + pass + if self._first is None: + raise ParserError("first is None") + return self + first = property(lambda self: self._feedLinks()._first) + + def _getNext(self): + next = self._feedLinks()._next + if callable(next): + self._next = next = next() + return next + next = property(_getNext) + + def _createInputStream(self, **args): + first = self.first + if first is self and hasattr(first, "_getData"): + return FragmentedStream(first, packets=Fragments(first), **args) + return FieldSet._createInputStream(self, **args) + + def _createFields(self, field_generator): + if self._first is None: + for field in field_generator: + if self._first is not None: + break + yield field + else: + raise ParserError("Fragment.setLinks not called") + else: + field = None + if self._first is not self: + link = Link(self, "first", None) + link._getValue = lambda: self._first + yield link + if self._next: + link = Link(self, "next", None) + link.createValue = self._getNext + yield link + if field: + yield field + for field in field_generator: + yield field + + def createFields(self): + if self._size is None: + self._size = self._getSize() + yield Bytes(self, "data", self._size/8) + diff --git a/libs/hachoir_core/field/new_seekable_field_set.py b/libs/hachoir_core/field/new_seekable_field_set.py new file mode 100644 index 0000000..d145ab9 --- /dev/null +++ b/libs/hachoir_core/field/new_seekable_field_set.py @@ -0,0 +1,82 @@ +from hachoir_core.field import BasicFieldSet, GenericFieldSet, ParserError, createRawField +from hachoir_core.error import HACHOIR_ERRORS + +# getgaps(int, int, [listof (int, int)]) -> generator of (int, int) +# Gets all the gaps not covered by a block in `blocks` from `start` for `length` units. 
+def getgaps(start, length, blocks): + ''' + Example: + >>> list(getgaps(0, 20, [(15,3), (6,2), (6,2), (1,2), (2,3), (11,2), (9,5)])) + [(0, 1), (5, 1), (8, 1), (14, 1), (18, 2)] + ''' + # done this way to avoid mutating the original + blocks = sorted(blocks, key=lambda b: b[0]) + end = start+length + for s, l in blocks: + if s > start: + yield (start, s-start) + start = s + if s+l > start: + start = s+l + if start < end: + yield (start, end-start) + +class NewRootSeekableFieldSet(GenericFieldSet): + def seekBit(self, address, relative=True): + if not relative: + address -= self.absolute_address + if address < 0: + raise ParserError("Seek below field set start (%s.%s)" % divmod(address, 8)) + self._current_size = address + return None + + def seekByte(self, address, relative=True): + return self.seekBit(address*8, relative) + + def _fixLastField(self): + """ + Try to fix last field when we know current field set size. + Returns new added field if any, or None. + """ + assert self._size is not None + + # Stop parser + message = ["stop parser"] + self._field_generator = None + + # If last field is too big, delete it + while self._size < self._current_size: + field = self._deleteField(len(self._fields)-1) + message.append("delete field %s" % field.path) + assert self._current_size <= self._size + + blocks = [(x.absolute_address, x.size) for x in self._fields] + fields = [] + for start, length in getgaps(self.absolute_address, self._size, blocks): + self.seekBit(start, relative=False) + field = createRawField(self, length, "unparsed[]") + self.setUniqueFieldName(field) + self._fields.append(field.name, field) + fields.append(field) + message.append("found unparsed segment: start %s, length %s" % (start, length)) + + self.seekBit(self._size, relative=False) + message = ", ".join(message) + if fields: + self.warning("[Autofix] Fix parser error: " + message) + return fields + + def _stopFeeding(self): + new_field = None + if self._size is None: + if self._parent: + 
self._size = self._current_size + + new_field = self._fixLastField() + self._field_generator = None + return new_field + +class NewSeekableFieldSet(NewRootSeekableFieldSet): + def __init__(self, parent, name, description=None, size=None): + assert issubclass(parent.__class__, BasicFieldSet) + NewRootSeekableFieldSet.__init__(self, parent, name, parent.stream, description, size) diff --git a/libs/hachoir_core/field/padding.py b/libs/hachoir_core/field/padding.py new file mode 100644 index 0000000..c1c4b8c --- /dev/null +++ b/libs/hachoir_core/field/padding.py @@ -0,0 +1,138 @@ +from hachoir_core.field import Bits, Bytes +from hachoir_core.tools import makePrintable, humanFilesize +from hachoir_core import config + +class PaddingBits(Bits): + """ + Padding bits used, for example, to align address (of next field). + See also NullBits and PaddingBytes types. + + Arguments: + * nbits: Size of the field in bits + + Optional arguments: + * pattern (int): Content pattern, eg. 0 if all bits are set to 0 + """ + static_size = staticmethod(lambda *args, **kw: args[1]) + MAX_SIZE = 128 + + def __init__(self, parent, name, nbits, description="Padding", pattern=None): + Bits.__init__(self, parent, name, nbits, description) + self.pattern = pattern + self._display_pattern = self.checkPattern() + + def checkPattern(self): + if not(config.check_padding_pattern): + return False + if self.pattern != 0: + return False + + if self.MAX_SIZE < self._size: + value = self._parent.stream.readBits( + self.absolute_address, self.MAX_SIZE, self._parent.endian) + else: + value = self.value + if value != 0: + self.warning("padding contents doesn't look normal (invalid pattern)") + return False + if self.MAX_SIZE < self._size: + self.info("only check first %u bits" % self.MAX_SIZE) + return True + + def createDisplay(self): + if self._display_pattern: + return u"" % self.pattern + else: + return Bits.createDisplay(self) + +class PaddingBytes(Bytes): + """ + Padding bytes used, for example, to 
align address (of next field). + See also NullBytes and PaddingBits types. + + Arguments: + * nbytes: Size of the field in bytes + + Optional arguments: + * pattern (str): Content pattern, eg. "\0" for nul bytes + """ + + static_size = staticmethod(lambda *args, **kw: args[1]*8) + MAX_SIZE = 4096 + + def __init__(self, parent, name, nbytes, + description="Padding", pattern=None): + """ pattern is None or repeated string """ + assert (pattern is None) or (isinstance(pattern, str)) + Bytes.__init__(self, parent, name, nbytes, description) + self.pattern = pattern + self._display_pattern = self.checkPattern() + + def checkPattern(self): + if not(config.check_padding_pattern): + return False + if self.pattern is None: + return False + + if self.MAX_SIZE < self._size/8: + self.info("only check first %s of padding" % humanFilesize(self.MAX_SIZE)) + content = self._parent.stream.readBytes( + self.absolute_address, self.MAX_SIZE) + else: + content = self.value + index = 0 + pattern_len = len(self.pattern) + while index < len(content): + if content[index:index+pattern_len] != self.pattern: + self.warning( + "padding contents doesn't look normal" + " (invalid pattern at byte %u)!" + % index) + return False + index += pattern_len + return True + + def createDisplay(self): + if self._display_pattern: + return u"" % makePrintable(self.pattern, "ASCII", quote="'") + else: + return Bytes.createDisplay(self) + + def createRawDisplay(self): + return Bytes.createDisplay(self) + +class NullBits(PaddingBits): + """ + Null padding bits used, for example, to align address (of next field). + See also PaddingBits and NullBytes types. 
+ + Arguments: + * nbits: Size of the field in bits + """ + + def __init__(self, parent, name, nbits, description=None): + PaddingBits.__init__(self, parent, name, nbits, description, pattern=0) + + def createDisplay(self): + if self._display_pattern: + return "" + else: + return Bits.createDisplay(self) + +class NullBytes(PaddingBytes): + """ + Null padding bytes used, for example, to align address (of next field). + See also PaddingBytes and NullBits types. + + Arguments: + * nbytes: Size of the field in bytes + """ + def __init__(self, parent, name, nbytes, description=None): + PaddingBytes.__init__(self, parent, name, nbytes, description, pattern="\0") + + def createDisplay(self): + if self._display_pattern: + return "" + else: + return Bytes.createDisplay(self) + diff --git a/libs/hachoir_core/field/parser.py b/libs/hachoir_core/field/parser.py new file mode 100644 index 0000000..e294e02 --- /dev/null +++ b/libs/hachoir_core/field/parser.py @@ -0,0 +1,40 @@ +from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN +from hachoir_core.field import GenericFieldSet +from hachoir_core.log import Logger +import hachoir_core.config as config + +class Parser(GenericFieldSet): + """ + A parser is the root of all other fields. It create first level of fields + and have special attributes and methods: + - endian: Byte order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}) of input data ; + - stream: Data input stream (set in L{__init__()}) ; + - size: Field set size will be size of input stream. 
+ """ + + def __init__(self, stream, description=None): + """ + Parser constructor + + @param stream: Data input stream (see L{InputStream}) + @param description: (optional) String description + """ + # Check arguments + assert hasattr(self, "endian") \ + and self.endian in (BIG_ENDIAN, LITTLE_ENDIAN) + + # Call parent constructor + GenericFieldSet.__init__(self, None, "root", stream, description, stream.askSize(self)) + + def _logger(self): + return Logger._logger(self) + + def _setSize(self, size): + self._truncate(size) + self.raiseEvent("field-resized", self) + size = property(lambda self: self._size, doc="Size in bits") + + path = property(lambda self: "/") + + # dummy definition to prevent hachoir-core from depending on hachoir-parser + autofix = property(lambda self: config.autofix) diff --git a/libs/hachoir_core/field/seekable_field_set.py b/libs/hachoir_core/field/seekable_field_set.py new file mode 100644 index 0000000..9bc3fbb --- /dev/null +++ b/libs/hachoir_core/field/seekable_field_set.py @@ -0,0 +1,182 @@ +from hachoir_core.field import Field, BasicFieldSet, FakeArray, MissingField, ParserError +from hachoir_core.tools import makeUnicode +from hachoir_core.error import HACHOIR_ERRORS +from itertools import repeat +import hachoir_core.config as config + +class RootSeekableFieldSet(BasicFieldSet): + def __init__(self, parent, name, stream, description, size): + BasicFieldSet.__init__(self, parent, name, stream, description, size) + self._generator = self.createFields() + self._offset = 0 + self._current_size = 0 + if size: + self._current_max_size = size + else: + self._current_max_size = 0 + self._field_dict = {} + self._field_array = [] + + def _feedOne(self): + assert self._generator + field = self._generator.next() + self._addField(field) + return field + + def array(self, key): + return FakeArray(self, key) + + def getFieldByAddress(self, address, feed=True): + for field in self._field_array: + if field.address <= address < field.address + 
field.size: + return field + for field in self._readFields(): + if field.address <= address < field.address + field.size: + return field + return None + + def _stopFeed(self): + self._size = self._current_max_size + self._generator = None + done = property(lambda self: not bool(self._generator)) + + def _getSize(self): + if self._size is None: + self._feedAll() + return self._size + size = property(_getSize) + + def _getField(self, key, const): + field = Field._getField(self, key, const) + if field is not None: + return field + if key in self._field_dict: + return self._field_dict[key] + if self._generator and not const: + try: + while True: + field = self._feedOne() + if field.name == key: + return field + except StopIteration: + self._stopFeed() + except HACHOIR_ERRORS, err: + self.error("Error: %s" % makeUnicode(err)) + self._stopFeed() + return None + + def getField(self, key, const=True): + if isinstance(key, (int, long)): + if key < 0: + raise KeyError("Key must be positive!") + if not const: + self.readFirstFields(key+1) + if len(self._field_array) <= key: + raise MissingField(self, key) + return self._field_array[key] + return Field.getField(self, key, const) + + def _addField(self, field): + if field._name.endswith("[]"): + self.setUniqueFieldName(field) + if config.debug: + self.info("[+] DBG: _addField(%s)" % field.name) + + if field._address != self._offset: + self.warning("Set field %s address to %s (was %s)" % ( + field.path, self._offset//8, field._address//8)) + field._address = self._offset + assert field.name not in self._field_dict + + self._checkFieldSize(field) + + self._field_dict[field.name] = field + self._field_array.append(field) + self._current_size += field.size + self._offset += field.size + self._current_max_size = max(self._current_max_size, field.address + field.size) + + def _checkAddress(self, address): + if self._size is not None: + max_addr = self._size + else: + # FIXME: Use parent size + max_addr = self.stream.size + return 
address < max_addr + + def _checkFieldSize(self, field): + size = field.size + addr = field.address + if not self._checkAddress(addr+size-1): + raise ParserError("Unable to add %s: field is too large" % field.name) + + def seekBit(self, address, relative=True): + if not relative: + address -= self.absolute_address + if address < 0: + raise ParserError("Seek below field set start (%s.%s)" % divmod(address, 8)) + if not self._checkAddress(address): + raise ParserError("Seek above field set end (%s.%s)" % divmod(address, 8)) + self._offset = address + return None + + def seekByte(self, address, relative=True): + return self.seekBit(address*8, relative) + + def readMoreFields(self, number): + return self._readMoreFields(xrange(number)) + + def _feedAll(self): + return self._readMoreFields(repeat(1)) + + def _readFields(self): + while True: + added = self._readMoreFields(xrange(1)) + if not added: + break + yield self._field_array[-1] + + def _readMoreFields(self, index_generator): + added = 0 + if self._generator: + try: + for index in index_generator: + self._feedOne() + added += 1 + except StopIteration: + self._stopFeed() + except HACHOIR_ERRORS, err: + self.error("Error: %s" % makeUnicode(err)) + self._stopFeed() + return added + + current_length = property(lambda self: len(self._field_array)) + current_size = property(lambda self: self._offset) + + def __iter__(self): + for field in self._field_array: + yield field + if self._generator: + try: + while True: + yield self._feedOne() + except StopIteration: + self._stopFeed() + raise StopIteration + + def __len__(self): + if self._generator: + self._feedAll() + return len(self._field_array) + + def nextFieldAddress(self): + return self._offset + + def getFieldIndex(self, field): + return self._field_array.index(field) + +class SeekableFieldSet(RootSeekableFieldSet): + def __init__(self, parent, name, description=None, size=None): + assert issubclass(parent.__class__, BasicFieldSet) + 
RootSeekableFieldSet.__init__(self, parent, name, parent.stream, description, size) + diff --git a/libs/hachoir_core/field/static_field_set.py b/libs/hachoir_core/field/static_field_set.py new file mode 100644 index 0000000..e3897b3 --- /dev/null +++ b/libs/hachoir_core/field/static_field_set.py @@ -0,0 +1,54 @@ +from hachoir_core.field import FieldSet, ParserError + +class StaticFieldSet(FieldSet): + """ + Static field set: format class attribute is a tuple of all fields + in syntax like: + format = ( + (TYPE1, ARG1, ARG2, ...), + (TYPE2, ARG1, ARG2, ..., {KEY1=VALUE1, ...}), + ... + ) + + Types with dynamic size are forbidden, eg. CString, PascalString8, etc. + """ + format = None # You have to redefine this class variable + _class = None + + def __new__(cls, *args, **kw): + assert cls.format is not None, "Class attribute 'format' is not set" + if cls._class is not cls.__name__: + cls._class = cls.__name__ + cls.static_size = cls._computeStaticSize() + return object.__new__(cls, *args, **kw) + + @staticmethod + def _computeItemSize(item): + item_class = item[0] + if item_class.static_size is None: + raise ParserError("Unable to get static size of field type: %s" + % item_class.__name__) + if callable(item_class.static_size): + if isinstance(item[-1], dict): + return item_class.static_size(*item[1:-1], **item[-1]) + else: + return item_class.static_size(*item[1:]) + else: + assert isinstance(item_class.static_size, (int, long)) + return item_class.static_size + + def createFields(self): + for item in self.format: + if isinstance(item[-1], dict): + yield item[0](self, *item[1:-1], **item[-1]) + else: + yield item[0](self, *item[1:]) + + @classmethod + def _computeStaticSize(cls, *args): + return sum(cls._computeItemSize(item) for item in cls.format) + + # Initial value of static_size, it changes when first instance + # is created (see __new__) + static_size = _computeStaticSize + diff --git a/libs/hachoir_core/field/string_field.py 
b/libs/hachoir_core/field/string_field.py new file mode 100644 index 0000000..e44e24d --- /dev/null +++ b/libs/hachoir_core/field/string_field.py @@ -0,0 +1,402 @@ +""" +String field classes: +- String: Fixed length string (no prefix/no suffix) ; +- CString: String which ends with nul byte ("\0") ; +- UnixLine: Unix line of text, string which ends with "\n" ; +- PascalString8, PascalString16, PascalString32: String prefixed with + length written in a 8, 16, 32-bit integer (use parent endian). + +Constructor has optional arguments: +- strip: value can be a string or True ; +- charset: if set, convert string to unicode using this charset (in "replace" + mode which replace all buggy characters with "."). + +Note: For PascalStringXX, prefixed value is the number of bytes and not + of characters! +""" + +from hachoir_core.field import FieldError, Bytes +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_core.tools import alignValue, makePrintable +from hachoir_core.i18n import guessBytesCharset, _ +from hachoir_core import config +from codecs import BOM_UTF16_LE, BOM_UTF16_BE, BOM_UTF32_LE, BOM_UTF32_BE + +# Default charset used to convert byte string to Unicode +# This charset is used if no charset is specified or on conversion error +FALLBACK_CHARSET = "ISO-8859-1" + +class GenericString(Bytes): + """ + Generic string class. + + charset have to be in CHARSET_8BIT or in UTF_CHARSET. 
+ """ + + VALID_FORMATS = ("C", "UnixLine", + "fixed", "Pascal8", "Pascal16", "Pascal32") + + # 8-bit charsets + CHARSET_8BIT = set(( + "ASCII", # ANSI X3.4-1968 + "MacRoman", + "CP037", # EBCDIC 037 + "CP874", # Thai + "WINDOWS-1250", # Central Europe + "WINDOWS-1251", # Cyrillic + "WINDOWS-1252", # Latin I + "WINDOWS-1253", # Greek + "WINDOWS-1254", # Turkish + "WINDOWS-1255", # Hebrew + "WINDOWS-1256", # Arabic + "WINDOWS-1257", # Baltic + "WINDOWS-1258", # Vietnam + "ISO-8859-1", # Latin-1 + "ISO-8859-2", # Latin-2 + "ISO-8859-3", # Latin-3 + "ISO-8859-4", # Latin-4 + "ISO-8859-5", + "ISO-8859-6", + "ISO-8859-7", + "ISO-8859-8", + "ISO-8859-9", # Latin-5 + "ISO-8859-10", # Latin-6 + "ISO-8859-11", # Thai + "ISO-8859-13", # Latin-7 + "ISO-8859-14", # Latin-8 + "ISO-8859-15", # Latin-9 or ("Latin-0") + "ISO-8859-16", # Latin-10 + )) + + # UTF-xx charset familly + UTF_CHARSET = { + "UTF-8": (8, None), + "UTF-16-LE": (16, LITTLE_ENDIAN), + "UTF-32LE": (32, LITTLE_ENDIAN), + "UTF-16-BE": (16, BIG_ENDIAN), + "UTF-32BE": (32, BIG_ENDIAN), + "UTF-16": (16, "BOM"), + "UTF-32": (32, "BOM"), + } + + # UTF-xx BOM => charset with endian + UTF_BOM = { + 16: {BOM_UTF16_LE: "UTF-16-LE", BOM_UTF16_BE: "UTF-16-BE"}, + 32: {BOM_UTF32_LE: "UTF-32LE", BOM_UTF32_BE: "UTF-32BE"}, + } + + # Suffix format: value is suffix (string) + SUFFIX_FORMAT = { + "C": { + 8: {LITTLE_ENDIAN: "\0", BIG_ENDIAN: "\0"}, + 16: {LITTLE_ENDIAN: "\0\0", BIG_ENDIAN: "\0\0"}, + 32: {LITTLE_ENDIAN: "\0\0\0\0", BIG_ENDIAN: "\0\0\0\0"}, + }, + "UnixLine": { + 8: {LITTLE_ENDIAN: "\n", BIG_ENDIAN: "\n"}, + 16: {LITTLE_ENDIAN: "\n\0", BIG_ENDIAN: "\0\n"}, + 32: {LITTLE_ENDIAN: "\n\0\0\0", BIG_ENDIAN: "\0\0\0\n"}, + }, + + } + + # Pascal format: value is the size of the prefix in bits + PASCAL_FORMATS = { + "Pascal8": 1, + "Pascal16": 2, + "Pascal32": 4 + } + + # Raw value: with prefix and suffix, not stripped, + # and not converted to Unicode + _raw_value = None + + def __init__(self, parent, name, format, 
description=None, + strip=None, charset=None, nbytes=None, truncate=None): + Bytes.__init__(self, parent, name, 1, description) + + # Is format valid? + assert format in self.VALID_FORMATS + + # Store options + self._format = format + self._strip = strip + self._truncate = truncate + + # Check charset and compute character size in bytes + # (or None when it's not possible to guess character size) + if not charset or charset in self.CHARSET_8BIT: + self._character_size = 1 # one byte per character + elif charset in self.UTF_CHARSET: + self._character_size = None + else: + raise FieldError("Invalid charset for %s: \"%s\"" % + (self.path, charset)) + self._charset = charset + + # It is a fixed string? + if nbytes is not None: + assert self._format == "fixed" + # Arbitrary limits, just to catch some bugs... + if not (1 <= nbytes <= 0xffff): + raise FieldError("Invalid string size for %s: %s" % + (self.path, nbytes)) + self._content_size = nbytes # content length in bytes + self._size = nbytes * 8 + self._content_offset = 0 + else: + # Format with a suffix: Find the end of the string + if self._format in self.SUFFIX_FORMAT: + self._content_offset = 0 + + # Choose the suffix + suffix = self.suffix_str + + # Find the suffix + length = self._parent.stream.searchBytesLength( + suffix, False, self.absolute_address) + if length is None: + raise FieldError("Unable to find end of string %s (format %s)!" 
+ % (self.path, self._format)) + if 1 < len(suffix): + # Fix length for little endian bug with UTF-xx charset: + # u"abc" -> "a\0b\0c\0\0\0" (UTF-16-LE) + # search returns length=5, whereas real lenght is 6 + length = alignValue(length, len(suffix)) + + # Compute sizes + self._content_size = length # in bytes + self._size = (length + len(suffix)) * 8 + + # Format with a prefix: Read prefixed length in bytes + else: + assert self._format in self.PASCAL_FORMATS + + # Get the prefix size + prefix_size = self.PASCAL_FORMATS[self._format] + self._content_offset = prefix_size + + # Read the prefix and compute sizes + value = self._parent.stream.readBits( + self.absolute_address, prefix_size*8, self._parent.endian) + self._content_size = value # in bytes + self._size = (prefix_size + value) * 8 + + # For UTF-16 and UTF-32, choose the right charset using BOM + if self._charset in self.UTF_CHARSET: + # Charset requires a BOM? + bomsize, endian = self.UTF_CHARSET[self._charset] + if endian == "BOM": + # Read the BOM value + nbytes = bomsize // 8 + bom = self._parent.stream.readBytes(self.absolute_address, nbytes) + + # Choose right charset using the BOM + bom_endian = self.UTF_BOM[bomsize] + if bom not in bom_endian: + raise FieldError("String %s has invalid BOM (%s)!" 
+ % (self.path, repr(bom))) + self._charset = bom_endian[bom] + self._content_size -= nbytes + self._content_offset += nbytes + + # Compute length in character if possible + if self._character_size: + self._length = self._content_size // self._character_size + else: + self._length = None + + @staticmethod + def staticSuffixStr(format, charset, endian): + if format not in GenericString.SUFFIX_FORMAT: + return '' + suffix = GenericString.SUFFIX_FORMAT[format] + if charset in GenericString.UTF_CHARSET: + suffix_size = GenericString.UTF_CHARSET[charset][0] + suffix = suffix[suffix_size] + else: + suffix = suffix[8] + return suffix[endian] + + def _getSuffixStr(self): + return self.staticSuffixStr( + self._format, self._charset, self._parent.endian) + suffix_str = property(_getSuffixStr) + + def _convertText(self, text): + if not self._charset: + # charset is still unknown: guess the charset + self._charset = guessBytesCharset(text, default=FALLBACK_CHARSET) + + # Try to convert to Unicode + try: + return unicode(text, self._charset, "strict") + except UnicodeDecodeError, err: + pass + + #--- Conversion error --- + + # Fix truncated UTF-16 string like 'B\0e' (3 bytes) + # => Add missing nul byte: 'B\0e\0' (4 bytes) + if err.reason == "truncated data" \ + and err.end == len(text) \ + and self._charset == "UTF-16-LE": + try: + text = unicode(text+"\0", self._charset, "strict") + self.warning("Fix truncated %s string: add missing nul byte" % self._charset) + return text + except UnicodeDecodeError, err: + pass + + # On error, use FALLBACK_CHARSET + self.warning(u"Unable to convert string to Unicode: %s" % err) + return unicode(text, FALLBACK_CHARSET, "strict") + + def _guessCharset(self): + addr = self.absolute_address + self._content_offset * 8 + bytes = self._parent.stream.readBytes(addr, self._content_size) + return guessBytesCharset(bytes, default=FALLBACK_CHARSET) + + def createValue(self, human=True): + # Compress data address (in bits) and size (in bytes) + if 
human: + addr = self.absolute_address + self._content_offset * 8 + size = self._content_size + else: + addr = self.absolute_address + size = self._size // 8 + if size == 0: + # Empty string + return u"" + + # Read bytes in data stream + text = self._parent.stream.readBytes(addr, size) + + # Don't transform data? + if not human: + return text + + # Convert text to Unicode + text = self._convertText(text) + + # Truncate + if self._truncate: + pos = text.find(self._truncate) + if 0 <= pos: + text = text[:pos] + + # Strip string if needed + if self._strip: + if isinstance(self._strip, (str, unicode)): + text = text.strip(self._strip) + else: + text = text.strip() + assert isinstance(text, unicode) + return text + + def createDisplay(self, human=True): + if not human: + if self._raw_value is None: + self._raw_value = GenericString.createValue(self, False) + value = makePrintable(self._raw_value, "ASCII", to_unicode=True) + elif self._charset: + value = makePrintable(self.value, "ISO-8859-1", to_unicode=True) + else: + value = self.value + if config.max_string_length < len(value): + # Truncate string if needed + value = "%s(...)" % value[:config.max_string_length] + if not self._charset or not human: + return makePrintable(value, "ASCII", quote='"', to_unicode=True) + else: + if value: + return '"%s"' % value.replace('"', '\\"') + else: + return _("(empty)") + + def createRawDisplay(self): + return GenericString.createDisplay(self, human=False) + + def _getLength(self): + if self._length is None: + self._length = len(self.value) + return self._length + length = property(_getLength, doc="String length in characters") + + def _getFormat(self): + return self._format + format = property(_getFormat, doc="String format (eg. 'C')") + + def _getCharset(self): + if not self._charset: + self._charset = self._guessCharset() + return self._charset + charset = property(_getCharset, doc="String charset (eg. 
'ISO-8859-1')") + + def _getContentSize(self): + return self._content_size + content_size = property(_getContentSize, doc="Content size in bytes") + + def _getContentOffset(self): + return self._content_offset + content_offset = property(_getContentOffset, doc="Content offset in bytes") + + def getFieldType(self): + info = self.charset + if self._strip: + if isinstance(self._strip, (str, unicode)): + info += ",strip=%s" % makePrintable(self._strip, "ASCII", quote="'") + else: + info += ",strip=True" + return "%s<%s>" % (Bytes.getFieldType(self), info) + +def stringFactory(name, format, doc): + class NewString(GenericString): + __doc__ = doc + def __init__(self, parent, name, description=None, + strip=None, charset=None, truncate=None): + GenericString.__init__(self, parent, name, format, description, + strip=strip, charset=charset, truncate=truncate) + cls = NewString + cls.__name__ = name + return cls + +# String which ends with nul byte ("\0") +CString = stringFactory("CString", "C", + r"""C string: string ending with nul byte. +See GenericString to get more information.""") + +# Unix line of text: string which ends with "\n" (ASCII 0x0A) +UnixLine = stringFactory("UnixLine", "UnixLine", + r"""Unix line: string ending with "\n" (ASCII code 10). +See GenericString to get more information.""") + +# String prefixed with length written in a 8-bit integer +PascalString8 = stringFactory("PascalString8", "Pascal8", + r"""Pascal string: string prefixed with 8-bit integer containing its length (endian depends on parent endian). +See GenericString to get more information.""") + +# String prefixed with length written in a 16-bit integer (use parent endian) +PascalString16 = stringFactory("PascalString16", "Pascal16", + r"""Pascal string: string prefixed with 16-bit integer containing its length (endian depends on parent endian). 
+See GenericString to get more information.""") + +# String prefixed with length written in a 32-bit integer (use parent endian) +PascalString32 = stringFactory("PascalString32", "Pascal32", + r"""Pascal string: string prefixed with 32-bit integer containing its length (endian depends on parent endian). +See GenericString to get more information.""") + + +class String(GenericString): + """ + String with fixed size (size in bytes). + See GenericString to get more information. + """ + static_size = staticmethod(lambda *args, **kw: args[1]*8) + + def __init__(self, parent, name, nbytes, description=None, + strip=None, charset=None, truncate=None): + GenericString.__init__(self, parent, name, "fixed", description, + strip=strip, charset=charset, nbytes=nbytes, truncate=truncate) +String.__name__ = "FixedString" + diff --git a/libs/hachoir_core/field/sub_file.py b/libs/hachoir_core/field/sub_file.py new file mode 100644 index 0000000..0f2912d --- /dev/null +++ b/libs/hachoir_core/field/sub_file.py @@ -0,0 +1,72 @@ +from hachoir_core.field import Bytes +from hachoir_core.tools import makePrintable, humanFilesize +from hachoir_core.stream import InputIOStream + +class SubFile(Bytes): + """ + File stored in another file + """ + def __init__(self, parent, name, length, description=None, + parser=None, filename=None, mime_type=None, parser_class=None): + if filename: + if not isinstance(filename, unicode): + filename = makePrintable(filename, "ISO-8859-1") + if not description: + description = 'File "%s" (%s)' % (filename, humanFilesize(length)) + Bytes.__init__(self, parent, name, length, description) + def createInputStream(cis, **args): + tags = args.setdefault("tags",[]) + if parser_class: + tags.append(( "class", parser_class )) + if parser is not None: + tags.append(( "id", parser.PARSER_TAGS["id"] )) + if mime_type: + tags.append(( "mime", mime_type )) + if filename: + tags.append(( "filename", filename )) + return cis(**args) + self.setSubIStream(createInputStream) + 
+class CompressedStream: + offset = 0 + + def __init__(self, stream, decompressor): + self.stream = stream + self.decompressor = decompressor(stream) + self._buffer = '' + + def read(self, size): + d = self._buffer + data = [ d[:size] ] + size -= len(d) + if size > 0: + d = self.decompressor(size) + data.append(d[:size]) + size -= len(d) + while size > 0: + n = 4096 + if self.stream.size: + n = min(self.stream.size - self.offset, n) + if not n: + break + d = self.stream.read(self.offset, n)[1] + self.offset += 8 * len(d) + d = self.decompressor(size, d) + data.append(d[:size]) + size -= len(d) + self._buffer = d[size+len(d):] + return ''.join(data) + +def CompressedField(field, decompressor): + def createInputStream(cis, source=None, **args): + if field._parent: + stream = cis(source=source) + args.setdefault("tags", []).extend(stream.tags) + else: + stream = field.stream + input = CompressedStream(stream, decompressor) + if source is None: + source = "Compressed source: '%s' (offset=%s)" % (stream.source, field.absolute_address) + return InputIOStream(input, source=source, **args) + field.setSubIStream(createInputStream) + return field diff --git a/libs/hachoir_core/field/timestamp.py b/libs/hachoir_core/field/timestamp.py new file mode 100644 index 0000000..a533a4b --- /dev/null +++ b/libs/hachoir_core/field/timestamp.py @@ -0,0 +1,86 @@ +from hachoir_core.tools import (humanDatetime, humanDuration, + timestampUNIX, timestampMac32, timestampUUID60, + timestampWin64, durationWin64) +from hachoir_core.field import Bits, FieldSet +from datetime import datetime + +class GenericTimestamp(Bits): + def __init__(self, parent, name, size, description=None): + Bits.__init__(self, parent, name, size, description) + + def createDisplay(self): + return humanDatetime(self.value) + + def createRawDisplay(self): + value = Bits.createValue(self) + return unicode(value) + + def __nonzero__(self): + return Bits.createValue(self) != 0 + +def timestampFactory(cls_name, handler, 
size): + class Timestamp(GenericTimestamp): + def __init__(self, parent, name, description=None): + GenericTimestamp.__init__(self, parent, name, size, description) + + def createValue(self): + value = Bits.createValue(self) + return handler(value) + cls = Timestamp + cls.__name__ = cls_name + return cls + +TimestampUnix32 = timestampFactory("TimestampUnix32", timestampUNIX, 32) +TimestampUnix64 = timestampFactory("TimestampUnix64", timestampUNIX, 64) +TimestampMac32 = timestampFactory("TimestampUnix32", timestampMac32, 32) +TimestampUUID60 = timestampFactory("TimestampUUID60", timestampUUID60, 60) +TimestampWin64 = timestampFactory("TimestampWin64", timestampWin64, 64) + +class TimeDateMSDOS32(FieldSet): + """ + 32-bit MS-DOS timestamp (16-bit time, 16-bit date) + """ + static_size = 32 + + def createFields(self): + # TODO: Create type "MSDOS_Second" : value*2 + yield Bits(self, "second", 5, "Second/2") + yield Bits(self, "minute", 6) + yield Bits(self, "hour", 5) + + yield Bits(self, "day", 5) + yield Bits(self, "month", 4) + # TODO: Create type "MSDOS_Year" : value+1980 + yield Bits(self, "year", 7, "Number of year after 1980") + + def createValue(self): + return datetime( + 1980+self["year"].value, self["month"].value, self["day"].value, + self["hour"].value, self["minute"].value, 2*self["second"].value) + + def createDisplay(self): + return humanDatetime(self.value) + +class DateTimeMSDOS32(TimeDateMSDOS32): + """ + 32-bit MS-DOS timestamp (16-bit date, 16-bit time) + """ + def createFields(self): + yield Bits(self, "day", 5) + yield Bits(self, "month", 4) + yield Bits(self, "year", 7, "Number of year after 1980") + yield Bits(self, "second", 5, "Second/2") + yield Bits(self, "minute", 6) + yield Bits(self, "hour", 5) + +class TimedeltaWin64(GenericTimestamp): + def __init__(self, parent, name, description=None): + GenericTimestamp.__init__(self, parent, name, 64, description) + + def createDisplay(self): + return humanDuration(self.value) + + def 
createValue(self): + value = Bits.createValue(self) + return durationWin64(value) + diff --git a/libs/hachoir_core/field/vector.py b/libs/hachoir_core/field/vector.py new file mode 100644 index 0000000..953fdbc --- /dev/null +++ b/libs/hachoir_core/field/vector.py @@ -0,0 +1,38 @@ +from hachoir_core.field import Field, FieldSet, ParserError + +class GenericVector(FieldSet): + def __init__(self, parent, name, nb_items, item_class, item_name="item", description=None): + # Sanity checks + assert issubclass(item_class, Field) + assert isinstance(item_class.static_size, (int, long)) + if not(0 < nb_items): + raise ParserError('Unable to create empty vector "%s" in %s' \ + % (name, parent.path)) + size = nb_items * item_class.static_size + self.__nb_items = nb_items + self._item_class = item_class + self._item_name = item_name + FieldSet.__init__(self, parent, name, description, size=size) + + def __len__(self): + return self.__nb_items + + def createFields(self): + name = self._item_name + "[]" + parser = self._item_class + for index in xrange(len(self)): + yield parser(self, name) + +class UserVector(GenericVector): + """ + To implement: + - item_name: name of a field without [] (eg. "color" becomes "color[0]"), + default value is "item" + - item_class: class of an item + """ + item_class = None + item_name = "item" + + def __init__(self, parent, name, nb_items, description=None): + GenericVector.__init__(self, parent, name, nb_items, self.item_class, self.item_name, description) + diff --git a/libs/hachoir_core/i18n.py b/libs/hachoir_core/i18n.py new file mode 100644 index 0000000..b34c748 --- /dev/null +++ b/libs/hachoir_core/i18n.py @@ -0,0 +1,214 @@ +# -*- coding: UTF-8 -*- +""" +Functions to manage internationalisation (i18n): +- initLocale(): setup locales and install Unicode compatible stdout and + stderr ; +- getTerminalCharset(): guess terminal charset ; +- gettext(text) translate a string to current language. The function always + returns Unicode string. 
import hachoir_core.config as config
import hachoir_core
import locale
from os import path
import sys
from codecs import BOM_UTF8, BOM_UTF16_LE, BOM_UTF16_BE

def _getTerminalCharset():
    """
    Guess the terminal charset; helper for getTerminalCharset().

    @see getTerminalCharset()
    """
    # Try locale.getpreferredencoding(), then locale.nl_langinfo(CODESET);
    # ignore platforms where either is missing or fails.
    for probe in (locale.getpreferredencoding,
                  lambda: locale.nl_langinfo(locale.CODESET)):
        try:
            charset = probe()
        except (locale.Error, AttributeError):
            continue
        if charset:
            return charset

    # Fall back on sys.stdout.encoding when set.
    charset = getattr(sys.stdout, "encoding", None)
    if charset:
        return charset

    # Last resort.
    return "ASCII"

def getTerminalCharset():
    """
    Guess terminal charset using different tests:
    1. Try locale.getpreferredencoding()
    2. Try locale.nl_langinfo(CODESET)
    3. Try sys.stdout.encoding
    4. Otherwise, returns "ASCII"

    WARNING: Call initLocale() before calling this function.
    """
    # Cache the result on the function object: the charset never changes
    # during the process lifetime.
    if not hasattr(getTerminalCharset, "value"):
        getTerminalCharset.value = _getTerminalCharset()
    return getTerminalCharset.value

class UnicodeStdout(object):
    """Wrap a byte stream so unicode strings are encoded before writing."""

    def __init__(self, old_device, charset):
        self.device = old_device
        self.charset = charset

    def flush(self):
        self.device.flush()

    def write(self, text):
        # Encode unicode with the terminal charset; byte strings pass through.
        if isinstance(text, unicode):
            text = text.encode(self.charset, 'replace')
        self.device.write(text)

    def writelines(self, lines):
        for line in lines:
            self.write(line)

def initLocale():
    """Set LC_ALL from the environment and wrap stdout/stderr; idempotent.

    Returns the terminal charset."""
    if initLocale.is_done:
        return getTerminalCharset()
    initLocale.is_done = True

    # Setup locales; failures are not fatal.
    try:
        locale.setlocale(locale.LC_ALL, "")
    except (locale.Error, IOError):
        pass

    charset = getTerminalCharset()

    # UnicodeStdout conflicts with the readline module
    if config.unicode_stdout and ('readline' not in sys.modules):
        # Replace stdout and stderr by objects accepting unicode strings.
        sys.stdout = UnicodeStdout(sys.stdout, charset)
        sys.stderr = UnicodeStdout(sys.stderr, charset)
    return charset
initLocale.is_done = False

def _dummy_gettext(text):
    """Identity translation, returned as unicode."""
    return unicode(text)

def _dummy_ngettext(singular, plural, count):
    """English-style plural choice: plural for 0 and for |count| > 1."""
    if abs(count) > 1 or not count:
        return unicode(plural)
    return unicode(singular)

def _initGettext():
    """Build (gettext, ngettext) callables returning unicode.

    Falls back on dummy implementations when i18n is disabled in the
    configuration or the gettext module is unavailable."""
    charset = initLocale()

    gettext = None
    if config.use_i18n:
        try:
            import gettext
        except ImportError:
            gettext = None
    if gettext is None:
        return (_dummy_gettext, _dummy_ngettext)

    # Bind the hachoir message catalog.
    package = hachoir_core.PACKAGE
    locale_dir = path.join(path.dirname(__file__), "..", "locale")
    gettext.bindtextdomain(package, locale_dir)
    gettext.textdomain(package)
    translate = gettext.gettext
    ngettext = gettext.ngettext

    # TODO: find a native way to get unicode translations directly
    def unicode_gettext(text):
        return unicode(translate(text), charset)
    def unicode_ngettext(singular, plural, count):
        return unicode(ngettext(singular, plural, count), charset)
    return (unicode_gettext, unicode_ngettext)
+ try: + text = unicode(bytes, 'UTF-8', 'strict') + return 'UTF-8' + except UnicodeDecodeError: + pass + + # Create a set of non-ASCII characters + non_ascii_set = set( byte for byte in bytes if ord(byte) >= 128 ) + for characters, charset in CHARSET_CHARACTERS: + if characters.issuperset(non_ascii_set): + return charset + return default + +# Initialize _(), gettext() and ngettext() functions +gettext, ngettext = _initGettext() +_ = gettext + diff --git a/libs/hachoir_core/iso639.py b/libs/hachoir_core/iso639.py new file mode 100644 index 0000000..61a0ba9 --- /dev/null +++ b/libs/hachoir_core/iso639.py @@ -0,0 +1,558 @@ +# -*- coding: utf-8 -*- +""" +ISO639-2 standart: the module only contains the dictionary ISO639_2 +which maps a language code in three letters (eg. "fre") to a language +name in english (eg. "French"). +""" + +# ISO-639, the list comes from: +# http://www.loc.gov/standards/iso639-2/php/English_list.php +_ISO639 = ( + (u"Abkhazian", "abk", "ab"), + (u"Achinese", "ace", None), + (u"Acoli", "ach", None), + (u"Adangme", "ada", None), + (u"Adygei", "ady", None), + (u"Adyghe", "ady", None), + (u"Afar", "aar", "aa"), + (u"Afrihili", "afh", None), + (u"Afrikaans", "afr", "af"), + (u"Afro-Asiatic (Other)", "afa", None), + (u"Ainu", "ain", None), + (u"Akan", "aka", "ak"), + (u"Akkadian", "akk", None), + (u"Albanian", "alb/sqi", "sq"), + (u"Alemani", "gsw", None), + (u"Aleut", "ale", None), + (u"Algonquian languages", "alg", None), + (u"Altaic (Other)", "tut", None), + (u"Amharic", "amh", "am"), + (u"Angika", "anp", None), + (u"Apache languages", "apa", None), + (u"Arabic", "ara", "ar"), + (u"Aragonese", "arg", "an"), + (u"Aramaic", "arc", None), + (u"Arapaho", "arp", None), + (u"Araucanian", "arn", None), + (u"Arawak", "arw", None), + (u"Armenian", "arm/hye", "hy"), + (u"Aromanian", "rup", None), + (u"Artificial (Other)", "art", None), + (u"Arumanian", "rup", None), + (u"Assamese", "asm", "as"), + (u"Asturian", "ast", None), + (u"Athapascan languages", 
"ath", None), + (u"Australian languages", "aus", None), + (u"Austronesian (Other)", "map", None), + (u"Avaric", "ava", "av"), + (u"Avestan", "ave", "ae"), + (u"Awadhi", "awa", None), + (u"Aymara", "aym", "ay"), + (u"Azerbaijani", "aze", "az"), + (u"Bable", "ast", None), + (u"Balinese", "ban", None), + (u"Baltic (Other)", "bat", None), + (u"Baluchi", "bal", None), + (u"Bambara", "bam", "bm"), + (u"Bamileke languages", "bai", None), + (u"Banda", "bad", None), + (u"Bantu (Other)", "bnt", None), + (u"Basa", "bas", None), + (u"Bashkir", "bak", "ba"), + (u"Basque", "baq/eus", "eu"), + (u"Batak (Indonesia)", "btk", None), + (u"Beja", "bej", None), + (u"Belarusian", "bel", "be"), + (u"Bemba", "bem", None), + (u"Bengali", "ben", "bn"), + (u"Berber (Other)", "ber", None), + (u"Bhojpuri", "bho", None), + (u"Bihari", "bih", "bh"), + (u"Bikol", "bik", None), + (u"Bilin", "byn", None), + (u"Bini", "bin", None), + (u"Bislama", "bis", "bi"), + (u"Blin", "byn", None), + (u"Bokmål, Norwegian", "nob", "nb"), + (u"Bosnian", "bos", "bs"), + (u"Braj", "bra", None), + (u"Breton", "bre", "br"), + (u"Buginese", "bug", None), + (u"Bulgarian", "bul", "bg"), + (u"Buriat", "bua", None), + (u"Burmese", "bur/mya", "my"), + (u"Caddo", "cad", None), + (u"Carib", "car", None), + (u"Castilian", "spa", "es"), + (u"Catalan", "cat", "ca"), + (u"Caucasian (Other)", "cau", None), + (u"Cebuano", "ceb", None), + (u"Celtic (Other)", "cel", None), + (u"Central American Indian (Other)", "cai", None), + (u"Chagatai", "chg", None), + (u"Chamic languages", "cmc", None), + (u"Chamorro", "cha", "ch"), + (u"Chechen", "che", "ce"), + (u"Cherokee", "chr", None), + (u"Chewa", "nya", "ny"), + (u"Cheyenne", "chy", None), + (u"Chibcha", "chb", None), + (u"Chichewa", "nya", "ny"), + (u"Chinese", "chi/zho", "zh"), + (u"Chinook jargon", "chn", None), + (u"Chipewyan", "chp", None), + (u"Choctaw", "cho", None), + (u"Chuang", "zha", "za"), + (u"Church Slavic", "chu", "cu"), + (u"Church Slavonic", "chu", "cu"), + (u"Chuukese", 
"chk", None), + (u"Chuvash", "chv", "cv"), + (u"Classical Nepal Bhasa", "nwc", None), + (u"Classical Newari", "nwc", None), + (u"Coptic", "cop", None), + (u"Cornish", "cor", "kw"), + (u"Corsican", "cos", "co"), + (u"Cree", "cre", "cr"), + (u"Creek", "mus", None), + (u"Creoles and pidgins (Other)", "crp", None), + (u"Creoles and pidgins, English based (Other)", "cpe", None), + (u"Creoles and pidgins, French-based (Other)", "cpf", None), + (u"Creoles and pidgins, Portuguese-based (Other)", "cpp", None), + (u"Crimean Tatar", "crh", None), + (u"Crimean Turkish", "crh", None), + (u"Croatian", "scr/hrv", "hr"), + (u"Cushitic (Other)", "cus", None), + (u"Czech", "cze/ces", "cs"), + (u"Dakota", "dak", None), + (u"Danish", "dan", "da"), + (u"Dargwa", "dar", None), + (u"Dayak", "day", None), + (u"Delaware", "del", None), + (u"Dhivehi", "div", "dv"), + (u"Dimili", "zza", None), + (u"Dimli", "zza", None), + (u"Dinka", "din", None), + (u"Divehi", "div", "dv"), + (u"Dogri", "doi", None), + (u"Dogrib", "dgr", None), + (u"Dravidian (Other)", "dra", None), + (u"Duala", "dua", None), + (u"Dutch", "dut/nld", "nl"), + (u"Dutch, Middle (ca.1050-1350)", "dum", None), + (u"Dyula", "dyu", None), + (u"Dzongkha", "dzo", "dz"), + (u"Eastern Frisian", "frs", None), + (u"Efik", "efi", None), + (u"Egyptian (Ancient)", "egy", None), + (u"Ekajuk", "eka", None), + (u"Elamite", "elx", None), + (u"English", "eng", "en"), + (u"English, Middle (1100-1500)", "enm", None), + (u"English, Old (ca.450-1100)", "ang", None), + (u"Erzya", "myv", None), + (u"Esperanto", "epo", "eo"), + (u"Estonian", "est", "et"), + (u"Ewe", "ewe", "ee"), + (u"Ewondo", "ewo", None), + (u"Fang", "fan", None), + (u"Fanti", "fat", None), + (u"Faroese", "fao", "fo"), + (u"Fijian", "fij", "fj"), + (u"Filipino", "fil", None), + (u"Finnish", "fin", "fi"), + (u"Finno-Ugrian (Other)", "fiu", None), + (u"Flemish", "dut/nld", "nl"), + (u"Fon", "fon", None), + (u"French", "fre/fra", "fr"), + (u"French, Middle (ca.1400-1600)", "frm", None), 
+ (u"French, Old (842-ca.1400)", "fro", None), + (u"Friulian", "fur", None), + (u"Fulah", "ful", "ff"), + (u"Ga", "gaa", None), + (u"Gaelic", "gla", "gd"), + (u"Galician", "glg", "gl"), + (u"Ganda", "lug", "lg"), + (u"Gayo", "gay", None), + (u"Gbaya", "gba", None), + (u"Geez", "gez", None), + (u"Georgian", "geo/kat", "ka"), + (u"German", "ger/deu", "de"), + (u"German, Low", "nds", None), + (u"German, Middle High (ca.1050-1500)", "gmh", None), + (u"German, Old High (ca.750-1050)", "goh", None), + (u"Germanic (Other)", "gem", None), + (u"Gikuyu", "kik", "ki"), + (u"Gilbertese", "gil", None), + (u"Gondi", "gon", None), + (u"Gorontalo", "gor", None), + (u"Gothic", "got", None), + (u"Grebo", "grb", None), + (u"Greek, Ancient (to 1453)", "grc", None), + (u"Greek, Modern (1453-)", "gre/ell", "el"), + (u"Greenlandic", "kal", "kl"), + (u"Guarani", "grn", "gn"), + (u"Gujarati", "guj", "gu"), + (u"Gwich´in", "gwi", None), + (u"Haida", "hai", None), + (u"Haitian", "hat", "ht"), + (u"Haitian Creole", "hat", "ht"), + (u"Hausa", "hau", "ha"), + (u"Hawaiian", "haw", None), + (u"Hebrew", "heb", "he"), + (u"Herero", "her", "hz"), + (u"Hiligaynon", "hil", None), + (u"Himachali", "him", None), + (u"Hindi", "hin", "hi"), + (u"Hiri Motu", "hmo", "ho"), + (u"Hittite", "hit", None), + (u"Hmong", "hmn", None), + (u"Hungarian", "hun", "hu"), + (u"Hupa", "hup", None), + (u"Iban", "iba", None), + (u"Icelandic", "ice/isl", "is"), + (u"Ido", "ido", "io"), + (u"Igbo", "ibo", "ig"), + (u"Ijo", "ijo", None), + (u"Iloko", "ilo", None), + (u"Inari Sami", "smn", None), + (u"Indic (Other)", "inc", None), + (u"Indo-European (Other)", "ine", None), + (u"Indonesian", "ind", "id"), + (u"Ingush", "inh", None), + (u"Interlingua", "ina", "ia"), + (u"Interlingue", "ile", "ie"), + (u"Inuktitut", "iku", "iu"), + (u"Inupiaq", "ipk", "ik"), + (u"Iranian (Other)", "ira", None), + (u"Irish", "gle", "ga"), + (u"Irish, Middle (900-1200)", "mga", None), + (u"Irish, Old (to 900)", "sga", None), + (u"Iroquoian 
languages", "iro", None), + (u"Italian", "ita", "it"), + (u"Japanese", "jpn", "ja"), + (u"Javanese", "jav", "jv"), + (u"Judeo-Arabic", "jrb", None), + (u"Judeo-Persian", "jpr", None), + (u"Kabardian", "kbd", None), + (u"Kabyle", "kab", None), + (u"Kachin", "kac", None), + (u"Kalaallisut", "kal", "kl"), + (u"Kalmyk", "xal", None), + (u"Kamba", "kam", None), + (u"Kannada", "kan", "kn"), + (u"Kanuri", "kau", "kr"), + (u"Kara-Kalpak", "kaa", None), + (u"Karachay-Balkar", "krc", None), + (u"Karelian", "krl", None), + (u"Karen", "kar", None), + (u"Kashmiri", "kas", "ks"), + (u"Kashubian", "csb", None), + (u"Kawi", "kaw", None), + (u"Kazakh", "kaz", "kk"), + (u"Khasi", "kha", None), + (u"Khmer", "khm", "km"), + (u"Khoisan (Other)", "khi", None), + (u"Khotanese", "kho", None), + (u"Kikuyu", "kik", "ki"), + (u"Kimbundu", "kmb", None), + (u"Kinyarwanda", "kin", "rw"), + (u"Kirdki", "zza", None), + (u"Kirghiz", "kir", "ky"), + (u"Kirmanjki", "zza", None), + (u"Klingon", "tlh", None), + (u"Komi", "kom", "kv"), + (u"Kongo", "kon", "kg"), + (u"Konkani", "kok", None), + (u"Korean", "kor", "ko"), + (u"Kosraean", "kos", None), + (u"Kpelle", "kpe", None), + (u"Kru", "kro", None), + (u"Kuanyama", "kua", "kj"), + (u"Kumyk", "kum", None), + (u"Kurdish", "kur", "ku"), + (u"Kurukh", "kru", None), + (u"Kutenai", "kut", None), + (u"Kwanyama", "kua", "kj"), + (u"Ladino", "lad", None), + (u"Lahnda", "lah", None), + (u"Lamba", "lam", None), + (u"Lao", "lao", "lo"), + (u"Latin", "lat", "la"), + (u"Latvian", "lav", "lv"), + (u"Letzeburgesch", "ltz", "lb"), + (u"Lezghian", "lez", None), + (u"Limburgan", "lim", "li"), + (u"Limburger", "lim", "li"), + (u"Limburgish", "lim", "li"), + (u"Lingala", "lin", "ln"), + (u"Lithuanian", "lit", "lt"), + (u"Lojban", "jbo", None), + (u"Low German", "nds", None), + (u"Low Saxon", "nds", None), + (u"Lower Sorbian", "dsb", None), + (u"Lozi", "loz", None), + (u"Luba-Katanga", "lub", "lu"), + (u"Luba-Lulua", "lua", None), + (u"Luiseno", "lui", None), + (u"Lule 
Sami", "smj", None), + (u"Lunda", "lun", None), + (u"Luo (Kenya and Tanzania)", "luo", None), + (u"Lushai", "lus", None), + (u"Luxembourgish", "ltz", "lb"), + (u"Macedo-Romanian", "rup", None), + (u"Macedonian", "mac/mkd", "mk"), + (u"Madurese", "mad", None), + (u"Magahi", "mag", None), + (u"Maithili", "mai", None), + (u"Makasar", "mak", None), + (u"Malagasy", "mlg", "mg"), + (u"Malay", "may/msa", "ms"), + (u"Malayalam", "mal", "ml"), + (u"Maldivian", "div", "dv"), + (u"Maltese", "mlt", "mt"), + (u"Manchu", "mnc", None), + (u"Mandar", "mdr", None), + (u"Mandingo", "man", None), + (u"Manipuri", "mni", None), + (u"Manobo languages", "mno", None), + (u"Manx", "glv", "gv"), + (u"Maori", "mao/mri", "mi"), + (u"Marathi", "mar", "mr"), + (u"Mari", "chm", None), + (u"Marshallese", "mah", "mh"), + (u"Marwari", "mwr", None), + (u"Masai", "mas", None), + (u"Mayan languages", "myn", None), + (u"Mende", "men", None), + (u"Mi'kmaq", "mic", None), + (u"Micmac", "mic", None), + (u"Minangkabau", "min", None), + (u"Mirandese", "mwl", None), + (u"Miscellaneous languages", "mis", None), + (u"Mohawk", "moh", None), + (u"Moksha", "mdf", None), + (u"Moldavian", "mol", "mo"), + (u"Mon-Khmer (Other)", "mkh", None), + (u"Mongo", "lol", None), + (u"Mongolian", "mon", "mn"), + (u"Mossi", "mos", None), + (u"Multiple languages", "mul", None), + (u"Munda languages", "mun", None), + (u"N'Ko", "nqo", None), + (u"Nahuatl", "nah", None), + (u"Nauru", "nau", "na"), + (u"Navaho", "nav", "nv"), + (u"Navajo", "nav", "nv"), + (u"Ndebele, North", "nde", "nd"), + (u"Ndebele, South", "nbl", "nr"), + (u"Ndonga", "ndo", "ng"), + (u"Neapolitan", "nap", None), + (u"Nepal Bhasa", "new", None), + (u"Nepali", "nep", "ne"), + (u"Newari", "new", None), + (u"Nias", "nia", None), + (u"Niger-Kordofanian (Other)", "nic", None), + (u"Nilo-Saharan (Other)", "ssa", None), + (u"Niuean", "niu", None), + (u"No linguistic content", "zxx", None), + (u"Nogai", "nog", None), + (u"Norse, Old", "non", None), + (u"North American 
Indian", "nai", None), + (u"North Ndebele", "nde", "nd"), + (u"Northern Frisian", "frr", None), + (u"Northern Sami", "sme", "se"), + (u"Northern Sotho", "nso", None), + (u"Norwegian", "nor", "no"), + (u"Norwegian Bokmål", "nob", "nb"), + (u"Norwegian Nynorsk", "nno", "nn"), + (u"Nubian languages", "nub", None), + (u"Nyamwezi", "nym", None), + (u"Nyanja", "nya", "ny"), + (u"Nyankole", "nyn", None), + (u"Nynorsk, Norwegian", "nno", "nn"), + (u"Nyoro", "nyo", None), + (u"Nzima", "nzi", None), + (u"Occitan (post 1500)", "oci", "oc"), + (u"Oirat", "xal", None), + (u"Ojibwa", "oji", "oj"), + (u"Old Bulgarian", "chu", "cu"), + (u"Old Church Slavonic", "chu", "cu"), + (u"Old Newari", "nwc", None), + (u"Old Slavonic", "chu", "cu"), + (u"Oriya", "ori", "or"), + (u"Oromo", "orm", "om"), + (u"Osage", "osa", None), + (u"Ossetian", "oss", "os"), + (u"Ossetic", "oss", "os"), + (u"Otomian languages", "oto", None), + (u"Pahlavi", "pal", None), + (u"Palauan", "pau", None), + (u"Pali", "pli", "pi"), + (u"Pampanga", "pam", None), + (u"Pangasinan", "pag", None), + (u"Panjabi", "pan", "pa"), + (u"Papiamento", "pap", None), + (u"Papuan (Other)", "paa", None), + (u"Pedi", "nso", None), + (u"Persian", "per/fas", "fa"), + (u"Persian, Old (ca.600-400 B.C.)", "peo", None), + (u"Philippine (Other)", "phi", None), + (u"Phoenician", "phn", None), + (u"Pilipino", "fil", None), + (u"Pohnpeian", "pon", None), + (u"Polish", "pol", "pl"), + (u"Portuguese", "por", "pt"), + (u"Prakrit languages", "pra", None), + (u"Provençal", "oci", "oc"), + (u"Provençal, Old (to 1500)", "pro", None), + (u"Punjabi", "pan", "pa"), + (u"Pushto", "pus", "ps"), + (u"Quechua", "que", "qu"), + (u"Raeto-Romance", "roh", "rm"), + (u"Rajasthani", "raj", None), + (u"Rapanui", "rap", None), + (u"Rarotongan", "rar", None), + (u"Reserved for local use", "qaa/qtz", None), + (u"Romance (Other)", "roa", None), + (u"Romanian", "rum/ron", "ro"), + (u"Romany", "rom", None), + (u"Rundi", "run", "rn"), + (u"Russian", "rus", "ru"), + 
(u"Salishan languages", "sal", None), + (u"Samaritan Aramaic", "sam", None), + (u"Sami languages (Other)", "smi", None), + (u"Samoan", "smo", "sm"), + (u"Sandawe", "sad", None), + (u"Sango", "sag", "sg"), + (u"Sanskrit", "san", "sa"), + (u"Santali", "sat", None), + (u"Sardinian", "srd", "sc"), + (u"Sasak", "sas", None), + (u"Saxon, Low", "nds", None), + (u"Scots", "sco", None), + (u"Scottish Gaelic", "gla", "gd"), + (u"Selkup", "sel", None), + (u"Semitic (Other)", "sem", None), + (u"Sepedi", "nso", None), + (u"Serbian", "scc/srp", "sr"), + (u"Serer", "srr", None), + (u"Shan", "shn", None), + (u"Shona", "sna", "sn"), + (u"Sichuan Yi", "iii", "ii"), + (u"Sicilian", "scn", None), + (u"Sidamo", "sid", None), + (u"Sign Languages", "sgn", None), + (u"Siksika", "bla", None), + (u"Sindhi", "snd", "sd"), + (u"Sinhala", "sin", "si"), + (u"Sinhalese", "sin", "si"), + (u"Sino-Tibetan (Other)", "sit", None), + (u"Siouan languages", "sio", None), + (u"Skolt Sami", "sms", None), + (u"Slave (Athapascan)", "den", None), + (u"Slavic (Other)", "sla", None), + (u"Slovak", "slo/slk", "sk"), + (u"Slovenian", "slv", "sl"), + (u"Sogdian", "sog", None), + (u"Somali", "som", "so"), + (u"Songhai", "son", None), + (u"Soninke", "snk", None), + (u"Sorbian languages", "wen", None), + (u"Sotho, Northern", "nso", None), + (u"Sotho, Southern", "sot", "st"), + (u"South American Indian (Other)", "sai", None), + (u"South Ndebele", "nbl", "nr"), + (u"Southern Altai", "alt", None), + (u"Southern Sami", "sma", None), + (u"Spanish", "spa", "es"), + (u"Sranan Togo", "srn", None), + (u"Sukuma", "suk", None), + (u"Sumerian", "sux", None), + (u"Sundanese", "sun", "su"), + (u"Susu", "sus", None), + (u"Swahili", "swa", "sw"), + (u"Swati", "ssw", "ss"), + (u"Swedish", "swe", "sv"), + (u"Swiss German", "gsw", None), + (u"Syriac", "syr", None), + (u"Tagalog", "tgl", "tl"), + (u"Tahitian", "tah", "ty"), + (u"Tai (Other)", "tai", None), + (u"Tajik", "tgk", "tg"), + (u"Tamashek", "tmh", None), + (u"Tamil", "tam", 
"ta"), + (u"Tatar", "tat", "tt"), + (u"Telugu", "tel", "te"), + (u"Tereno", "ter", None), + (u"Tetum", "tet", None), + (u"Thai", "tha", "th"), + (u"Tibetan", "tib/bod", "bo"), + (u"Tigre", "tig", None), + (u"Tigrinya", "tir", "ti"), + (u"Timne", "tem", None), + (u"Tiv", "tiv", None), + (u"tlhIngan-Hol", "tlh", None), + (u"Tlingit", "tli", None), + (u"Tok Pisin", "tpi", None), + (u"Tokelau", "tkl", None), + (u"Tonga (Nyasa)", "tog", None), + (u"Tonga (Tonga Islands)", "ton", "to"), + (u"Tsimshian", "tsi", None), + (u"Tsonga", "tso", "ts"), + (u"Tswana", "tsn", "tn"), + (u"Tumbuka", "tum", None), + (u"Tupi languages", "tup", None), + (u"Turkish", "tur", "tr"), + (u"Turkish, Ottoman (1500-1928)", "ota", None), + (u"Turkmen", "tuk", "tk"), + (u"Tuvalu", "tvl", None), + (u"Tuvinian", "tyv", None), + (u"Twi", "twi", "tw"), + (u"Udmurt", "udm", None), + (u"Ugaritic", "uga", None), + (u"Uighur", "uig", "ug"), + (u"Ukrainian", "ukr", "uk"), + (u"Umbundu", "umb", None), + (u"Undetermined", "und", None), + (u"Upper Sorbian", "hsb", None), + (u"Urdu", "urd", "ur"), + (u"Uyghur", "uig", "ug"), + (u"Uzbek", "uzb", "uz"), + (u"Vai", "vai", None), + (u"Valencian", "cat", "ca"), + (u"Venda", "ven", "ve"), + (u"Vietnamese", "vie", "vi"), + (u"Volapük", "vol", "vo"), + (u"Votic", "vot", None), + (u"Wakashan languages", "wak", None), + (u"Walamo", "wal", None), + (u"Walloon", "wln", "wa"), + (u"Waray", "war", None), + (u"Washo", "was", None), + (u"Welsh", "wel/cym", "cy"), + (u"Western Frisian", "fry", "fy"), + (u"Wolof", "wol", "wo"), + (u"Xhosa", "xho", "xh"), + (u"Yakut", "sah", None), + (u"Yao", "yao", None), + (u"Yapese", "yap", None), + (u"Yiddish", "yid", "yi"), + (u"Yoruba", "yor", "yo"), + (u"Yupik languages", "ypk", None), + (u"Zande", "znd", None), + (u"Zapotec", "zap", None), + (u"Zaza", "zza", None), + (u"Zazaki", "zza", None), + (u"Zenaga", "zen", None), + (u"Zhuang", "zha", "za"), + (u"Zulu", "zul", "zu"), + (u"Zuni", "zun", None), +) + +# Bibliographic ISO-639-2 form 
# Bibliographic ISO-639-2 form (eg. "fre" => "French").
# Note: when several English names share a code (eg. "ady"), the last
# entry of _ISO639 wins.
ISO639_2 = {}
for line in _ISO639:
    for key in line[1].split("/"):
        ISO639_2[key] = line[0]
del _ISO639

from hachoir_core.iso639 import ISO639_2

class Language:
    """ISO-639-2 language identified by its three-letter bibliographic code."""

    def __init__(self, code):
        code = str(code)
        if code not in ISO639_2:
            raise ValueError("Invalid language code: %r" % code)
        self.code = code

    def __cmp__(self, other):
        # A Language only compares equal to another Language with the
        # same code; any other object sorts after it.
        if other.__class__ != Language:
            return 1
        return cmp(self.code, other.code)

    def __unicode__(self):
        # English name of the language (eg. u"French").
        return ISO639_2[self.code]

    def __str__(self):
        return self.__unicode__()

    def __repr__(self):
        # BUG FIX: the format string was empty ("" % (...)), which raises
        # "TypeError: not all arguments converted during string formatting"
        # on every call.  The angle-bracketed literal was apparently lost;
        # restore a meaningful repr using the same two arguments.
        return "<Language '%s', code=%r>" % (unicode(self), self.code)
+ + @param filename: C{L{string}} + """ + + # Look if file already exists or not + filename = os.path.expanduser(filename) + filename = os.path.realpath(filename) + append = os.access(filename, os.F_OK) + + # Create log file (or open it in append mode, if it already exists) + try: + import codecs + if append: + self.__file = codecs.open(filename, "a", "utf-8") + else: + self.__file = codecs.open(filename, "w", "utf-8") + self._writeIntoFile(_("Starting Hachoir")) + except IOError, err: + if err.errno == 2: + self.__file = None + self.info(_("[Log] setFilename(%s) fails: no such file") % filename) + else: + raise + + def _writeIntoFile(self, message): + timestamp = time.strftime("%Y-%m-%d %H:%M:%S") + self.__file.write(u"%s - %s\n" % (timestamp, message)) + self.__file.flush() + + def newMessage(self, level, text, ctxt=None): + """ + Write a new message : append it in the buffer, + display it to the screen (if needed), and write + it in the log file (if needed). + + @param level: Message level. + @type level: C{int} + @param text: Message content. + @type text: C{str} + @param ctxt: The caller instance. 
+ """ + + if level < self.LOG_ERROR and config.quiet or \ + level <= self.LOG_INFO and not config.verbose: + return + if config.debug: + from hachoir_core.error import getBacktrace + backtrace = getBacktrace(None) + if backtrace: + text += "\n\n" + backtrace + + _text = text + if hasattr(ctxt, "_logger"): + _ctxt = ctxt._logger() + if _ctxt is not None: + text = "[%s] %s" % (_ctxt, text) + + # Add message to log buffer + if self.use_buffer: + if not self.__buffer.has_key(level): + self.__buffer[level] = [text] + else: + self.__buffer[level].append(text) + + # Add prefix + prefix = self.level_name.get(level, "[info]") + + # Display on stdout (if used) + if self.use_print: + sys.stdout.flush() + sys.stderr.write("%s %s\n" % (prefix, text)) + sys.stderr.flush() + + # Write into outfile (if used) + if self.__file: + self._writeIntoFile("%s %s" % (prefix, text)) + + # Use callback (if used) + if self.on_new_message: + self.on_new_message (level, prefix, _text, ctxt) + + def info(self, text): + """ + New informative message. + @type text: C{str} + """ + self.newMessage(Log.LOG_INFO, text) + + def warning(self, text): + """ + New warning message. + @type text: C{str} + """ + self.newMessage(Log.LOG_WARN, text) + + def error(self, text): + """ + New error message. 
import gc

#---- Default implementation when resource is missing ----------------------
PAGE_SIZE = 4096

def getMemoryLimit():
    """
    Get current memory limit in bytes.

    Return None on error (or when the 'resource' module is missing).
    """
    return None

def setMemoryLimit(max_mem):
    """
    Set memory limit in bytes.
    Use value 'None' to disable memory limit.

    Return True if limit is set, False on error.
    """
    return False

def getMemorySize():
    """
    Read current process memory size: size of available virtual memory.
    This value is NOT the real memory usage.

    This function only works on Linux (use /proc/self/statm file).
    Return None when the file is unreadable.
    """
    try:
        # BUG FIX: the file handle was never closed; close it
        # deterministically instead of leaking it to the GC.
        statm_file = open('/proc/self/statm')
        try:
            statm = statm_file.readline().split()
        finally:
            statm_file.close()
    except IOError:
        return None
    return int(statm[0]) * PAGE_SIZE

def clearCaches():
    """
    Try to clear all caches: call gc.collect() (Python garbage collector).
    """
    gc.collect()
    #import re; re.purge()

try:
#---- 'resource' implementation ---------------------------------------------
    from resource import getpagesize, getrlimit, setrlimit, RLIMIT_AS

    PAGE_SIZE = getpagesize()

    def getMemoryLimit():
        """Get the RLIMIT_AS soft limit, or None on error."""
        try:
            limit = getrlimit(RLIMIT_AS)[0]
            if 0 < limit:
                # NOTE(review): RLIMIT_AS is documented in bytes; scaling by
                # PAGE_SIZE is kept from the original code -- confirm upstream.
                limit *= PAGE_SIZE
            return limit
        except ValueError:
            return None

    def setMemoryLimit(max_mem):
        """Set the RLIMIT_AS soft limit (None disables it).

        Return True if the limit is set, False on error."""
        if max_mem is None:
            max_mem = -1
        try:
            setrlimit(RLIMIT_AS, (max_mem, -1))
            return True
        except ValueError:
            return False
except ImportError:
    pass

def limitedMemory(limit, func, *args, **kw):
    """
    Limit memory growth while calling func(*args, **kw):
    restrict memory growth to 'limit' bytes above the current usage.

    Use try/except MemoryError to catch the error.
    """
    # First step: clear caches to gain memory
    clearCaches()

    # Get total program size
    max_rss = getMemorySize()
    if max_rss is not None:
        # Remember the old limit, then install the new one
        old_limit = getMemoryLimit()
        limited = setMemoryLimit(max_rss + limit)
    else:
        limited = False

    try:
        # Call function
        return func(*args, **kw)
    finally:
        # Restore the previous limit.
        if limited:
            setMemoryLimit(old_limit)
        # BUG FIX: the final clearCaches() was unreachable when placed
        # after the try/finally (the try block always returns); run it in
        # the finally clause so the caches really are cleared.
        clearCaches()
+ stat = loadStats(profile_filename) + if verbose: + print "[+] Strip..." + stat.strip_dirs() + if verbose: + print "[+] Sort data..." + stat.sort_stats(*sort_by) + if verbose: + print + print "[+] Display statistics" + print + stat.print_stats(nb_func) + return result + finally: + unlink(profile_filename) + diff --git a/libs/hachoir_core/stream/__init__.py b/libs/hachoir_core/stream/__init__.py new file mode 100644 index 0000000..163e12a --- /dev/null +++ b/libs/hachoir_core/stream/__init__.py @@ -0,0 +1,11 @@ +from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN +from hachoir_core.stream.stream import StreamError +from hachoir_core.stream.input import ( + InputStreamError, + InputStream, InputIOStream, StringInputStream, + InputSubStream, InputFieldStream, + FragmentedStream, ConcatStream) +from hachoir_core.stream.input_helper import FileInputStream, guessStreamCharset +from hachoir_core.stream.output import (OutputStreamError, + FileOutputStream, StringOutputStream, OutputStream) + diff --git a/libs/hachoir_core/stream/input.py b/libs/hachoir_core/stream/input.py new file mode 100644 index 0000000..1714bad --- /dev/null +++ b/libs/hachoir_core/stream/input.py @@ -0,0 +1,563 @@ +from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN +from hachoir_core.error import info +from hachoir_core.log import Logger +from hachoir_core.bits import str2long +from hachoir_core.i18n import getTerminalCharset +from hachoir_core.tools import lowerBound +from hachoir_core.i18n import _ +from errno import ESPIPE +from weakref import ref as weakref_ref +from hachoir_core.stream import StreamError + +class InputStreamError(StreamError): + pass + +class ReadStreamError(InputStreamError): + def __init__(self, size, address, got=None): + self.size = size + self.address = address + self.got = got + if self.got is not None: + msg = _("Can't read %u bits at address %u (got %u bits)") % (self.size, self.address, self.got) + else: + msg = _("Can't read %u bits at address %u") % 
(self.size, self.address) + InputStreamError.__init__(self, msg) + +class NullStreamError(InputStreamError): + def __init__(self, source): + self.source = source + msg = _("Input size is nul (source='%s')!") % self.source + InputStreamError.__init__(self, msg) + +class FileFromInputStream: + _offset = 0 + _from_end = False + + def __init__(self, stream): + self.stream = stream + self._setSize(stream.askSize(self)) + + def _setSize(self, size): + if size is None: + self._size = size + elif size % 8: + raise InputStreamError("Invalid size") + else: + self._size = size // 8 + + def tell(self): + if self._from_end: + while self._size is None: + self.stream._feed(max(self.stream._current_size << 1, 1 << 16)) + self._from_end = False + self._offset += self._size + return self._offset + + def seek(self, pos, whence=0): + if whence == 0: + self._from_end = False + self._offset = pos + elif whence == 1: + self._offset += pos + elif whence == 2: + self._from_end = True + self._offset = pos + else: + raise ValueError("seek() second argument must be 0, 1 or 2") + + def read(self, size=None): + def read(address, size): + shift, data, missing = self.stream.read(8 * address, 8 * size) + if shift: + raise InputStreamError("TODO: handle non-byte-aligned data") + return data + if self._size or size is not None and not self._from_end: + # We don't want self.tell() to read anything + # and the size must be known if we read until the end. 
+ pos = self.tell() + if size is None or None < self._size < pos + size: + size = self._size - pos + if size <= 0: + return '' + data = read(pos, size) + self._offset += len(data) + return data + elif self._from_end: + # TODO: not tested + max_size = - self._offset + if size is None or max_size < size: + size = max_size + if size <= 0: + return '' + data = '', '' + self._offset = max(0, self.stream._current_size // 8 + self._offset) + self._from_end = False + bs = max(max_size, 1 << 16) + while True: + d = read(self._offset, bs) + data = data[1], d + self._offset += len(d) + if self._size: + bs = self._size - self._offset + if not bs: + data = data[0] + data[1] + d = len(data) - max_size + return data[d:d+size] + else: + # TODO: not tested + data = [ ] + size = 1 << 16 + while True: + d = read(self._offset, size) + data.append(d) + self._offset += len(d) + if self._size: + size = self._size - self._offset + if not size: + return ''.join(data) + + +class InputStream(Logger): + _set_size = None + _current_size = 0 + + def __init__(self, source=None, size=None, packets=None, **args): + self.source = source + self._size = size # in bits + if size == 0: + raise NullStreamError(source) + self.tags = tuple(args.get("tags", tuple())) + self.packets = packets + + def askSize(self, client): + if self._size != self._current_size: + if self._set_size is None: + self._set_size = [] + self._set_size.append(weakref_ref(client)) + return self._size + + def _setSize(self, size=None): + assert self._size is None or self._current_size <= self._size + if self._size != self._current_size: + self._size = self._current_size + if not self._size: + raise NullStreamError(self.source) + if self._set_size: + for client in self._set_size: + client = client() + if client: + client._setSize(self._size) + del self._set_size + + size = property(lambda self: self._size, doc="Size of the stream in bits") + checked = property(lambda self: self._size == self._current_size) + + def sizeGe(self, size, 
const=False): + return self._current_size >= size or \ + not (None < self._size < size or const or self._feed(size)) + + def _feed(self, size): + return self.read(size-1,1)[2] + + def read(self, address, size): + """ + Read 'size' bits at position 'address' (in bits) + from the beginning of the stream. + """ + raise NotImplementedError + + def readBits(self, address, nbits, endian): + assert endian in (BIG_ENDIAN, LITTLE_ENDIAN) + + shift, data, missing = self.read(address, nbits) + if missing: + raise ReadStreamError(nbits, address) + value = str2long(data, endian) + if endian is BIG_ENDIAN: + value >>= len(data) * 8 - shift - nbits + else: + value >>= shift + return value & (1 << nbits) - 1 + + def readInteger(self, address, signed, nbits, endian): + """ Read an integer number """ + value = self.readBits(address, nbits, endian) + + # Signe number. Example with nbits=8: + # if 128 <= value: value -= 256 + if signed and (1 << (nbits-1)) <= value: + value -= (1 << nbits) + return value + + def readBytes(self, address, nb_bytes): + shift, data, missing = self.read(address, 8 * nb_bytes) + if shift: + raise InputStreamError("TODO: handle non-byte-aligned data") + if missing: + raise ReadStreamError(8 * nb_bytes, address) + return data + + def searchBytesLength(self, needle, include_needle, + start_address=0, end_address=None): + """ + If include_needle is True, add its length to the result. + Returns None is needle can't be found. + """ + + pos = self.searchBytes(needle, start_address, end_address) + if pos is None: + return None + length = (pos - start_address) // 8 + if include_needle: + length += len(needle) + return length + + def searchBytes(self, needle, start_address=0, end_address=None): + """ + Search some bytes in [start_address;end_address[. Addresses must + be aligned to byte. Returns the address of the bytes if found, + None else. 
+ """ + if start_address % 8: + raise InputStreamError("Unable to search bytes with address with bit granularity") + length = len(needle) + size = max(3 * length, 4096) + buffer = '' + + if self._size and (end_address is None or self._size < end_address): + end_address = self._size + + while True: + if end_address is not None: + todo = (end_address - start_address) >> 3 + if todo < size: + if todo <= 0: + return None + size = todo + data = self.readBytes(start_address, size) + if end_address is None and self._size: + end_address = self._size + size = (end_address - start_address) >> 3 + assert size > 0 + data = data[:size] + start_address += 8 * size + buffer = buffer[len(buffer) - length + 1:] + data + found = buffer.find(needle) + if found >= 0: + return start_address + (found - len(buffer)) * 8 + + def file(self): + return FileFromInputStream(self) + + +class InputPipe(object): + """ + InputPipe makes input streams seekable by caching a certain + amount of data. The memory usage may be unlimited in worst cases. + A function (set_size) is called when the size of the stream is known. + + InputPipe sees the input stream as an array of blocks of + size = (2 ^ self.buffer_size) and self.buffers maps to this array. + It also maintains a circular ordered list of non-discarded blocks, + sorted by access time. + + Each element of self.buffers is an array of 3 elements: + * self.buffers[i][0] is the data. + len(self.buffers[i][0]) == 1 << self.buffer_size + (except at the end: the length may be smaller) + * self.buffers[i][1] is the index of a more recently used block + * self.buffers[i][2] is the opposite of self.buffers[1], + in order to have a double-linked list. + For any discarded block, self.buffers[i] = None + + self.last is the index of the most recently accessed block. + self.first is the first (= smallest index) non-discarded block. + + How InputPipe discards blocks: + * Just before returning from the read method. 
+ * Only if there are more than self.buffer_nb_min blocks in memory. + * While self.buffers[self.first] is that least recently used block. + + Property: There is no hole in self.buffers, except at the beginning. + """ + buffer_nb_min = 256 + buffer_size = 16 + last = None + size = None + + def __init__(self, input, set_size=None): + self._input = input + self.first = self.address = 0 + self.buffers = [] + self.set_size = set_size + + current_size = property(lambda self: len(self.buffers) << self.buffer_size) + + def _append(self, data): + if self.last is None: + self.last = next = prev = 0 + else: + prev = self.last + last = self.buffers[prev] + next = last[1] + self.last = self.buffers[next][2] = last[1] = len(self.buffers) + self.buffers.append([ data, next, prev ]) + + def _get(self, index): + if index >= len(self.buffers): + return '' + buf = self.buffers[index] + if buf is None: + raise InputStreamError(_("Error: Buffers too small. Can't seek backward.")) + if self.last != index: + next = buf[1] + prev = buf[2] + self.buffers[next][2] = prev + self.buffers[prev][1] = next + first = self.buffers[self.last][1] + buf[1] = first + buf[2] = self.last + self.buffers[first][2] = index + self.buffers[self.last][1] = index + self.last = index + return buf[0] + + def _flush(self): + lim = len(self.buffers) - self.buffer_nb_min + while self.first < lim: + buf = self.buffers[self.first] + if buf[2] != self.last: + break + info("Discarding buffer %u." 
% self.first) + self.buffers[self.last][1] = buf[1] + self.buffers[buf[1]][2] = self.last + self.buffers[self.first] = None + self.first += 1 + + def seek(self, address): + assert 0 <= address + self.address = address + + def read(self, size): + end = self.address + size + for i in xrange(len(self.buffers), (end >> self.buffer_size) + 1): + data = self._input.read(1 << self.buffer_size) + if len(data) < 1 << self.buffer_size: + self.size = (len(self.buffers) << self.buffer_size) + len(data) + if self.set_size: + self.set_size(self.size) + if data: + self._append(data) + break + self._append(data) + block, offset = divmod(self.address, 1 << self.buffer_size) + data = ''.join(self._get(index) + for index in xrange(block, (end - 1 >> self.buffer_size) + 1) + )[offset:offset+size] + self._flush() + self.address += len(data) + return data + +class InputIOStream(InputStream): + def __init__(self, input, size=None, **args): + if not hasattr(input, "seek"): + if size is None: + input = InputPipe(input, self._setSize) + else: + input = InputPipe(input) + elif size is None: + try: + input.seek(0, 2) + size = input.tell() * 8 + except IOError, err: + if err.errno == ESPIPE: + input = InputPipe(input, self._setSize) + else: + charset = getTerminalCharset() + errmsg = unicode(str(err), charset) + source = args.get("source", "" % input) + raise InputStreamError(_("Unable to get size of %s: %s") % (source, errmsg)) + self._input = input + InputStream.__init__(self, size=size, **args) + + def __current_size(self): + if self._size: + return self._size + if self._input.size: + return 8 * self._input.size + return 8 * self._input.current_size + _current_size = property(__current_size) + + def read(self, address, size): + assert size > 0 + _size = self._size + address, shift = divmod(address, 8) + self._input.seek(address) + size = (size + shift + 7) >> 3 + data = self._input.read(size) + got = len(data) + missing = size != got + if missing and _size == self._size: + raise 
ReadStreamError(8 * size, 8 * address, 8 * got) + return shift, data, missing + + def file(self): + if hasattr(self._input, "fileno"): + from os import dup, fdopen + new_fd = dup(self._input.fileno()) + new_file = fdopen(new_fd, "r") + new_file.seek(0) + return new_file + return InputStream.file(self) + + +class StringInputStream(InputStream): + def __init__(self, data, source="", **args): + self.data = data + InputStream.__init__(self, source=source, size=8*len(data), **args) + self._current_size = self._size + + def read(self, address, size): + address, shift = divmod(address, 8) + size = (size + shift + 7) >> 3 + data = self.data[address:address+size] + got = len(data) + if got != size: + raise ReadStreamError(8 * size, 8 * address, 8 * got) + return shift, data, False + + +class InputSubStream(InputStream): + def __init__(self, stream, offset, size=None, source=None, **args): + if offset is None: + offset = 0 + if size is None and stream.size is not None: + size = stream.size - offset + if None < size <= 0: + raise ValueError("InputSubStream: offset is outside input stream") + self.stream = stream + self._offset = offset + if source is None: + source = "" % (stream.source, offset, size) + InputStream.__init__(self, source=source, size=size, **args) + self.stream.askSize(self) + + _current_size = property(lambda self: min(self._size, max(0, self.stream._current_size - self._offset))) + + def read(self, address, size): + return self.stream.read(self._offset + address, size) + +def InputFieldStream(field, **args): + if not field.parent: + return field.stream + stream = field.parent.stream + args["size"] = field.size + args.setdefault("source", stream.source + field.path) + return InputSubStream(stream, field.absolute_address, **args) + + +class FragmentedStream(InputStream): + def __init__(self, field, **args): + self.stream = field.parent.stream + data = field.getData() + self.fragments = [ (0, data.absolute_address, data.size) ] + self.next = field.next + 
args.setdefault("source", "%s%s" % (self.stream.source, field.path)) + InputStream.__init__(self, **args) + if not self.next: + self._current_size = data.size + self._setSize() + + def _feed(self, end): + if self._current_size < end: + if self.checked: + raise ReadStreamError(end - self._size, self._size) + a, fa, fs = self.fragments[-1] + while self.stream.sizeGe(fa + min(fs, end - a)): + a += fs + f = self.next + if a >= end: + self._current_size = end + if a == end and not f: + self._setSize() + return False + if f: + self.next = f.next + f = f.getData() + if not f: + self._current_size = a + self._setSize() + return True + fa = f.absolute_address + fs = f.size + self.fragments += [ (a, fa, fs) ] + self._current_size = a + max(0, self.stream.size - fa) + self._setSize() + return True + return False + + def read(self, address, size): + assert size > 0 + missing = self._feed(address + size) + if missing: + size = self._size - address + if size <= 0: + return 0, '', True + d = [] + i = lowerBound(self.fragments, lambda x: x[0] <= address) + a, fa, fs = self.fragments[i-1] + a -= address + fa -= a + fs += a + s = None + while True: + n = min(fs, size) + u, v, w = self.stream.read(fa, n) + assert not w + if s is None: + s = u + else: + assert not u + d += [ v ] + size -= n + if not size: + return s, ''.join(d), missing + a, fa, fs = self.fragments[i] + i += 1 + + +class ConcatStream(InputStream): + # TODO: concatene any number of any type of stream + def __init__(self, streams, **args): + if len(streams) > 2 or not streams[0].checked: + raise NotImplementedError + self.__size0 = streams[0].size + size1 = streams[1].askSize(self) + if size1 is not None: + args["size"] = self.__size0 + size1 + self.__streams = streams + InputStream.__init__(self, **args) + + _current_size = property(lambda self: self.__size0 + self.__streams[1]._current_size) + + def read(self, address, size): + _size = self._size + s = self.__size0 - address + shift, data, missing = None, '', False + 
if s > 0: + s = min(size, s) + shift, data, w = self.__streams[0].read(address, s) + assert not w + a, s = 0, size - s + else: + a, s = -s, size + if s: + u, v, missing = self.__streams[1].read(a, s) + if missing and _size == self._size: + raise ReadStreamError(s, a) + if shift is None: + shift = u + else: + assert not u + data += v + return shift, data, missing diff --git a/libs/hachoir_core/stream/input_helper.py b/libs/hachoir_core/stream/input_helper.py new file mode 100644 index 0000000..e793831 --- /dev/null +++ b/libs/hachoir_core/stream/input_helper.py @@ -0,0 +1,38 @@ +from hachoir_core.i18n import getTerminalCharset, guessBytesCharset, _ +from hachoir_core.stream import InputIOStream, InputSubStream, InputStreamError + +def FileInputStream(filename, real_filename=None, **args): + """ + Create an input stream of a file. filename must be unicode. + + real_filename is an optional argument used to specify the real filename, + its type can be 'str' or 'unicode'. Use real_filename when you are + not able to convert filename to real unicode string (ie. you have to + use unicode(name, 'replace') or unicode(name, 'ignore')). 
+ """ + assert isinstance(filename, unicode) + if not real_filename: + real_filename = filename + try: + inputio = open(real_filename, 'rb') + except IOError, err: + charset = getTerminalCharset() + errmsg = unicode(str(err), charset) + raise InputStreamError(_("Unable to open file %s: %s") % (filename, errmsg)) + source = "file:" + filename + offset = args.pop("offset", 0) + size = args.pop("size", None) + if offset or size: + if size: + size = 8 * size + stream = InputIOStream(inputio, source=source, **args) + return InputSubStream(stream, 8 * offset, size, **args) + else: + args.setdefault("tags",[]).append(("filename", filename)) + return InputIOStream(inputio, source=source, **args) + +def guessStreamCharset(stream, address, size, default=None): + size = min(size, 1024*8) + bytes = stream.readBytes(address, size//8) + return guessBytesCharset(bytes, default) + diff --git a/libs/hachoir_core/stream/output.py b/libs/hachoir_core/stream/output.py new file mode 100644 index 0000000..e31637d --- /dev/null +++ b/libs/hachoir_core/stream/output.py @@ -0,0 +1,173 @@ +from cStringIO import StringIO +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.bits import long2raw +from hachoir_core.stream import StreamError +from errno import EBADF + +MAX_READ_NBYTES = 2 ** 16 + +class OutputStreamError(StreamError): + pass + +class OutputStream(object): + def __init__(self, output, filename=None): + self._output = output + self._filename = filename + self._bit_pos = 0 + self._byte = 0 + + def _getFilename(self): + return self._filename + filename = property(_getFilename) + + def writeBit(self, state, endian): + if self._bit_pos == 7: + self._bit_pos = 0 + if state: + if endian is BIG_ENDIAN: + self._byte |= 1 + else: + self._byte |= 128 + self._output.write(chr(self._byte)) + self._byte = 0 + else: + if state: + if endian is BIG_ENDIAN: + self._byte |= (1 << self._bit_pos) + else: + self._byte |= (1 << (7-self._bit_pos)) + self._bit_pos += 1 + + def writeBits(self, 
count, value, endian): + assert 0 <= value < 2**count + + # Feed bits to align to byte address + if self._bit_pos != 0: + n = 8 - self._bit_pos + if n <= count: + count -= n + if endian is BIG_ENDIAN: + self._byte |= (value >> count) + value &= ((1 << count) - 1) + else: + self._byte |= (value & ((1 << n)-1)) << self._bit_pos + value >>= n + self._output.write(chr(self._byte)) + self._bit_pos = 0 + self._byte = 0 + else: + if endian is BIG_ENDIAN: + self._byte |= (value << (8-self._bit_pos-count)) + else: + self._byte |= (value << self._bit_pos) + self._bit_pos += count + return + + # Write byte per byte + while 8 <= count: + count -= 8 + if endian is BIG_ENDIAN: + byte = (value >> count) + value &= ((1 << count) - 1) + else: + byte = (value & 0xFF) + value >>= 8 + self._output.write(chr(byte)) + + # Keep last bits + assert 0 <= count < 8 + self._bit_pos = count + if 0 < count: + assert 0 <= value < 2**count + if endian is BIG_ENDIAN: + self._byte = value << (8-count) + else: + self._byte = value + else: + assert value == 0 + self._byte = 0 + + def writeInteger(self, value, signed, size_byte, endian): + if signed: + value += 1 << (size_byte*8 - 1) + raw = long2raw(value, endian, size_byte) + self.writeBytes(raw) + + def copyBitsFrom(self, input, address, nb_bits, endian): + if (nb_bits % 8) == 0: + self.copyBytesFrom(input, address, nb_bits/8) + else: + # Arbitrary limit (because we should use a buffer, like copyBytesFrom(), + # but with endianess problem + assert nb_bits <= 128 + data = input.readBits(address, nb_bits, endian) + self.writeBits(nb_bits, data, endian) + + def copyBytesFrom(self, input, address, nb_bytes): + if (address % 8): + raise OutputStreamError("Unable to copy bytes with address with bit granularity") + buffer_size = 1 << 12 # 8192 (8 KB) + while 0 < nb_bytes: + # Compute buffer size + if nb_bytes < buffer_size: + buffer_size = nb_bytes + + # Read + data = input.readBytes(address, buffer_size) + + # Write + self.writeBytes(data) + + # Move 
address + address += buffer_size*8 + nb_bytes -= buffer_size + + def writeBytes(self, bytes): + if self._bit_pos != 0: + raise NotImplementedError() + self._output.write(bytes) + + def readBytes(self, address, nbytes): + """ + Read bytes from the stream at specified address (in bits). + Address have to be a multiple of 8. + nbytes have to in 1..MAX_READ_NBYTES (64 KB). + + This method is only supported for StringOuputStream (not on + FileOutputStream). + + Return read bytes as byte string. + """ + assert (address % 8) == 0 + assert (1 <= nbytes <= MAX_READ_NBYTES) + self._output.flush() + oldpos = self._output.tell() + try: + self._output.seek(0) + try: + return self._output.read(nbytes) + except IOError, err: + if err[0] == EBADF: + raise OutputStreamError("Stream doesn't support read() operation") + finally: + self._output.seek(oldpos) + +def StringOutputStream(): + """ + Create an output stream into a string. + """ + data = StringIO() + return OutputStream(data) + +def FileOutputStream(filename, real_filename=None): + """ + Create an output stream into file with given name. + + Filename have to be unicode, whereas (optional) real_filename can be str. + """ + assert isinstance(filename, unicode) + if not real_filename: + real_filename = filename + output = open(real_filename, 'wb') + return OutputStream(output, filename=filename) + diff --git a/libs/hachoir_core/stream/stream.py b/libs/hachoir_core/stream/stream.py new file mode 100644 index 0000000..58c9aea --- /dev/null +++ b/libs/hachoir_core/stream/stream.py @@ -0,0 +1,5 @@ +from hachoir_core.error import HachoirError + +class StreamError(HachoirError): + pass + diff --git a/libs/hachoir_core/text_handler.py b/libs/hachoir_core/text_handler.py new file mode 100644 index 0000000..e2c65f0 --- /dev/null +++ b/libs/hachoir_core/text_handler.py @@ -0,0 +1,60 @@ +""" +Utilities used to convert a field to human classic reprentation of data. 
+""" + +from hachoir_core.tools import ( + humanDuration, humanFilesize, alignValue, + durationWin64 as doDurationWin64, + deprecated) +from types import FunctionType, MethodType +from hachoir_core.field import Field + +def textHandler(field, handler): + assert isinstance(handler, (FunctionType, MethodType)) + assert issubclass(field.__class__, Field) + field.createDisplay = lambda: handler(field) + return field + +def displayHandler(field, handler): + assert isinstance(handler, (FunctionType, MethodType)) + assert issubclass(field.__class__, Field) + field.createDisplay = lambda: handler(field.value) + return field + +@deprecated("Use TimedeltaWin64 field type") +def durationWin64(field): + """ + Convert Windows 64-bit duration to string. The timestamp format is + a 64-bit number: number of 100ns. See also timestampWin64(). + + >>> durationWin64(type("", (), dict(value=2146280000, size=64))) + u'3 min 34 sec 628 ms' + >>> durationWin64(type("", (), dict(value=(1 << 64)-1, size=64))) + u'58494 years 88 days 5 hours' + """ + assert hasattr(field, "value") and hasattr(field, "size") + assert field.size == 64 + delta = doDurationWin64(field.value) + return humanDuration(delta) + +def filesizeHandler(field): + """ + Format field value using humanFilesize() + """ + return displayHandler(field, humanFilesize) + +def hexadecimal(field): + """ + Convert an integer to hexadecimal in lower case. Returns unicode string. 
+ + >>> hexadecimal(type("", (), dict(value=412, size=16))) + u'0x019c' + >>> hexadecimal(type("", (), dict(value=0, size=32))) + u'0x00000000' + """ + assert hasattr(field, "value") and hasattr(field, "size") + size = field.size + padding = alignValue(size, 4) // 4 + pattern = u"0x%%0%ux" % padding + return pattern % field.value + diff --git a/libs/hachoir_core/timeout.py b/libs/hachoir_core/timeout.py new file mode 100644 index 0000000..d321419 --- /dev/null +++ b/libs/hachoir_core/timeout.py @@ -0,0 +1,76 @@ +""" +limitedTime(): set a timeout in seconds when calling a function, +raise a Timeout error if time exceed. +""" +from math import ceil + +IMPLEMENTATION = None + +class Timeout(RuntimeError): + """ + Timeout error, inherits from RuntimeError + """ + pass + +def signalHandler(signum, frame): + """ + Signal handler to catch timeout signal: raise Timeout exception. + """ + raise Timeout("Timeout exceed!") + +def limitedTime(second, func, *args, **kw): + """ + Call func(*args, **kw) with a timeout of second seconds. 
+ """ + return func(*args, **kw) + +def fixTimeout(second): + """ + Fix timeout value: convert to integer with a minimum of 1 second + """ + if isinstance(second, float): + second = int(ceil(second)) + assert isinstance(second, (int, long)) + return max(second, 1) + +if not IMPLEMENTATION: + try: + from signal import signal, alarm, SIGALRM + + # signal.alarm() implementation + def limitedTime(second, func, *args, **kw): + second = fixTimeout(second) + old_alarm = signal(SIGALRM, signalHandler) + try: + alarm(second) + return func(*args, **kw) + finally: + alarm(0) + signal(SIGALRM, old_alarm) + + IMPLEMENTATION = "signal.alarm()" + except ImportError: + pass + +if not IMPLEMENTATION: + try: + from signal import signal, SIGXCPU + from resource import getrlimit, setrlimit, RLIMIT_CPU + + # resource.setrlimit(RLIMIT_CPU) implementation + # "Bug": timeout is 'CPU' time so sleep() are not part of the timeout + def limitedTime(second, func, *args, **kw): + second = fixTimeout(second) + old_alarm = signal(SIGXCPU, signalHandler) + current = getrlimit(RLIMIT_CPU) + try: + setrlimit(RLIMIT_CPU, (second, current[1])) + return func(*args, **kw) + finally: + setrlimit(RLIMIT_CPU, current) + signal(SIGXCPU, old_alarm) + + IMPLEMENTATION = "resource.setrlimit(RLIMIT_CPU)" + except ImportError: + pass + diff --git a/libs/hachoir_core/tools.py b/libs/hachoir_core/tools.py new file mode 100644 index 0000000..30fa327 --- /dev/null +++ b/libs/hachoir_core/tools.py @@ -0,0 +1,582 @@ +# -*- coding: utf-8 -*- + +""" +Various utilities. +""" + +from hachoir_core.i18n import _, ngettext +import re +import stat +from datetime import datetime, timedelta, MAXYEAR +from warnings import warn + +def deprecated(comment=None): + """ + This is a decorator which can be used to mark functions + as deprecated. It will result in a warning being emmitted + when the function is used. + + Examples: :: + + @deprecated + def oldfunc(): ... + + @deprecated("use newfunc()!") + def oldfunc2(): ... 
+ + Code from: http://code.activestate.com/recipes/391367/ + """ + def _deprecated(func): + def newFunc(*args, **kwargs): + message = "Call to deprecated function %s" % func.__name__ + if comment: + message += ": " + comment + warn(message, category=DeprecationWarning, stacklevel=2) + return func(*args, **kwargs) + newFunc.__name__ = func.__name__ + newFunc.__doc__ = func.__doc__ + newFunc.__dict__.update(func.__dict__) + return newFunc + return _deprecated + +def paddingSize(value, align): + """ + Compute size of a padding field. + + >>> paddingSize(31, 4) + 1 + >>> paddingSize(32, 4) + 0 + >>> paddingSize(33, 4) + 3 + + Note: (value + paddingSize(value, align)) == alignValue(value, align) + """ + if value % align != 0: + return align - (value % align) + else: + return 0 + +def alignValue(value, align): + """ + Align a value to next 'align' multiple. + + >>> alignValue(31, 4) + 32 + >>> alignValue(32, 4) + 32 + >>> alignValue(33, 4) + 36 + + Note: alignValue(value, align) == (value + paddingSize(value, align)) + """ + + if value % align != 0: + return value + align - (value % align) + else: + return value + +def timedelta2seconds(delta): + """ + Convert a datetime.timedelta() objet to a number of second + (floatting point number). + + >>> timedelta2seconds(timedelta(seconds=2, microseconds=40000)) + 2.04 + >>> timedelta2seconds(timedelta(minutes=1, milliseconds=250)) + 60.25 + """ + return delta.microseconds / 1000000.0 \ + + delta.seconds + delta.days * 60*60*24 + +def humanDurationNanosec(nsec): + """ + Convert a duration in nanosecond to human natural representation. + Returns an unicode string. 
+ + >>> humanDurationNanosec(60417893) + u'60.42 ms' + """ + + # Nano second + if nsec < 1000: + return u"%u nsec" % nsec + + # Micro seconds + usec, nsec = divmod(nsec, 1000) + if usec < 1000: + return u"%.2f usec" % (usec+float(nsec)/1000) + + # Milli seconds + msec, usec = divmod(usec, 1000) + if msec < 1000: + return u"%.2f ms" % (msec + float(usec)/1000) + return humanDuration(msec) + +def humanDuration(delta): + """ + Convert a duration in millisecond to human natural representation. + Returns an unicode string. + + >>> humanDuration(0) + u'0 ms' + >>> humanDuration(213) + u'213 ms' + >>> humanDuration(4213) + u'4 sec 213 ms' + >>> humanDuration(6402309) + u'1 hour 46 min 42 sec' + """ + if not isinstance(delta, timedelta): + delta = timedelta(microseconds=delta*1000) + + # Milliseconds + text = [] + if 1000 <= delta.microseconds: + text.append(u"%u ms" % (delta.microseconds//1000)) + + # Seconds + minutes, seconds = divmod(delta.seconds, 60) + hours, minutes = divmod(minutes, 60) + if seconds: + text.append(u"%u sec" % seconds) + if minutes: + text.append(u"%u min" % minutes) + if hours: + text.append(ngettext("%u hour", "%u hours", hours) % hours) + + # Days + years, days = divmod(delta.days, 365) + if days: + text.append(ngettext("%u day", "%u days", days) % days) + if years: + text.append(ngettext("%u year", "%u years", years) % years) + if 3 < len(text): + text = text[-3:] + elif not text: + return u"0 ms" + return u" ".join(reversed(text)) + +def humanFilesize(size): + """ + Convert a file size in byte to human natural representation. + It uses the values: 1 KB is 1024 bytes, 1 MB is 1024 KB, etc. + The result is an unicode string. 
+ + >>> humanFilesize(1) + u'1 byte' + >>> humanFilesize(790) + u'790 bytes' + >>> humanFilesize(256960) + u'250.9 KB' + """ + if size < 10000: + return ngettext("%u byte", "%u bytes", size) % size + units = [_("KB"), _("MB"), _("GB"), _("TB")] + size = float(size) + divisor = 1024 + for unit in units: + size = size / divisor + if size < divisor: + return "%.1f %s" % (size, unit) + return "%u %s" % (size, unit) + +def humanBitSize(size): + """ + Convert a size in bit to human classic representation. + It uses the values: 1 Kbit is 1000 bits, 1 Mbit is 1000 Kbit, etc. + The result is an unicode string. + + >>> humanBitSize(1) + u'1 bit' + >>> humanBitSize(790) + u'790 bits' + >>> humanBitSize(256960) + u'257.0 Kbit' + """ + divisor = 1000 + if size < divisor: + return ngettext("%u bit", "%u bits", size) % size + units = [u"Kbit", u"Mbit", u"Gbit", u"Tbit"] + size = float(size) + for unit in units: + size = size / divisor + if size < divisor: + return "%.1f %s" % (size, unit) + return u"%u %s" % (size, unit) + +def humanBitRate(size): + """ + Convert a bit rate to human classic representation. It uses humanBitSize() + to convert size into human reprensation. The result is an unicode string. + + >>> humanBitRate(790) + u'790 bits/sec' + >>> humanBitRate(256960) + u'257.0 Kbit/sec' + """ + return "".join((humanBitSize(size), "/sec")) + +def humanFrequency(hertz): + """ + Convert a frequency in hertz to human classic representation. + It uses the values: 1 KHz is 1000 Hz, 1 MHz is 1000 KMhz, etc. + The result is an unicode string. 
+ + >>> humanFrequency(790) + u'790 Hz' + >>> humanFrequency(629469) + u'629.5 kHz' + """ + divisor = 1000 + if hertz < divisor: + return u"%u Hz" % hertz + units = [u"kHz", u"MHz", u"GHz", u"THz"] + hertz = float(hertz) + for unit in units: + hertz = hertz / divisor + if hertz < divisor: + return u"%.1f %s" % (hertz, unit) + return u"%s %s" % (hertz, unit) + +regex_control_code = re.compile(r"([\x00-\x1f\x7f])") +controlchars = tuple({ + # Don't use "\0", because "\0"+"0"+"1" = "\001" = "\1" (1 character) + # Same rease to not use octal syntax ("\1") + ord("\n"): r"\n", + ord("\r"): r"\r", + ord("\t"): r"\t", + ord("\a"): r"\a", + ord("\b"): r"\b", + }.get(code, '\\x%02x' % code) + for code in xrange(128) +) + +def makePrintable(data, charset, quote=None, to_unicode=False, smart=True): + r""" + Prepare a string to make it printable in the specified charset. + It escapes control characters. Characters with code bigger than 127 + are escaped if data type is 'str' or if charset is "ASCII". 
+ + Examples with Unicode: + >>> aged = unicode("âgé", "UTF-8") + >>> repr(aged) # text type is 'unicode' + "u'\\xe2g\\xe9'" + >>> makePrintable("abc\0", "UTF-8") + 'abc\\0' + >>> makePrintable(aged, "latin1") + '\xe2g\xe9' + >>> makePrintable(aged, "latin1", quote='"') + '"\xe2g\xe9"' + + Examples with string encoded in latin1: + >>> aged_latin = unicode("âgé", "UTF-8").encode("latin1") + >>> repr(aged_latin) # text type is 'str' + "'\\xe2g\\xe9'" + >>> makePrintable(aged_latin, "latin1") + '\\xe2g\\xe9' + >>> makePrintable("", "latin1") + '' + >>> makePrintable("a", "latin1", quote='"') + '"a"' + >>> makePrintable("", "latin1", quote='"') + '(empty)' + >>> makePrintable("abc", "latin1", quote="'") + "'abc'" + + Control codes: + >>> makePrintable("\0\x03\x0a\x10 \x7f", "latin1") + '\\0\\3\\n\\x10 \\x7f' + + Quote character may also be escaped (only ' and "): + >>> print makePrintable("a\"b", "latin-1", quote='"') + "a\"b" + >>> print makePrintable("a\"b", "latin-1", quote="'") + 'a"b' + >>> print makePrintable("a'b", "latin-1", quote="'") + 'a\'b' + """ + + if data: + if not isinstance(data, unicode): + data = unicode(data, "ISO-8859-1") + charset = "ASCII" + data = regex_control_code.sub( + lambda regs: controlchars[ord(regs.group(1))], data) + if quote: + if quote in "\"'": + data = data.replace(quote, '\\' + quote) + data = ''.join((quote, data, quote)) + elif quote: + data = "(empty)" + data = data.encode(charset, "backslashreplace") + if smart: + # Replace \x00\x01 by \0\1 + data = re.sub(r"\\x0([0-7])(?=[^0-7]|$)", r"\\\1", data) + if to_unicode: + data = unicode(data, charset) + return data + +def makeUnicode(text): + r""" + Convert text to printable Unicode string. 
For byte string (type 'str'), + use charset ISO-8859-1 for the conversion to Unicode + + >>> makeUnicode(u'abc\0d') + u'abc\\0d' + >>> makeUnicode('a\xe9') + u'a\xe9' + """ + if isinstance(text, str): + text = unicode(text, "ISO-8859-1") + elif not isinstance(text, unicode): + text = unicode(text) + text = regex_control_code.sub( + lambda regs: controlchars[ord(regs.group(1))], text) + text = re.sub(r"\\x0([0-7])(?=[^0-7]|$)", r"\\\1", text) + return text + +def binarySearch(seq, cmp_func): + """ + Search a value in a sequence using binary search. Returns index of the + value, or None if the value doesn't exist. + + 'seq' have to be sorted in ascending order according to the + comparaison function ; + + 'cmp_func', prototype func(x), is the compare function: + - Return strictly positive value if we have to search forward ; + - Return strictly negative value if we have to search backward ; + - Otherwise (zero) we got the value. + + >>> # Search number 5 (search forward) + ... binarySearch([0, 4, 5, 10], lambda x: 5-x) + 2 + >>> # Backward search + ... binarySearch([10, 5, 4, 0], lambda x: x-5) + 1 + """ + lower = 0 + upper = len(seq) + while lower < upper: + index = (lower + upper) >> 1 + diff = cmp_func(seq[index]) + if diff < 0: + upper = index + elif diff > 0: + lower = index + 1 + else: + return index + return None + +def lowerBound(seq, cmp_func): + f = 0 + l = len(seq) + while l > 0: + h = l >> 1 + m = f + h + if cmp_func(seq[m]): + f = m + f += 1 + l -= h + 1 + else: + l = h + return f + +def humanUnixAttributes(mode): + """ + Convert a Unix file attributes (or "file mode") to an unicode string. 
+ + Original source code: + http://cvs.savannah.gnu.org/viewcvs/coreutils/lib/filemode.c?root=coreutils + + >>> humanUnixAttributes(0644) + u'-rw-r--r-- (644)' + >>> humanUnixAttributes(02755) + u'-rwxr-sr-x (2755)' + """ + + def ftypelet(mode): + if stat.S_ISREG (mode) or not stat.S_IFMT(mode): + return '-' + if stat.S_ISBLK (mode): return 'b' + if stat.S_ISCHR (mode): return 'c' + if stat.S_ISDIR (mode): return 'd' + if stat.S_ISFIFO(mode): return 'p' + if stat.S_ISLNK (mode): return 'l' + if stat.S_ISSOCK(mode): return 's' + return '?' + + chars = [ ftypelet(mode), 'r', 'w', 'x', 'r', 'w', 'x', 'r', 'w', 'x' ] + for i in xrange(1, 10): + if not mode & 1 << 9 - i: + chars[i] = '-' + if mode & stat.S_ISUID: + if chars[3] != 'x': + chars[3] = 'S' + else: + chars[3] = 's' + if mode & stat.S_ISGID: + if chars[6] != 'x': + chars[6] = 'S' + else: + chars[6] = 's' + if mode & stat.S_ISVTX: + if chars[9] != 'x': + chars[9] = 'T' + else: + chars[9] = 't' + return u"%s (%o)" % (''.join(chars), mode) + +def createDict(data, index): + """ + Create a new dictionnay from dictionnary key=>values: + just keep value number 'index' from all values. + + >>> data={10: ("dix", 100, "a"), 20: ("vingt", 200, "b")} + >>> createDict(data, 0) + {10: 'dix', 20: 'vingt'} + >>> createDict(data, 2) + {10: 'a', 20: 'b'} + """ + return dict( (key,values[index]) for key, values in data.iteritems() ) + +# Start of UNIX timestamp (Epoch): 1st January 1970 at 00:00 +UNIX_TIMESTAMP_T0 = datetime(1970, 1, 1) + +def timestampUNIX(value): + """ + Convert an UNIX (32-bit) timestamp to datetime object. Timestamp value + is the number of seconds since the 1st January 1970 at 00:00. Maximum + value is 2147483647: 19 january 2038 at 03:14:07. + + May raise ValueError for invalid value: value have to be in 0..2147483647. 
+ + >>> timestampUNIX(0) + datetime.datetime(1970, 1, 1, 0, 0) + >>> timestampUNIX(1154175644) + datetime.datetime(2006, 7, 29, 12, 20, 44) + >>> timestampUNIX(1154175644.37) + datetime.datetime(2006, 7, 29, 12, 20, 44, 370000) + >>> timestampUNIX(2147483647) + datetime.datetime(2038, 1, 19, 3, 14, 7) + """ + if not isinstance(value, (float, int, long)): + raise TypeError("timestampUNIX(): an integer or float is required") + if not(0 <= value <= 2147483647): + raise ValueError("timestampUNIX(): value have to be in 0..2147483647") + return UNIX_TIMESTAMP_T0 + timedelta(seconds=value) + +# Start of Macintosh timestamp: 1st January 1904 at 00:00 +MAC_TIMESTAMP_T0 = datetime(1904, 1, 1) + +def timestampMac32(value): + """ + Convert an Mac (32-bit) timestamp to string. The format is the number + of seconds since the 1st January 1904 (to 2040). Returns unicode string. + + >>> timestampMac32(0) + datetime.datetime(1904, 1, 1, 0, 0) + >>> timestampMac32(2843043290) + datetime.datetime(1994, 2, 2, 14, 14, 50) + """ + if not isinstance(value, (float, int, long)): + raise TypeError("an integer or float is required") + if not(0 <= value <= 4294967295): + return _("invalid Mac timestamp (%s)") % value + return MAC_TIMESTAMP_T0 + timedelta(seconds=value) + +def durationWin64(value): + """ + Convert Windows 64-bit duration to string. The timestamp format is + a 64-bit number: number of 100ns. See also timestampWin64(). + + >>> str(durationWin64(1072580000)) + '0:01:47.258000' + >>> str(durationWin64(2146280000)) + '0:03:34.628000' + """ + if not isinstance(value, (float, int, long)): + raise TypeError("an integer or float is required") + if value < 0: + raise ValueError("value have to be a positive or nul integer") + return timedelta(microseconds=value/10) + +# Start of 64-bit Windows timestamp: 1st January 1600 at 00:00 +WIN64_TIMESTAMP_T0 = datetime(1601, 1, 1, 0, 0, 0) + +def timestampWin64(value): + """ + Convert Windows 64-bit timestamp to string. 
The timestamp format is + a 64-bit number which represents number of 100ns since the + 1st January 1601 at 00:00. Result is an unicode string. + See also durationWin64(). Maximum date is 28 may 60056. + + >>> timestampWin64(0) + datetime.datetime(1601, 1, 1, 0, 0) + >>> timestampWin64(127840491566710000) + datetime.datetime(2006, 2, 10, 12, 45, 56, 671000) + """ + try: + return WIN64_TIMESTAMP_T0 + durationWin64(value) + except OverflowError: + raise ValueError(_("date newer than year %s (value=%s)") % (MAXYEAR, value)) + +# Start of 60-bit UUID timestamp: 15 October 1582 at 00:00 +UUID60_TIMESTAMP_T0 = datetime(1582, 10, 15, 0, 0, 0) + +def timestampUUID60(value): + """ + Convert UUID 60-bit timestamp to string. The timestamp format is + a 60-bit number which represents number of 100ns since the + the 15 October 1582 at 00:00. Result is an unicode string. + + >>> timestampUUID60(0) + datetime.datetime(1582, 10, 15, 0, 0) + >>> timestampUUID60(130435676263032368) + datetime.datetime(1996, 2, 14, 5, 13, 46, 303236) + """ + if not isinstance(value, (float, int, long)): + raise TypeError("an integer or float is required") + if value < 0: + raise ValueError("value have to be a positive or nul integer") + try: + return UUID60_TIMESTAMP_T0 + timedelta(microseconds=value/10) + except OverflowError: + raise ValueError(_("timestampUUID60() overflow (value=%s)") % value) + +def humanDatetime(value, strip_microsecond=True): + """ + Convert a timestamp to Unicode string: use ISO format with space separator. + + >>> humanDatetime( datetime(2006, 7, 29, 12, 20, 44) ) + u'2006-07-29 12:20:44' + >>> humanDatetime( datetime(2003, 6, 30, 16, 0, 5, 370000) ) + u'2003-06-30 16:00:05' + >>> humanDatetime( datetime(2003, 6, 30, 16, 0, 5, 370000), False ) + u'2003-06-30 16:00:05.370000' + """ + text = unicode(value.isoformat()) + text = text.replace('T', ' ') + if strip_microsecond and "." 
in text: + text = text.split(".")[0] + return text + +NEWLINES_REGEX = re.compile("\n+") + +def normalizeNewline(text): + r""" + Replace Windows and Mac newlines with Unix newlines. + Replace multiple consecutive newlines with one newline. + + >>> normalizeNewline('a\r\nb') + 'a\nb' + >>> normalizeNewline('a\r\rb') + 'a\nb' + >>> normalizeNewline('a\n\nb') + 'a\nb' + """ + text = text.replace("\r\n", "\n") + text = text.replace("\r", "\n") + return NEWLINES_REGEX.sub("\n", text) + diff --git a/libs/hachoir_core/version.py b/libs/hachoir_core/version.py new file mode 100644 index 0000000..e3506e9 --- /dev/null +++ b/libs/hachoir_core/version.py @@ -0,0 +1,5 @@ +PACKAGE = "hachoir-core" +VERSION = "1.3.4" +WEBSITE = 'http://bitbucket.org/haypo/hachoir/wiki/hachoir-core' +LICENSE = 'GNU GPL v2' + diff --git a/libs/hachoir_metadata/__init__.py b/libs/hachoir_metadata/__init__.py new file mode 100644 index 0000000..5ab4743 --- /dev/null +++ b/libs/hachoir_metadata/__init__.py @@ -0,0 +1,15 @@ +from hachoir_metadata.version import VERSION as __version__ +from hachoir_metadata.metadata import extractMetadata + +# Just import the module, +# each module use registerExtractor() method +import hachoir_metadata.archive +import hachoir_metadata.audio +import hachoir_metadata.file_system +import hachoir_metadata.image +import hachoir_metadata.jpeg +import hachoir_metadata.misc +import hachoir_metadata.program +import hachoir_metadata.riff +import hachoir_metadata.video + diff --git a/libs/hachoir_metadata/archive.py b/libs/hachoir_metadata/archive.py new file mode 100644 index 0000000..7fa39ea --- /dev/null +++ b/libs/hachoir_metadata/archive.py @@ -0,0 +1,166 @@ +from hachoir_metadata.metadata_item import QUALITY_BEST, QUALITY_FASTEST +from hachoir_metadata.safe import fault_tolerant, getValue +from hachoir_metadata.metadata import ( + RootMetadata, Metadata, MultipleMetadata, registerExtractor) +from hachoir_parser.archive import (Bzip2Parser, CabFile, GzipParser, + TarFile, 
ZipFile, MarFile) +from hachoir_core.tools import humanUnixAttributes +from hachoir_core.i18n import _ + +def maxNbFile(meta): + if meta.quality <= QUALITY_FASTEST: + return 0 + if QUALITY_BEST <= meta.quality: + return None + return 1 + int(10 * meta.quality) + +def computeCompressionRate(meta): + """ + Compute compression rate, sizes have to be in byte. + """ + if not meta.has("file_size") \ + or not meta.get("compr_size", 0): + return + file_size = meta.get("file_size") + if not file_size: + return + meta.compr_rate = float(file_size) / meta.get("compr_size") + +class Bzip2Metadata(RootMetadata): + def extract(self, zip): + if "file" in zip: + self.compr_size = zip["file"].size/8 + +class GzipMetadata(RootMetadata): + def extract(self, gzip): + self.useHeader(gzip) + computeCompressionRate(self) + + @fault_tolerant + def useHeader(self, gzip): + self.compression = gzip["compression"].display + if gzip["mtime"]: + self.last_modification = gzip["mtime"].value + self.os = gzip["os"].display + if gzip["has_filename"].value: + self.filename = getValue(gzip, "filename") + if gzip["has_comment"].value: + self.comment = getValue(gzip, "comment") + self.compr_size = gzip["file"].size/8 + self.file_size = gzip["size"].value + +class ZipMetadata(MultipleMetadata): + def extract(self, zip): + max_nb = maxNbFile(self) + for index, field in enumerate(zip.array("file")): + if max_nb is not None and max_nb <= index: + self.warning("ZIP archive contains many files, but only first %s files are processed" % max_nb) + break + self.processFile(field) + + @fault_tolerant + def processFile(self, field): + meta = Metadata(self) + meta.filename = field["filename"].value + meta.creation_date = field["last_mod"].value + meta.compression = field["compression"].display + if "data_desc" in field: + meta.file_size = field["data_desc/file_uncompressed_size"].value + if field["data_desc/file_compressed_size"].value: + meta.compr_size = field["data_desc/file_compressed_size"].value + else: + 
meta.file_size = field["uncompressed_size"].value + if field["compressed_size"].value: + meta.compr_size = field["compressed_size"].value + computeCompressionRate(meta) + self.addGroup(field.name, meta, "File \"%s\"" % meta.get('filename')) + +class TarMetadata(MultipleMetadata): + def extract(self, tar): + max_nb = maxNbFile(self) + for index, field in enumerate(tar.array("file")): + if max_nb is not None and max_nb <= index: + self.warning("TAR archive contains many files, but only first %s files are processed" % max_nb) + break + meta = Metadata(self) + self.extractFile(field, meta) + if meta.has("filename"): + title = _('File "%s"') % meta.getText('filename') + else: + title = _("File") + self.addGroup(field.name, meta, title) + + @fault_tolerant + def extractFile(self, field, meta): + meta.filename = field["name"].value + meta.file_attr = humanUnixAttributes(field.getOctal("mode")) + meta.file_size = field.getOctal("size") + try: + if field.getOctal("mtime"): + meta.last_modification = field.getDatetime() + except ValueError: + pass + meta.file_type = field["type"].display + meta.author = "%s (uid=%s), group %s (gid=%s)" %\ + (field["uname"].value, field.getOctal("uid"), + field["gname"].value, field.getOctal("gid")) + + +class CabMetadata(MultipleMetadata): + def extract(self, cab): + if "folder[0]" in cab: + self.useFolder(cab["folder[0]"]) + self.format_version = "Microsoft Cabinet version %s" % cab["cab_version"].display + self.comment = "%s folders, %s files" % ( + cab["nb_folder"].value, cab["nb_files"].value) + max_nb = maxNbFile(self) + for index, field in enumerate(cab.array("file")): + if max_nb is not None and max_nb <= index: + self.warning("CAB archive contains many files, but only first %s files are processed" % max_nb) + break + self.useFile(field) + + @fault_tolerant + def useFolder(self, folder): + compr = folder["compr_method"].display + if folder["compr_method"].value != 0: + compr += " (level %u)" % folder["compr_level"].value + 
self.compression = compr + + @fault_tolerant + def useFile(self, field): + meta = Metadata(self) + meta.filename = field["filename"].value + meta.file_size = field["filesize"].value + meta.creation_date = field["timestamp"].value + attr = field["attributes"].value + if attr != "(none)": + meta.file_attr = attr + if meta.has("filename"): + title = _("File \"%s\"") % meta.getText('filename') + else: + title = _("File") + self.addGroup(field.name, meta, title) + +class MarMetadata(MultipleMetadata): + def extract(self, mar): + self.comment = "Contains %s files" % mar["nb_file"].value + self.format_version = "Microsoft Archive version %s" % mar["version"].value + max_nb = maxNbFile(self) + for index, field in enumerate(mar.array("file")): + if max_nb is not None and max_nb <= index: + self.warning("MAR archive contains many files, but only first %s files are processed" % max_nb) + break + meta = Metadata(self) + meta.filename = field["filename"].value + meta.compression = "None" + meta.file_size = field["filesize"].value + self.addGroup(field.name, meta, "File \"%s\"" % meta.getText('filename')) + +registerExtractor(CabFile, CabMetadata) +registerExtractor(GzipParser, GzipMetadata) +registerExtractor(Bzip2Parser, Bzip2Metadata) +registerExtractor(TarFile, TarMetadata) +registerExtractor(ZipFile, ZipMetadata) +registerExtractor(MarFile, MarMetadata) + diff --git a/libs/hachoir_metadata/audio.py b/libs/hachoir_metadata/audio.py new file mode 100644 index 0000000..566613e --- /dev/null +++ b/libs/hachoir_metadata/audio.py @@ -0,0 +1,406 @@ +from hachoir_metadata.metadata import (registerExtractor, + Metadata, RootMetadata, MultipleMetadata) +from hachoir_parser.audio import AuFile, MpegAudioFile, RealAudioFile, AiffFile, FlacParser +from hachoir_parser.container import OggFile, RealMediaFile +from hachoir_core.i18n import _ +from hachoir_core.tools import makePrintable, timedelta2seconds, humanBitRate +from datetime import timedelta +from hachoir_metadata.metadata_item 
import QUALITY_FAST, QUALITY_NORMAL, QUALITY_BEST +from hachoir_metadata.safe import fault_tolerant, getValue + +def computeComprRate(meta, size): + if not meta.has("duration") \ + or not meta.has("sample_rate") \ + or not meta.has("bits_per_sample") \ + or not meta.has("nb_channel") \ + or not size: + return + orig_size = timedelta2seconds(meta.get("duration")) * meta.get('sample_rate') * meta.get('bits_per_sample') * meta.get('nb_channel') + meta.compr_rate = float(orig_size) / size + +def computeBitRate(meta): + if not meta.has("bits_per_sample") \ + or not meta.has("nb_channel") \ + or not meta.has("sample_rate"): + return + meta.bit_rate = meta.get('bits_per_sample') * meta.get('nb_channel') * meta.get('sample_rate') + +VORBIS_KEY_TO_ATTR = { + "ARTIST": "artist", + "ALBUM": "album", + "TRACKNUMBER": "track_number", + "TRACKTOTAL": "track_total", + "ENCODER": "producer", + "TITLE": "title", + "LOCATION": "location", + "DATE": "creation_date", + "ORGANIZATION": "organization", + "GENRE": "music_genre", + "": "comment", + "COMPOSER": "music_composer", + "DESCRIPTION": "comment", + "COMMENT": "comment", + "WWW": "url", + "WOAF": "url", + "LICENSE": "copyright", +} + +@fault_tolerant +def readVorbisComment(metadata, comment): + metadata.producer = getValue(comment, "vendor") + for item in comment.array("metadata"): + if "=" in item.value: + key, value = item.value.split("=", 1) + key = key.upper() + if key in VORBIS_KEY_TO_ATTR: + key = VORBIS_KEY_TO_ATTR[key] + setattr(metadata, key, value) + elif value: + metadata.warning("Skip Vorbis comment %s: %s" % (key, value)) + +class OggMetadata(MultipleMetadata): + def extract(self, ogg): + granule_quotient = None + for index, page in enumerate(ogg.array("page")): + if "segments" not in page: + continue + page = page["segments"] + if "vorbis_hdr" in page: + meta = Metadata(self) + self.vorbisHeader(page["vorbis_hdr"], meta) + self.addGroup("audio[]", meta, "Audio") + if not granule_quotient and meta.has("sample_rate"): 
+ granule_quotient = meta.get('sample_rate') + if "theora_hdr" in page: + meta = Metadata(self) + self.theoraHeader(page["theora_hdr"], meta) + self.addGroup("video[]", meta, "Video") + if "video_hdr" in page: + meta = Metadata(self) + self.videoHeader(page["video_hdr"], meta) + self.addGroup("video[]", meta, "Video") + if not granule_quotient and meta.has("frame_rate"): + granule_quotient = meta.get('frame_rate') + if "comment" in page: + readVorbisComment(self, page["comment"]) + if 3 <= index: + # Only process pages 0..3 + break + + # Compute duration + if granule_quotient and QUALITY_NORMAL <= self.quality: + page = ogg.createLastPage() + if page and "abs_granule_pos" in page: + try: + self.duration = timedelta(seconds=float(page["abs_granule_pos"].value) / granule_quotient) + except OverflowError: + pass + + def videoHeader(self, header, meta): + meta.compression = header["fourcc"].display + meta.width = header["width"].value + meta.height = header["height"].value + meta.bits_per_pixel = header["bits_per_sample"].value + if header["time_unit"].value: + meta.frame_rate = 10000000.0 / header["time_unit"].value + + def theoraHeader(self, header, meta): + meta.compression = "Theora" + meta.format_version = "Theora version %u.%u (revision %u)" % (\ + header["version_major"].value, + header["version_minor"].value, + header["version_revision"].value) + meta.width = header["frame_width"].value + meta.height = header["frame_height"].value + if header["fps_den"].value: + meta.frame_rate = float(header["fps_num"].value) / header["fps_den"].value + if header["aspect_ratio_den"].value: + meta.aspect_ratio = float(header["aspect_ratio_num"].value) / header["aspect_ratio_den"].value + meta.pixel_format = header["pixel_format"].display + meta.comment = "Quality: %s" % header["quality"].value + + def vorbisHeader(self, header, meta): + meta.compression = u"Vorbis" + meta.sample_rate = header["audio_sample_rate"].value + meta.nb_channel = header["audio_channels"].value + 
meta.format_version = u"Vorbis version %s" % header["vorbis_version"].value + meta.bit_rate = header["bitrate_nominal"].value + +class AuMetadata(RootMetadata): + def extract(self, audio): + self.sample_rate = audio["sample_rate"].value + self.nb_channel = audio["channels"].value + self.compression = audio["codec"].display + if "info" in audio: + self.comment = audio["info"].value + self.bits_per_sample = audio.getBitsPerSample() + computeBitRate(self) + if "audio_data" in audio: + if self.has("bit_rate"): + self.duration = timedelta(seconds=float(audio["audio_data"].size) / self.get('bit_rate')) + computeComprRate(self, audio["audio_data"].size) + +class RealAudioMetadata(RootMetadata): + FOURCC_TO_BITRATE = { + u"28_8": 15200, # 28.8 kbit/sec (audio bit rate: 15.2 kbit/s) + u"14_4": 8000, # 14.4 kbit/sec + u"lpcJ": 8000, # 14.4 kbit/sec + } + + def extract(self, real): + version = real["version"].value + if "metadata" in real: + self.useMetadata(real["metadata"]) + self.useRoot(real) + self.format_version = "Real audio version %s" % version + if version == 3: + size = getValue(real, "data_size") + elif "filesize" in real and "headersize" in real: + size = (real["filesize"].value + 40) - (real["headersize"].value + 16) + else: + size = None + if size: + size *= 8 + if self.has("bit_rate"): + sec = float(size) / self.get('bit_rate') + self.duration = timedelta(seconds=sec) + computeComprRate(self, size) + + @fault_tolerant + def useMetadata(self, info): + self.title = info["title"].value + self.author = info["author"].value + self.copyright = info["copyright"].value + self.comment = info["comment"].value + + @fault_tolerant + def useRoot(self, real): + self.bits_per_sample = 16 # FIXME: Is that correct? 
+ if real["version"].value != 3: + self.sample_rate = real["sample_rate"].value + self.nb_channel = real["channels"].value + else: + self.sample_rate = 8000 + self.nb_channel = 1 + fourcc = getValue(real, "FourCC") + if fourcc: + self.compression = fourcc + try: + self.bit_rate = self.FOURCC_TO_BITRATE[fourcc] + except LookupError: + pass + +class RealMediaMetadata(MultipleMetadata): + KEY_TO_ATTR = { + "generated by": "producer", + "creation date": "creation_date", + "modification date": "last_modification", + "description": "comment", + } + + def extract(self, media): + if "file_prop" in media: + self.useFileProp(media["file_prop"]) + if "content_desc" in media: + self.useContentDesc(media["content_desc"]) + for index, stream in enumerate(media.array("stream_prop")): + self.useStreamProp(stream, index) + + @fault_tolerant + def useFileInfoProp(self, prop): + key = prop["name"].value.lower() + value = prop["value"].value + if key in self.KEY_TO_ATTR: + setattr(self, self.KEY_TO_ATTR[key], value) + elif value: + self.warning("Skip %s: %s" % (prop["name"].value, value)) + + @fault_tolerant + def useFileProp(self, prop): + self.bit_rate = prop["avg_bit_rate"].value + self.duration = timedelta(milliseconds=prop["duration"].value) + + @fault_tolerant + def useContentDesc(self, content): + self.title = content["title"].value + self.author = content["author"].value + self.copyright = content["copyright"].value + self.comment = content["comment"].value + + @fault_tolerant + def useStreamProp(self, stream, index): + meta = Metadata(self) + meta.comment = "Start: %s" % stream["stream_start"].value + if getValue(stream, "mime_type") == "logical-fileinfo": + for prop in stream.array("file_info/prop"): + self.useFileInfoProp(prop) + else: + meta.bit_rate = stream["avg_bit_rate"].value + meta.duration = timedelta(milliseconds=stream["duration"].value) + meta.mime_type = getValue(stream, "mime_type") + meta.title = getValue(stream, "desc") + self.addGroup("stream[%u]" % index, 
meta, "Stream #%u" % (1+index)) + +class MpegAudioMetadata(RootMetadata): + TAG_TO_KEY = { + # ID3 version 2.2 + "TP1": "author", + "COM": "comment", + "TEN": "producer", + "TRK": "track_number", + "TAL": "album", + "TT2": "title", + "TYE": "creation_date", + "TCO": "music_genre", + + # ID3 version 2.3+ + "TPE1": "author", + "COMM": "comment", + "TENC": "producer", + "TRCK": "track_number", + "TALB": "album", + "TIT2": "title", + "TYER": "creation_date", + "WXXX": "url", + "TCON": "music_genre", + "TLAN": "language", + "TCOP": "copyright", + "TDAT": "creation_date", + "TRDA": "creation_date", + "TORY": "creation_date", + "TIT1": "title", + } + + def processID3v2(self, field): + # Read value + if "content" not in field: + return + content = field["content"] + if "text" not in content: + return + if "title" in content and content["title"].value: + value = "%s: %s" % (content["title"].value, content["text"].value) + else: + value = content["text"].value + + # Known tag? + tag = field["tag"].value + if tag not in self.TAG_TO_KEY: + if tag: + if isinstance(tag, str): + tag = makePrintable(tag, "ISO-8859-1", to_unicode=True) + self.warning("Skip ID3v2 tag %s: %s" % (tag, value)) + return + key = self.TAG_TO_KEY[tag] + setattr(self, key, value) + + def readID3v2(self, id3): + for field in id3: + if field.is_field_set and "tag" in field: + self.processID3v2(field) + + def extract(self, mp3): + if "/frames/frame[0]" in mp3: + frame = mp3["/frames/frame[0]"] + self.nb_channel = (frame.getNbChannel(), frame["channel_mode"].display) + self.format_version = u"MPEG version %s layer %s" % \ + (frame["version"].display, frame["layer"].display) + self.sample_rate = frame.getSampleRate() + self.bits_per_sample = 16 + if mp3["frames"].looksConstantBitRate(): + self.computeBitrate(frame) + else: + self.computeVariableBitrate(mp3) + if "id3v1" in mp3: + id3 = mp3["id3v1"] + self.comment = id3["comment"].value + self.author = id3["author"].value + self.title = id3["song"].value + 
self.album = id3["album"].value + if id3["year"].value != "0": + self.creation_date = id3["year"].value + if "track_nb" in id3: + self.track_number = id3["track_nb"].value + if "id3v2" in mp3: + self.readID3v2(mp3["id3v2"]) + if "frames" in mp3: + computeComprRate(self, mp3["frames"].size) + + def computeBitrate(self, frame): + bit_rate = frame.getBitRate() # may returns None on error + if not bit_rate: + return + self.bit_rate = (bit_rate, _("%s (constant)") % humanBitRate(bit_rate)) + self.duration = timedelta(seconds=float(frame["/frames"].size) / bit_rate) + + def computeVariableBitrate(self, mp3): + if self.quality <= QUALITY_FAST: + return + count = 0 + if QUALITY_BEST <= self.quality: + self.warning("Process all MPEG audio frames to compute exact duration") + max_count = None + else: + max_count = 500 * self.quality + total_bit_rate = 0.0 + for index, frame in enumerate(mp3.array("frames/frame")): + if index < 3: + continue + bit_rate = frame.getBitRate() + if bit_rate: + total_bit_rate += float(bit_rate) + count += 1 + if max_count and max_count <= count: + break + if not count: + return + bit_rate = total_bit_rate / count + self.bit_rate = (bit_rate, + _("%s (Variable bit rate)") % humanBitRate(bit_rate)) + duration = timedelta(seconds=float(mp3["frames"].size) / bit_rate) + self.duration = duration + +class AiffMetadata(RootMetadata): + def extract(self, aiff): + if "common" in aiff: + self.useCommon(aiff["common"]) + computeBitRate(self) + + @fault_tolerant + def useCommon(self, info): + self.nb_channel = info["nb_channel"].value + self.bits_per_sample = info["sample_size"].value + self.sample_rate = getValue(info, "sample_rate") + if self.has("sample_rate"): + rate = self.get("sample_rate") + if rate: + sec = float(info["nb_sample"].value) / rate + self.duration = timedelta(seconds=sec) + if "codec" in info: + self.compression = info["codec"].display + +class FlacMetadata(RootMetadata): + def extract(self, flac): + if "metadata/stream_info/content" in 
flac: + self.useStreamInfo(flac["metadata/stream_info/content"]) + if "metadata/comment/content" in flac: + readVorbisComment(self, flac["metadata/comment/content"]) + + @fault_tolerant + def useStreamInfo(self, info): + self.nb_channel = info["nb_channel"].value + 1 + self.bits_per_sample = info["bits_per_sample"].value + 1 + self.sample_rate = info["sample_hertz"].value + sec = info["total_samples"].value + if sec: + sec = float(sec) / info["sample_hertz"].value + self.duration = timedelta(seconds=sec) + +registerExtractor(AuFile, AuMetadata) +registerExtractor(MpegAudioFile, MpegAudioMetadata) +registerExtractor(OggFile, OggMetadata) +registerExtractor(RealMediaFile, RealMediaMetadata) +registerExtractor(RealAudioFile, RealAudioMetadata) +registerExtractor(AiffFile, AiffMetadata) +registerExtractor(FlacParser, FlacMetadata) + diff --git a/libs/hachoir_metadata/config.py b/libs/hachoir_metadata/config.py new file mode 100644 index 0000000..c45d6a1 --- /dev/null +++ b/libs/hachoir_metadata/config.py @@ -0,0 +1,2 @@ +MAX_STR_LENGTH = 300 # characters +RAW_OUTPUT = False diff --git a/libs/hachoir_metadata/file_system.py b/libs/hachoir_metadata/file_system.py new file mode 100644 index 0000000..b111c48 --- /dev/null +++ b/libs/hachoir_metadata/file_system.py @@ -0,0 +1,28 @@ +from hachoir_metadata.metadata import RootMetadata, registerExtractor +from hachoir_metadata.safe import fault_tolerant +from hachoir_parser.file_system import ISO9660 +from datetime import datetime + +class ISO9660_Metadata(RootMetadata): + def extract(self, iso): + desc = iso['volume[0]/content'] + self.title = desc['volume_id'].value + self.title = desc['vol_set_id'].value + self.author = desc['publisher'].value + self.author = desc['data_preparer'].value + self.producer = desc['application'].value + self.copyright = desc['copyright'].value + self.readTimestamp('creation_date', desc['creation_ts'].value) + self.readTimestamp('last_modification', desc['modification_ts'].value) + + 
@fault_tolerant + def readTimestamp(self, key, value): + if value.startswith("0000"): + return + value = datetime( + int(value[0:4]), int(value[4:6]), int(value[6:8]), + int(value[8:10]), int(value[10:12]), int(value[12:14])) + setattr(self, key, value) + +registerExtractor(ISO9660, ISO9660_Metadata) + diff --git a/libs/hachoir_metadata/filter.py b/libs/hachoir_metadata/filter.py new file mode 100644 index 0000000..b4af8e3 --- /dev/null +++ b/libs/hachoir_metadata/filter.py @@ -0,0 +1,52 @@ +from hachoir_metadata.timezone import UTC +from datetime import date, datetime + +# Year in 1850..2030 +MIN_YEAR = 1850 +MAX_YEAR = 2030 + +class Filter: + def __init__(self, valid_types, min=None, max=None): + self.types = valid_types + self.min = min + self.max = max + + def __call__(self, value): + if not isinstance(value, self.types): + return True + if self.min is not None and value < self.min: + return False + if self.max is not None and self.max < value: + return False + return True + +class NumberFilter(Filter): + def __init__(self, min=None, max=None): + Filter.__init__(self, (int, long, float), min, max) + +class DatetimeFilter(Filter): + def __init__(self, min=None, max=None): + Filter.__init__(self, (date, datetime), + datetime(MIN_YEAR, 1, 1), + datetime(MAX_YEAR, 12, 31)) + self.min_date = date(MIN_YEAR, 1, 1) + self.max_date = date(MAX_YEAR, 12, 31) + self.min_tz = datetime(MIN_YEAR, 1, 1, tzinfo=UTC) + self.max_tz = datetime(MAX_YEAR, 12, 31, tzinfo=UTC) + + def __call__(self, value): + """ + Use different min/max values depending on value type + (datetime with timezone, datetime or date). 
+ """ + if not isinstance(value, self.types): + return True + if hasattr(value, "tzinfo") and value.tzinfo: + return (self.min_tz <= value <= self.max_tz) + elif isinstance(value, datetime): + return (self.min <= value <= self.max) + else: + return (self.min_date <= value <= self.max_date) + +DATETIME_FILTER = DatetimeFilter() + diff --git a/libs/hachoir_metadata/formatter.py b/libs/hachoir_metadata/formatter.py new file mode 100644 index 0000000..0d04f92 --- /dev/null +++ b/libs/hachoir_metadata/formatter.py @@ -0,0 +1,25 @@ +from hachoir_core.i18n import _, ngettext + +NB_CHANNEL_NAME = {1: _("mono"), 2: _("stereo")} + +def humanAudioChannel(value): + return NB_CHANNEL_NAME.get(value, unicode(value)) + +def humanFrameRate(value): + if isinstance(value, (int, long, float)): + return _("%.1f fps") % value + else: + return value + +def humanComprRate(rate): + return u"%.1fx" % rate + +def humanAltitude(value): + return ngettext("%.1f meter", "%.1f meters", value) % value + +def humanPixelSize(value): + return ngettext("%s pixel", "%s pixels", value) % value + +def humanDPI(value): + return u"%s DPI" % value + diff --git a/libs/hachoir_metadata/image.py b/libs/hachoir_metadata/image.py new file mode 100644 index 0000000..905cdd7 --- /dev/null +++ b/libs/hachoir_metadata/image.py @@ -0,0 +1,299 @@ +from hachoir_metadata.metadata import (registerExtractor, + Metadata, RootMetadata, MultipleMetadata) +from hachoir_parser.image import ( + BmpFile, IcoFile, PcxFile, GifFile, PngFile, TiffFile, + XcfFile, TargaFile, WMF_File, PsdFile) +from hachoir_parser.image.png import getBitsPerPixel as pngBitsPerPixel +from hachoir_parser.image.xcf import XcfProperty +from hachoir_core.i18n import _ +from hachoir_metadata.safe import fault_tolerant + +def computeComprRate(meta, compr_size): + """ + Compute image compression rate. Skip size of color palette, focus on + image pixels. Original size is width x height x bpp. Compressed size + is an argument (in bits). 
+ + Set "compr_data" with a string like "1.52x". + """ + if not meta.has("width") \ + or not meta.has("height") \ + or not meta.has("bits_per_pixel"): + return + if not compr_size: + return + orig_size = meta.get('width') * meta.get('height') * meta.get('bits_per_pixel') + meta.compr_rate = float(orig_size) / compr_size + +class BmpMetadata(RootMetadata): + def extract(self, image): + if "header" not in image: + return + hdr = image["header"] + self.width = hdr["width"].value + self.height = hdr["height"].value + bpp = hdr["bpp"].value + if bpp: + if bpp <= 8 and "used_colors" in hdr: + self.nb_colors = hdr["used_colors"].value + self.bits_per_pixel = bpp + self.compression = hdr["compression"].display + self.format_version = u"Microsoft Bitmap version %s" % hdr.getFormatVersion() + + self.width_dpi = hdr["horizontal_dpi"].value + self.height_dpi = hdr["vertical_dpi"].value + + if "pixels" in image: + computeComprRate(self, image["pixels"].size) + +class TiffMetadata(RootMetadata): + key_to_attr = { + "img_width": "width", + "img_height": "width", + + # TODO: Enable that (need link to value) +# "description": "comment", +# "doc_name": "title", +# "orientation": "image_orientation", + } + def extract(self, tiff): + if "ifd" in tiff: + self.useIFD(tiff["ifd"]) + + def useIFD(self, ifd): + for field in ifd: + try: + attrname = self.key_to_attr[field.name] + except KeyError: + continue + if "value" not in field: + continue + value = field["value"].value + setattr(self, attrname, value) + +class IcoMetadata(MultipleMetadata): + color_to_bpp = { + 2: 1, + 16: 4, + 256: 8 + } + + def extract(self, icon): + for index, header in enumerate(icon.array("icon_header")): + image = Metadata(self) + + # Read size and colors from header + image.width = header["width"].value + image.height = header["height"].value + bpp = header["bpp"].value + nb_colors = header["nb_color"].value + if nb_colors != 0: + image.nb_colors = nb_colors + if bpp == 0 and nb_colors in self.color_to_bpp: + 
bpp = self.color_to_bpp[nb_colors] + elif bpp == 0: + bpp = 8 + image.bits_per_pixel = bpp + image.setHeader(_("Icon #%u (%sx%s)") + % (1+index, image.get("width", "?"), image.get("height", "?"))) + + # Read compression from data (if available) + key = "icon_data[%u]/header/codec" % index + if key in icon: + image.compression = icon[key].display + key = "icon_data[%u]/pixels" % index + if key in icon: + computeComprRate(image, icon[key].size) + + # Store new image + self.addGroup("image[%u]" % index, image) + +class PcxMetadata(RootMetadata): + @fault_tolerant + def extract(self, pcx): + self.width = 1 + pcx["xmax"].value + self.height = 1 + pcx["ymax"].value + self.width_dpi = pcx["horiz_dpi"].value + self.height_dpi = pcx["vert_dpi"].value + self.bits_per_pixel = pcx["bpp"].value + if 1 <= pcx["bpp"].value <= 8: + self.nb_colors = 2 ** pcx["bpp"].value + self.compression = _("Run-length encoding (RLE)") + self.format_version = "PCX: %s" % pcx["version"].display + if "image_data" in pcx: + computeComprRate(self, pcx["image_data"].size) + +class XcfMetadata(RootMetadata): + # Map image type to bits/pixel + TYPE_TO_BPP = {0: 24, 1: 8, 2: 8} + + def extract(self, xcf): + self.width = xcf["width"].value + self.height = xcf["height"].value + try: + self.bits_per_pixel = self.TYPE_TO_BPP[ xcf["type"].value ] + except KeyError: + pass + self.format_version = xcf["type"].display + self.readProperties(xcf) + + @fault_tolerant + def processProperty(self, prop): + type = prop["type"].value + if type == XcfProperty.PROP_PARASITES: + for field in prop["data"]: + if "name" not in field or "data" not in field: + continue + if field["name"].value == "gimp-comment": + self.comment = field["data"].value + elif type == XcfProperty.PROP_COMPRESSION: + self.compression = prop["data/compression"].display + elif type == XcfProperty.PROP_RESOLUTION: + self.width_dpi = int(prop["data/xres"].value) + self.height_dpi = int(prop["data/yres"].value) + + def readProperties(self, xcf): + for 
prop in xcf.array("property"): + self.processProperty(prop) + +class PngMetadata(RootMetadata): + TEXT_TO_ATTR = { + "software": "producer", + } + + def extract(self, png): + if "header" in png: + self.useHeader(png["header"]) + if "time" in png: + self.useTime(png["time"]) + if "physical" in png: + self.usePhysical(png["physical"]) + for comment in png.array("text"): + if "text" not in comment: + continue + keyword = comment["keyword"].value + text = comment["text"].value + try: + key = self.TEXT_TO_ATTR[keyword.lower()] + setattr(self, key, text) + except KeyError: + if keyword.lower() != "comment": + self.comment = "%s=%s" % (keyword, text) + else: + self.comment = text + compr_size = sum( data.size for data in png.array("data") ) + computeComprRate(self, compr_size) + + @fault_tolerant + def useTime(self, field): + self.creation_date = field.value + + @fault_tolerant + def usePhysical(self, field): + self.width_dpi = field["pixel_per_unit_x"].value + self.height_dpi = field["pixel_per_unit_y"].value + + @fault_tolerant + def useHeader(self, header): + self.width = header["width"].value + self.height = header["height"].value + + # Read number of colors and pixel format + if "/palette/size" in header: + nb_colors = header["/palette/size"].value // 3 + else: + nb_colors = None + if not header["has_palette"].value: + if header["has_alpha"].value: + self.pixel_format = _("RGBA") + else: + self.pixel_format = _("RGB") + elif "/transparency" in header: + self.pixel_format = _("Color index with transparency") + if nb_colors: + nb_colors -= 1 + else: + self.pixel_format = _("Color index") + self.bits_per_pixel = pngBitsPerPixel(header) + if nb_colors: + self.nb_colors = nb_colors + + # Read compression, timestamp, etc. 
+ self.compression = header["compression"].display + +class GifMetadata(RootMetadata): + def extract(self, gif): + self.useScreen(gif["/screen"]) + if self.has("bits_per_pixel"): + self.nb_colors = (1 << self.get('bits_per_pixel')) + self.compression = _("LZW") + self.format_version = "GIF version %s" % gif["version"].value + for comments in gif.array("comments"): + for comment in gif.array(comments.name + "/comment"): + self.comment = comment.value + if "graphic_ctl/has_transp" in gif and gif["graphic_ctl/has_transp"].value: + self.pixel_format = _("Color index with transparency") + else: + self.pixel_format = _("Color index") + + @fault_tolerant + def useScreen(self, screen): + self.width = screen["width"].value + self.height = screen["height"].value + self.bits_per_pixel = (1 + screen["bpp"].value) + +class TargaMetadata(RootMetadata): + def extract(self, tga): + self.width = tga["width"].value + self.height = tga["height"].value + self.bits_per_pixel = tga["bpp"].value + if tga["nb_color"].value: + self.nb_colors = tga["nb_color"].value + self.compression = tga["codec"].display + if "pixels" in tga: + computeComprRate(self, tga["pixels"].size) + +class WmfMetadata(RootMetadata): + def extract(self, wmf): + if wmf.isAPM(): + if "amf_header/rect" in wmf: + rect = wmf["amf_header/rect"] + self.width = (rect["right"].value - rect["left"].value) + self.height = (rect["bottom"].value - rect["top"].value) + self.bits_per_pixel = 24 + elif wmf.isEMF(): + emf = wmf["emf_header"] + if "description" in emf: + desc = emf["description"].value + if "\0" in desc: + self.producer, self.title = desc.split("\0", 1) + else: + self.producer = desc + if emf["nb_colors"].value: + self.nb_colors = emf["nb_colors"].value + self.bits_per_pixel = 8 + else: + self.bits_per_pixel = 24 + self.width = emf["width_px"].value + self.height = emf["height_px"].value + +class PsdMetadata(RootMetadata): + @fault_tolerant + def extract(self, psd): + self.width = psd["width"].value + self.height = 
psd["height"].value + self.bits_per_pixel = psd["depth"].value * psd["nb_channels"].value + self.pixel_format = psd["color_mode"].display + self.compression = psd["compression"].display + +registerExtractor(IcoFile, IcoMetadata) +registerExtractor(GifFile, GifMetadata) +registerExtractor(XcfFile, XcfMetadata) +registerExtractor(TargaFile, TargaMetadata) +registerExtractor(PcxFile, PcxMetadata) +registerExtractor(BmpFile, BmpMetadata) +registerExtractor(PngFile, PngMetadata) +registerExtractor(TiffFile, TiffMetadata) +registerExtractor(WMF_File, WmfMetadata) +registerExtractor(PsdFile, PsdMetadata) + diff --git a/libs/hachoir_metadata/jpeg.py b/libs/hachoir_metadata/jpeg.py new file mode 100644 index 0000000..29247dc --- /dev/null +++ b/libs/hachoir_metadata/jpeg.py @@ -0,0 +1,289 @@ +from hachoir_metadata.metadata import RootMetadata, registerExtractor +from hachoir_metadata.image import computeComprRate +from hachoir_parser.image.exif import ExifEntry +from hachoir_parser.image.jpeg import ( + JpegFile, JpegChunk, + QUALITY_HASH_COLOR, QUALITY_SUM_COLOR, + QUALITY_HASH_GRAY, QUALITY_SUM_GRAY) +from hachoir_core.field import MissingField +from hachoir_core.i18n import _ +from hachoir_core.tools import makeUnicode +from hachoir_metadata.safe import fault_tolerant +from datetime import datetime + +def deg2float(degree, minute, second): + return degree + (float(minute) + float(second) / 60.0) / 60.0 + +class JpegMetadata(RootMetadata): + EXIF_KEY = { + # Exif metadatas + ExifEntry.TAG_CAMERA_MANUFACTURER: "camera_manufacturer", + ExifEntry.TAG_CAMERA_MODEL: "camera_model", + ExifEntry.TAG_ORIENTATION: "image_orientation", + ExifEntry.TAG_EXPOSURE: "camera_exposure", + ExifEntry.TAG_FOCAL: "camera_focal", + ExifEntry.TAG_BRIGHTNESS: "camera_brightness", + ExifEntry.TAG_APERTURE: "camera_aperture", + + # Generic metadatas + ExifEntry.TAG_IMG_TITLE: "title", + ExifEntry.TAG_SOFTWARE: "producer", + ExifEntry.TAG_FILE_TIMESTAMP: "creation_date", + ExifEntry.TAG_WIDTH: 
"width", + ExifEntry.TAG_HEIGHT: "height", + ExifEntry.TAG_USER_COMMENT: "comment", + } + + IPTC_KEY = { + 80: "author", + 90: "city", + 101: "country", + 116: "copyright", + 120: "title", + 231: "comment", + } + + orientation_name = { + 1: _('Horizontal (normal)'), + 2: _('Mirrored horizontal'), + 3: _('Rotated 180'), + 4: _('Mirrored vertical'), + 5: _('Mirrored horizontal then rotated 90 counter-clock-wise'), + 6: _('Rotated 90 clock-wise'), + 7: _('Mirrored horizontal then rotated 90 clock-wise'), + 8: _('Rotated 90 counter clock-wise'), + } + + def extract(self, jpeg): + if "start_frame/content" in jpeg: + self.startOfFrame(jpeg["start_frame/content"]) + elif "start_scan/content/nr_components" in jpeg: + self.bits_per_pixel = 8 * jpeg["start_scan/content/nr_components"].value + if "app0/content" in jpeg: + self.extractAPP0(jpeg["app0/content"]) + + if "exif/content" in jpeg: + for ifd in jpeg.array("exif/content/ifd"): + for entry in ifd.array("entry"): + self.processIfdEntry(ifd, entry) + self.readGPS(ifd) + if "photoshop/content" in jpeg: + psd = jpeg["photoshop/content"] + if "version/content/reader_name" in psd: + self.producer = psd["version/content/reader_name"].value + if "iptc/content" in psd: + self.parseIPTC(psd["iptc/content"]) + for field in jpeg.array("comment"): + if "content/comment" in field: + self.comment = field["content/comment"].value + self.computeQuality(jpeg) + if "data" in jpeg: + computeComprRate(self, jpeg["data"].size) + if not self.has("producer") and "photoshop" in jpeg: + self.producer = u"Adobe Photoshop" + if self.has("compression"): + self.compression = "JPEG" + + @fault_tolerant + def startOfFrame(self, sof): + # Set compression method + key = sof["../type"].value + self.compression = "JPEG (%s)" % JpegChunk.START_OF_FRAME[key] + + # Read image size and bits/pixel + self.width = sof["width"].value + self.height = sof["height"].value + nb_components = sof["nr_components"].value + self.bits_per_pixel = 8 * nb_components + if 
nb_components == 3: + self.pixel_format = _("YCbCr") + elif nb_components == 1: + self.pixel_format = _("Grayscale") + self.nb_colors = 256 + + @fault_tolerant + def computeQuality(self, jpeg): + # This function is an adaption to Python of ImageMagick code + # to compute JPEG quality using quantization tables + + # Read quantization tables + qtlist = [] + for dqt in jpeg.array("quantization"): + for qt in dqt.array("content/qt"): + # TODO: Take care of qt["index"].value? + qtlist.append(qt) + if not qtlist: + return + + # Compute sum of all coefficients + sumcoeff = 0 + for qt in qtlist: + coeff = qt.array("coeff") + for index in xrange(64): + sumcoeff += coeff[index].value + + # Choose the right quality table and compute hash value + try: + hashval= qtlist[0]["coeff[2]"].value + qtlist[0]["coeff[53]"].value + if 2 <= len(qtlist): + hashval += qtlist[1]["coeff[0]"].value + qtlist[1]["coeff[63]"].value + hashtable = QUALITY_HASH_COLOR + sumtable = QUALITY_SUM_COLOR + else: + hashtable = QUALITY_HASH_GRAY + sumtable = QUALITY_SUM_GRAY + except (MissingField, IndexError): + # A coefficient is missing, so don't compute JPEG quality + return + + # Find the JPEG quality + for index in xrange(100): + if (hashval >= hashtable[index]) or (sumcoeff >= sumtable[index]): + quality = "%s%%" % (index + 1) + if (hashval > hashtable[index]) or (sumcoeff > sumtable[index]): + quality += " " + _("(approximate)") + self.comment = "JPEG quality: %s" % quality + return + + @fault_tolerant + def extractAPP0(self, app0): + self.format_version = u"JFIF %u.%02u" \ + % (app0["ver_maj"].value, app0["ver_min"].value) + if "y_density" in app0: + self.width_dpi = app0["x_density"].value + self.height_dpi = app0["y_density"].value + + @fault_tolerant + def processIfdEntry(self, ifd, entry): + # Skip unknown tags + tag = entry["tag"].value + if tag not in self.EXIF_KEY: + return + key = self.EXIF_KEY[tag] + if key in ("width", "height") and self.has(key): + # EXIF "valid size" are sometimes not 
updated when the image is scaled + # so we just ignore it + return + + # Read value + if "value" in entry: + value = entry["value"].value + else: + value = ifd["value_%s" % entry.name].value + + # Convert value to string + if tag == ExifEntry.TAG_ORIENTATION: + value = self.orientation_name.get(value, value) + elif tag == ExifEntry.TAG_EXPOSURE: + if not value: + return + if isinstance(value, float): + value = (value, u"1/%g" % (1/value)) + elif entry["type"].value in (ExifEntry.TYPE_RATIONAL, ExifEntry.TYPE_SIGNED_RATIONAL): + value = (value, u"%.3g" % value) + + # Store information + setattr(self, key, value) + + @fault_tolerant + def readGPS(self, ifd): + # Read latitude and longitude + latitude_ref = None + longitude_ref = None + latitude = None + longitude = None + altitude_ref = 1 + altitude = None + timestamp = None + datestamp = None + for entry in ifd.array("entry"): + tag = entry["tag"].value + if tag == ExifEntry.TAG_GPS_LATITUDE_REF: + if entry["value"].value == "N": + latitude_ref = 1 + else: + latitude_ref = -1 + elif tag == ExifEntry.TAG_GPS_LONGITUDE_REF: + if entry["value"].value == "E": + longitude_ref = 1 + else: + longitude_ref = -1 + elif tag == ExifEntry.TAG_GPS_ALTITUDE_REF: + if entry["value"].value == 1: + altitude_ref = -1 + else: + altitude_ref = 1 + elif tag == ExifEntry.TAG_GPS_LATITUDE: + latitude = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)] + elif tag == ExifEntry.TAG_GPS_LONGITUDE: + longitude = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)] + elif tag == ExifEntry.TAG_GPS_ALTITUDE: + altitude = ifd["value_%s" % entry.name].value + elif tag == ExifEntry.TAG_GPS_DATESTAMP: + datestamp = ifd["value_%s" % entry.name].value + elif tag == ExifEntry.TAG_GPS_TIMESTAMP: + items = [ifd["value_%s[%u]" % (entry.name, index)].value for index in xrange(3)] + items = map(int, items) + items = map(str, items) + timestamp = ":".join(items) + if latitude_ref and latitude: + value = 
deg2float(*latitude) + if latitude_ref < 0: + value = -value + self.latitude = value + if longitude and longitude_ref: + value = deg2float(*longitude) + if longitude_ref < 0: + value = -value + self.longitude = value + if altitude: + value = altitude + if altitude_ref < 0: + value = -value + self.altitude = value + if datestamp: + if timestamp: + datestamp += " " + timestamp + self.creation_date = datestamp + + def parseIPTC(self, iptc): + datestr = hourstr = None + for field in iptc: + # Skip incomplete field + if "tag" not in field or "content" not in field: + continue + + # Get value + value = field["content"].value + if isinstance(value, (str, unicode)): + value = value.replace("\r", " ") + value = value.replace("\n", " ") + + # Skip unknown tag + tag = field["tag"].value + if tag == 55: + datestr = value + continue + if tag == 60: + hourstr = value + continue + if tag not in self.IPTC_KEY: + if tag != 0: + self.warning("Skip IPTC key %s: %s" % ( + field["tag"].display, makeUnicode(value))) + continue + setattr(self, self.IPTC_KEY[tag], value) + if datestr and hourstr: + try: + year = int(datestr[0:4]) + month = int(datestr[4:6]) + day = int(datestr[6:8]) + hour = int(hourstr[0:2]) + min = int(hourstr[2:4]) + sec = int(hourstr[4:6]) + self.creation_date = datetime(year, month, day, hour, min, sec) + except ValueError: + pass + +registerExtractor(JpegFile, JpegMetadata) + diff --git a/libs/hachoir_metadata/metadata.py b/libs/hachoir_metadata/metadata.py new file mode 100644 index 0000000..37461c9 --- /dev/null +++ b/libs/hachoir_metadata/metadata.py @@ -0,0 +1,291 @@ +# -*- coding: utf-8 -*- +from hachoir_core.compatibility import any, sorted +from hachoir_core.endian import endian_name +from hachoir_core.tools import makePrintable, makeUnicode +from hachoir_core.dict import Dict +from hachoir_core.error import error, HACHOIR_ERRORS +from hachoir_core.i18n import _ +from hachoir_core.log import Logger +from hachoir_metadata.metadata_item import ( + MIN_PRIORITY, 
MAX_PRIORITY, QUALITY_NORMAL) +from hachoir_metadata.register import registerAllItems + +extractors = {} + +class Metadata(Logger): + header = u"Metadata" + + def __init__(self, parent, quality=QUALITY_NORMAL): + assert isinstance(self.header, unicode) + + # Limit to 0.0 .. 1.0 + if parent: + quality = parent.quality + else: + quality = min(max(0.0, quality), 1.0) + + object.__init__(self) + object.__setattr__(self, "_Metadata__data", {}) + object.__setattr__(self, "quality", quality) + header = self.__class__.header + object.__setattr__(self, "_Metadata__header", header) + + registerAllItems(self) + + def _logger(self): + pass + + def __setattr__(self, key, value): + """ + Add a new value to data with name 'key'. Skip duplicates. + """ + # Invalid key? + if key not in self.__data: + raise KeyError(_("%s has no metadata '%s'") % (self.__class__.__name__, key)) + + # Skip duplicates + self.__data[key].add(value) + + def setHeader(self, text): + object.__setattr__(self, "header", text) + + def getItems(self, key): + try: + return self.__data[key] + except LookupError: + raise ValueError("Metadata has no value '%s'" % key) + + def getItem(self, key, index): + try: + return self.getItems(key)[index] + except (LookupError, ValueError): + return None + + def has(self, key): + return 1 <= len(self.getItems(key)) + + def get(self, key, default=None, index=0): + """ + Read first value of tag with name 'key'. 
+ + >>> from datetime import timedelta + >>> a = RootMetadata() + >>> a.duration = timedelta(seconds=2300) + >>> a.get('duration') + datetime.timedelta(0, 2300) + >>> a.get('author', u'Anonymous') + u'Anonymous' + """ + item = self.getItem(key, index) + if item is None: + if default is None: + raise ValueError("Metadata has no value '%s' (index %s)" % (key, index)) + else: + return default + return item.value + + def getValues(self, key): + try: + data = self.__data[key] + except LookupError: + raise ValueError("Metadata has no value '%s'" % key) + return [ item.value for item in data ] + + def getText(self, key, default=None, index=0): + """ + Read first value, as unicode string, of tag with name 'key'. + + >>> from datetime import timedelta + >>> a = RootMetadata() + >>> a.duration = timedelta(seconds=2300) + >>> a.getText('duration') + u'38 min 20 sec' + >>> a.getText('titre', u'Unknown') + u'Unknown' + """ + item = self.getItem(key, index) + if item is not None: + return item.text + else: + return default + + def register(self, data): + assert data.key not in self.__data + data.metadata = self + self.__data[data.key] = data + + def __iter__(self): + return self.__data.itervalues() + + def __str__(self): + r""" + Create a multi-line ASCII string (end of line is "\n") which + represents all datas. + + >>> a = RootMetadata() + >>> a.author = "haypo" + >>> a.copyright = unicode("© Hachoir", "UTF-8") + >>> print a + Metadata: + - Author: haypo + - Copyright: \xa9 Hachoir + + @see __unicode__() and exportPlaintext() + """ + text = self.exportPlaintext() + return "\n".join( makePrintable(line, "ASCII") for line in text ) + + def __unicode__(self): + r""" + Create a multi-line Unicode string (end of line is "\n") which + represents all datas. 
+ + >>> a = RootMetadata() + >>> a.copyright = unicode("© Hachoir", "UTF-8") + >>> print repr(unicode(a)) + u'Metadata:\n- Copyright: \xa9 Hachoir' + + @see __str__() and exportPlaintext() + """ + return "\n".join(self.exportPlaintext()) + + def exportPlaintext(self, priority=None, human=True, line_prefix=u"- ", title=None): + r""" + Convert metadata to multi-line Unicode string and skip datas + with priority lower than specified priority. + + Default priority is Metadata.MAX_PRIORITY. If human flag is True, data + key are translated to better human name (eg. "bit_rate" becomes + "Bit rate") which may be translated using gettext. + + If priority is too small, metadata are empty and so None is returned. + + >>> print RootMetadata().exportPlaintext() + None + >>> meta = RootMetadata() + >>> meta.copyright = unicode("© Hachoir", "UTF-8") + >>> print repr(meta.exportPlaintext()) + [u'Metadata:', u'- Copyright: \xa9 Hachoir'] + + @see __str__() and __unicode__() + """ + if priority is not None: + priority = max(priority, MIN_PRIORITY) + priority = min(priority, MAX_PRIORITY) + else: + priority = MAX_PRIORITY + if not title: + title = self.header + text = ["%s:" % title] + for data in sorted(self): + if priority < data.priority: + break + if not data.values: + continue + if human: + title = data.description + else: + title = data.key + for item in data.values: + if human: + value = item.text + else: + value = makeUnicode(item.value) + text.append("%s%s: %s" % (line_prefix, title, value)) + if 1 < len(text): + return text + else: + return None + + def __nonzero__(self): + return any(item for item in self.__data.itervalues()) + +class RootMetadata(Metadata): + def __init__(self, quality=QUALITY_NORMAL): + Metadata.__init__(self, None, quality) + +class MultipleMetadata(RootMetadata): + header = _("Common") + def __init__(self, quality=QUALITY_NORMAL): + RootMetadata.__init__(self, quality) + object.__setattr__(self, "_MultipleMetadata__groups", Dict()) + 
object.__setattr__(self, "_MultipleMetadata__key_counter", {}) + + def __contains__(self, key): + return key in self.__groups + + def __getitem__(self, key): + return self.__groups[key] + + def iterGroups(self): + return self.__groups.itervalues() + + def __nonzero__(self): + if RootMetadata.__nonzero__(self): + return True + return any(bool(group) for group in self.__groups) + + def addGroup(self, key, metadata, header=None): + """ + Add a new group (metadata of a sub-document). + + Returns False if the group is skipped, True if it has been added. + """ + if not metadata: + self.warning("Skip empty group %s" % key) + return False + if key.endswith("[]"): + key = key[:-2] + if key in self.__key_counter: + self.__key_counter[key] += 1 + else: + self.__key_counter[key] = 1 + key += "[%u]" % self.__key_counter[key] + if header: + metadata.setHeader(header) + self.__groups.append(key, metadata) + return True + + def exportPlaintext(self, priority=None, human=True, line_prefix=u"- "): + common = Metadata.exportPlaintext(self, priority, human, line_prefix) + if common: + text = common + else: + text = [] + for key, metadata in self.__groups.iteritems(): + if not human: + title = key + else: + title = None + value = metadata.exportPlaintext(priority, human, line_prefix, title=title) + if value: + text.extend(value) + if len(text): + return text + else: + return None + +def registerExtractor(parser, extractor): + assert parser not in extractors + assert issubclass(extractor, RootMetadata) + extractors[parser] = extractor + +def extractMetadata(parser, quality=QUALITY_NORMAL): + """ + Create a Metadata class from a parser. Returns None if no metadata + extractor does exist for the parser class. 
+ """ + try: + extractor = extractors[parser.__class__] + except KeyError: + return None + metadata = extractor(quality) + try: + metadata.extract(parser) + except HACHOIR_ERRORS, err: + error("Error during metadata extraction: %s" % unicode(err)) + if metadata: + metadata.mime_type = parser.mime_type + metadata.endian = endian_name[parser.endian] + return metadata + diff --git a/libs/hachoir_metadata/metadata_item.py b/libs/hachoir_metadata/metadata_item.py new file mode 100644 index 0000000..bddd3b0 --- /dev/null +++ b/libs/hachoir_metadata/metadata_item.py @@ -0,0 +1,146 @@ +from hachoir_core.tools import makeUnicode, normalizeNewline +from hachoir_core.error import HACHOIR_ERRORS +from hachoir_metadata import config +from hachoir_metadata.setter import normalizeString + +MIN_PRIORITY = 100 +MAX_PRIORITY = 999 + +QUALITY_FASTEST = 0.0 +QUALITY_FAST = 0.25 +QUALITY_NORMAL = 0.5 +QUALITY_GOOD = 0.75 +QUALITY_BEST = 1.0 + +class DataValue: + def __init__(self, value, text): + self.value = value + self.text = text + +class Data: + def __init__(self, key, priority, description, + text_handler=None, type=None, filter=None, conversion=None): + """ + handler is only used if value is not string nor unicode, prototype: + def handler(value) -> str/unicode + """ + assert MIN_PRIORITY <= priority <= MAX_PRIORITY + assert isinstance(description, unicode) + self.metadata = None + self.key = key + self.description = description + self.values = [] + if type and not isinstance(type, (tuple, list)): + type = (type,) + self.type = type + self.text_handler = text_handler + self.filter = filter + self.priority = priority + self.conversion = conversion + + def _createItem(self, value, text=None): + if text is None: + if isinstance(value, unicode): + text = value + elif self.text_handler: + text = self.text_handler(value) + assert isinstance(text, unicode) + else: + text = makeUnicode(value) + return DataValue(value, text) + + def add(self, value): + if isinstance(value, tuple): + if 
len(value) != 2: + raise ValueError("Data.add() only accept tuple of 2 elements: (value,text)") + value, text = value + else: + text = None + + # Skip value 'None' + if value is None: + return + + if isinstance(value, (str, unicode)): + value = normalizeString(value) + if not value: + return + + # Convert string to Unicode string using charset ISO-8859-1 + if self.conversion: + try: + new_value = self.conversion(self.metadata, self.key, value) + except HACHOIR_ERRORS, err: + self.metadata.warning("Error during conversion of %r value: %s" % ( + self.key, err)) + return + if new_value is None: + dest_types = " or ".join(str(item.__name__) for item in self.type) + self.metadata.warning("Unable to convert %s=%r (%s) to %s" % ( + self.key, value, type(value).__name__, dest_types)) + return + if isinstance(new_value, tuple): + if text: + value = new_value[0] + else: + value, text = new_value + else: + value = new_value + elif isinstance(value, str): + value = unicode(value, "ISO-8859-1") + + if self.type and not isinstance(value, self.type): + dest_types = " or ".join(str(item.__name__) for item in self.type) + self.metadata.warning("Key %r: value %r type (%s) is not %s" % ( + self.key, value, type(value).__name__, dest_types)) + return + + # Skip empty strings + if isinstance(value, unicode): + value = normalizeNewline(value) + if config.MAX_STR_LENGTH \ + and config.MAX_STR_LENGTH < len(value): + value = value[:config.MAX_STR_LENGTH] + "(...)" + + # Skip duplicates + if value in self: + return + + # Use filter + if self.filter and not self.filter(value): + self.metadata.warning("Skip value %s=%r (filter)" % (self.key, value)) + return + + # For string, if you have "verlongtext" and "verylo", + # keep the longer value + if isinstance(value, unicode): + for index, item in enumerate(self.values): + item = item.value + if not isinstance(item, unicode): + continue + if value.startswith(item): + # Find longer value, replace the old one + self.values[index] = 
self._createItem(value, text) + return + if item.startswith(value): + # Find truncated value, skip it + return + + # Add new value + self.values.append(self._createItem(value, text)) + + def __len__(self): + return len(self.values) + + def __getitem__(self, index): + return self.values[index] + + def __contains__(self, value): + for item in self.values: + if value == item.value: + return True + return False + + def __cmp__(self, other): + return cmp(self.priority, other.priority) + diff --git a/libs/hachoir_metadata/misc.py b/libs/hachoir_metadata/misc.py new file mode 100644 index 0000000..04c70a6 --- /dev/null +++ b/libs/hachoir_metadata/misc.py @@ -0,0 +1,262 @@ +from hachoir_metadata.metadata import RootMetadata, registerExtractor +from hachoir_metadata.safe import fault_tolerant +from hachoir_parser.container import SwfFile +from hachoir_parser.misc import TorrentFile, TrueTypeFontFile, OLE2_File, PcfFile +from hachoir_core.field import isString +from hachoir_core.error import warning +from hachoir_parser import guessParser +from hachoir_metadata.setter import normalizeString + +class TorrentMetadata(RootMetadata): + KEY_TO_ATTR = { + u"announce": "url", + u"comment": "comment", + u"creation_date": "creation_date", + } + INFO_TO_ATTR = { + u"length": "file_size", + u"name": "filename", + } + + def extract(self, torrent): + for field in torrent[0]: + self.processRoot(field) + + @fault_tolerant + def processRoot(self, field): + if field.name in self.KEY_TO_ATTR: + key = self.KEY_TO_ATTR[field.name] + value = field.value + setattr(self, key, value) + elif field.name == "info" and "value" in field: + for field in field["value"]: + self.processInfo(field) + + @fault_tolerant + def processInfo(self, field): + if field.name in self.INFO_TO_ATTR: + key = self.INFO_TO_ATTR[field.name] + value = field.value + setattr(self, key, value) + elif field.name == "piece_length": + self.comment = "Piece length: %s" % field.display + +class TTF_Metadata(RootMetadata): + 
NAMEID_TO_ATTR = { + 0: "copyright", # Copyright notice + 3: "title", # Unique font identifier + 5: "version", # Version string + 8: "author", # Manufacturer name + 11: "url", # URL Vendor + 14: "copyright", # License info URL + } + + def extract(self, ttf): + if "header" in ttf: + self.extractHeader(ttf["header"]) + if "names" in ttf: + self.extractNames(ttf["names"]) + + @fault_tolerant + def extractHeader(self, header): + self.creation_date = header["created"].value + self.last_modification = header["modified"].value + self.comment = u"Smallest readable size in pixels: %s pixels" % header["lowest"].value + self.comment = u"Font direction: %s" % header["font_dir"].display + + @fault_tolerant + def extractNames(self, names): + offset = names["offset"].value + for header in names.array("header"): + key = header["nameID"].value + foffset = offset + header["offset"].value + field = names.getFieldByAddress(foffset*8) + if not field or not isString(field): + continue + value = field.value + if key not in self.NAMEID_TO_ATTR: + continue + key = self.NAMEID_TO_ATTR[key] + if key == "version" and value.startswith(u"Version "): + # "Version 1.2" => "1.2" + value = value[8:] + setattr(self, key, value) + +class OLE2_Metadata(RootMetadata): + SUMMARY_ID_TO_ATTR = { + 2: "title", # Title + 3: "title", # Subject + 4: "author", + 6: "comment", + 8: "author", # Last saved by + 12: "creation_date", + 13: "last_modification", + 14: "nb_page", + 18: "producer", + } + IGNORE_SUMMARY = set(( + 1, # Code page + )) + + DOC_SUMMARY_ID_TO_ATTR = { + 3: "title", # Subject + 14: "author", # Manager + } + IGNORE_DOC_SUMMARY = set(( + 1, # Code page + )) + + def extract(self, ole2): + self._extract(ole2) + + def _extract(self, fieldset, main_document=True): + if main_document: + # _feedAll() is needed to make sure that we get all root[*] fragments + fieldset._feedAll() + if "root[0]" in fieldset: + self.useRoot(fieldset["root[0]"]) + doc_summary = self.getField(fieldset, main_document, 
"doc_summary[0]") + if doc_summary: + self.useSummary(doc_summary, True) + word_doc = self.getField(fieldset, main_document, "word_doc[0]") + if word_doc: + self.useWordDocument(word_doc) + summary = self.getField(fieldset, main_document, "summary[0]") + if summary: + self.useSummary(summary, False) + + @fault_tolerant + def useRoot(self, root): + stream = root.getSubIStream() + ministream = guessParser(stream) + if not ministream: + warning("Unable to create the OLE2 mini stream parser!") + return + self._extract(ministream, main_document=False) + + def getField(self, fieldset, main_document, name): + if name not in fieldset: + return None + # _feedAll() is needed to make sure that we get all fragments + # eg. summary[0], summary[1], ..., summary[n] + fieldset._feedAll() + field = fieldset[name] + if main_document: + stream = field.getSubIStream() + field = guessParser(stream) + if not field: + warning("Unable to create the OLE2 parser for %s!" % name) + return None + return field + + @fault_tolerant + def useSummary(self, summary, is_doc_summary): + if "os" in summary: + self.os = summary["os"].display + if "section[0]" not in summary: + return + summary = summary["section[0]"] + for property in summary.array("property_index"): + self.useProperty(summary, property, is_doc_summary) + + @fault_tolerant + def useWordDocument(self, doc): + self.comment = "Encrypted: %s" % doc["fEncrypted"].value + + @fault_tolerant + def useProperty(self, summary, property, is_doc_summary): + field = summary.getFieldByAddress(property["offset"].value*8) + if not field \ + or "value" not in field: + return + field = field["value"] + if not field.hasValue(): + return + + # Get value + value = field.value + if isinstance(value, (str, unicode)): + value = normalizeString(value) + if not value: + return + + # Get property identifier + prop_id = property["id"].value + if is_doc_summary: + id_to_attr = self.DOC_SUMMARY_ID_TO_ATTR + ignore = self.IGNORE_DOC_SUMMARY + else: + id_to_attr = 
self.SUMMARY_ID_TO_ATTR + ignore = self.IGNORE_SUMMARY + if prop_id in ignore: + return + + # Get Hachoir metadata key + try: + key = id_to_attr[prop_id] + use_prefix = False + except LookupError: + key = "comment" + use_prefix = True + if use_prefix: + prefix = property["id"].display + if (prefix in ("TotalEditingTime", "LastPrinted")) \ + and (not field): + # Ignore null time delta + return + value = "%s: %s" % (prefix, value) + else: + if (key == "last_modification") and (not field): + # Ignore null timestamp + return + setattr(self, key, value) + +class PcfMetadata(RootMetadata): + PROP_TO_KEY = { + 'CHARSET_REGISTRY': 'charset', + 'COPYRIGHT': 'copyright', + 'WEIGHT_NAME': 'font_weight', + 'FOUNDRY': 'author', + 'FONT': 'title', + '_XMBDFED_INFO': 'producer', + } + + def extract(self, pcf): + if "properties" in pcf: + self.useProperties(pcf["properties"]) + + def useProperties(self, properties): + last = properties["total_str_length"] + offset0 = last.address + last.size + for index in properties.array("property"): + # Search name and value + value = properties.getFieldByAddress(offset0+index["value_offset"].value*8) + if not value: + continue + value = value.value + if not value: + continue + name = properties.getFieldByAddress(offset0+index["name_offset"].value*8) + if not name: + continue + name = name.value + if name not in self.PROP_TO_KEY: + warning("Skip %s=%r" % (name, value)) + continue + key = self.PROP_TO_KEY[name] + setattr(self, key, value) + +class SwfMetadata(RootMetadata): + def extract(self, swf): + self.height = swf["rect/ymax"].value # twips + self.width = swf["rect/xmax"].value # twips + self.format_version = "flash version %s" % swf["version"].value + self.frame_rate = swf["frame_rate"].value + self.comment = "Frame count: %s" % swf["frame_count"].value + +registerExtractor(TorrentFile, TorrentMetadata) +registerExtractor(TrueTypeFontFile, TTF_Metadata) +registerExtractor(OLE2_File, OLE2_Metadata) +registerExtractor(PcfFile, PcfMetadata) 
from hachoir_metadata.metadata import RootMetadata, registerExtractor
from hachoir_parser.program import ExeFile
from hachoir_metadata.safe import fault_tolerant, getValue

class ExeMetadata(RootMetadata):
    """Metadata extractor for Windows executables (PE and NE formats)."""
    # VersionInfo string keys -> metadata attribute names
    KEY_TO_ATTR = {
        u"ProductName": "title",
        u"LegalCopyright": "copyright",
        u"LegalTrademarks": "copyright",
        u"LegalTrademarks1": "copyright",
        u"LegalTrademarks2": "copyright",
        u"CompanyName": "author",
        u"BuildDate": "creation_date",
        u"FileDescription": "title",
        u"ProductVersion": "version",
    }
    # VersionInfo keys deliberately not stored at all
    SKIP_KEY = set((u"InternalName", u"OriginalFilename", u"FileVersion", u"BuildVersion"))

    def extract(self, exe):
        if exe.isPE():
            self.extractPE(exe)
        elif exe.isNE():
            self.extractNE(exe)

    def extractNE(self, exe):
        if "ne_header" in exe:
            self.useNE_Header(exe["ne_header"])
        if "info" in exe:
            self.useNEInfo(exe["info"])

    @fault_tolerant
    def useNEInfo(self, info):
        """Find the StringFileInfo node in the NE resource tree."""
        for node in info.array("node"):
            if node["name"].value == "StringFileInfo":
                self.readVersionInfo(node["node[0]"])

    def extractPE(self, exe):
        # Read information from headers
        if "pe_header" in exe:
            self.usePE_Header(exe["pe_header"])
        if "pe_opt_header" in exe:
            self.usePE_OptHeader(exe["pe_opt_header"])

        # Use PE resource
        resource = exe.getResource()
        if resource and "version_info/node[0]" in resource:
            for node in resource.array("version_info/node[0]/node"):
                if getValue(node, "name") == "StringFileInfo" \
                and "node[0]" in node:
                    self.readVersionInfo(node["node[0]"])

    @fault_tolerant
    def useNE_Header(self, hdr):
        if hdr["is_dll"].value:
            self.format_version = u"New-style executable: Dynamic-link library (DLL)"
        elif hdr["is_win_app"].value:
            self.format_version = u"New-style executable: Windows 3.x application"
        else:
            self.format_version = u"New-style executable for Windows 3.x"

    @fault_tolerant
    def usePE_Header(self, hdr):
        self.creation_date = hdr["creation_date"].value
        self.comment = "CPU: %s" % hdr["cpu"].display
        if hdr["is_dll"].value:
            self.format_version = u"Portable Executable: Dynamic-link library (DLL)"
        else:
            self.format_version = u"Portable Executable: Windows application"

    @fault_tolerant
    def usePE_OptHeader(self, hdr):
        self.comment = "Subsystem: %s" % hdr["subsystem"].display

    def readVersionInfo(self, info):
        """Collect StringFileInfo entries and map them to metadata keys."""
        values = {}
        for node in info.array("node"):
            if "value" not in node or "name" not in node:
                continue
            # Values are NUL-padded strings
            value = node["value"].value.strip(" \0")
            if not value:
                continue
            key = node["name"].value
            values[key] = value

        if "ProductName" in values and "FileDescription" in values:
            # Make sure that FileDescription is set before ProductName
            # as title value
            self.title = values["FileDescription"]
            self.title = values["ProductName"]
            del values["FileDescription"]
            del values["ProductName"]

        for key, value in values.iteritems():
            if key in self.KEY_TO_ATTR:
                setattr(self, self.KEY_TO_ATTR[key], value)
            elif key not in self.SKIP_KEY:
                # Unknown keys are preserved as "key=value" comments
                self.comment = "%s=%s" % (key, value)

registerExtractor(ExeFile, ExeMetadata)
from hachoir_core.i18n import _
from hachoir_core.tools import (
    humanDuration, humanBitRate,
    humanFrequency, humanBitSize, humanFilesize,
    humanDatetime)
from hachoir_core.language import Language
from hachoir_metadata.filter import Filter, NumberFilter, DATETIME_FILTER
from datetime import date, datetime, timedelta
from hachoir_metadata.formatter import (
    humanAudioChannel, humanFrameRate, humanComprRate, humanAltitude,
    humanPixelSize, humanDPI)
from hachoir_metadata.setter import (
    setDatetime, setTrackNumber, setTrackTotal, setLanguage)
from hachoir_metadata.metadata_item import Data

# Sanity bounds used by the NumberFilter instances below
MIN_SAMPLE_RATE = 1000              # 1 kHz
MAX_SAMPLE_RATE = 192000            # 192 kHz
MAX_NB_CHANNEL = 8                  # 8 channels
MAX_WIDTH = 20000                   # 20 000 pixels
MAX_BIT_RATE = 500 * 1024 * 1024    # 500 Mbit/s
MAX_HEIGHT = MAX_WIDTH
MAX_DPI_WIDTH = 10000
MAX_DPI_HEIGHT = MAX_DPI_WIDTH
MAX_NB_COLOR = 2 ** 24              # 16 million of color
MAX_BITS_PER_PIXEL = 256            # 256 bits/pixel
MAX_FRAME_RATE = 150                # 150 frame/sec
MAX_NB_PAGE = 20000
MAX_COMPR_RATE = 1000.0
MIN_COMPR_RATE = 0.001
MAX_TRACK = 999

# Accept durations between 1 ms and 1 year
DURATION_FILTER = Filter(timedelta,
    timedelta(milliseconds=1),
    timedelta(days=365))

def registerAllItems(meta):
    """
    Register every known metadata key on *meta*.

    The second Data argument is the priority used to order entries in the
    output; lookup itself is by key.
    NOTE(review): several priorities are duplicated below (511 for
    longitude/altitude, 530-532 for location/city/country vs. camera_*,
    604 for bit_rate/aspect_ratio) -- presumably harmless for key-based
    access, but the relative display order of those pairs is unspecified.
    """
    meta.register(Data("title", 100, _("Title"), type=unicode))
    meta.register(Data("artist", 101, _("Artist"), type=unicode))
    meta.register(Data("author", 102, _("Author"), type=unicode))
    meta.register(Data("music_composer", 103, _("Music composer"), type=unicode))

    meta.register(Data("album", 200, _("Album"), type=unicode))
    meta.register(Data("duration", 201, _("Duration"), # integer in milliseconde
        type=timedelta, text_handler=humanDuration, filter=DURATION_FILTER))
    meta.register(Data("nb_page", 202, _("Nb page"), filter=NumberFilter(1, MAX_NB_PAGE)))
    meta.register(Data("music_genre", 203, _("Music genre"), type=unicode))
    meta.register(Data("language", 204, _("Language"), conversion=setLanguage, type=Language))
    meta.register(Data("track_number", 205, _("Track number"), conversion=setTrackNumber,
        filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
    meta.register(Data("track_total", 206, _("Track total"), conversion=setTrackTotal,
        filter=NumberFilter(1, MAX_TRACK), type=(int, long)))
    meta.register(Data("organization", 210, _("Organization"), type=unicode))
    meta.register(Data("version", 220, _("Version")))

    meta.register(Data("width", 301, _("Image width"), filter=NumberFilter(1, MAX_WIDTH), type=(int, long), text_handler=humanPixelSize))
    meta.register(Data("height", 302, _("Image height"), filter=NumberFilter(1, MAX_HEIGHT), type=(int, long), text_handler=humanPixelSize))
    meta.register(Data("nb_channel", 303, _("Channel"), text_handler=humanAudioChannel, filter=NumberFilter(1, MAX_NB_CHANNEL), type=(int, long)))
    meta.register(Data("sample_rate", 304, _("Sample rate"), text_handler=humanFrequency, filter=NumberFilter(MIN_SAMPLE_RATE, MAX_SAMPLE_RATE), type=(int, long, float)))
    meta.register(Data("bits_per_sample", 305, _("Bits/sample"), text_handler=humanBitSize, filter=NumberFilter(1, 64), type=(int, long)))
    meta.register(Data("image_orientation", 306, _("Image orientation")))
    meta.register(Data("nb_colors", 307, _("Number of colors"), filter=NumberFilter(1, MAX_NB_COLOR), type=(int, long)))
    meta.register(Data("bits_per_pixel", 308, _("Bits/pixel"), filter=NumberFilter(1, MAX_BITS_PER_PIXEL), type=(int, long)))
    meta.register(Data("filename", 309, _("File name"), type=unicode))
    meta.register(Data("file_size", 310, _("File size"), text_handler=humanFilesize, type=(int, long)))
    meta.register(Data("pixel_format", 311, _("Pixel format")))
    meta.register(Data("compr_size", 312, _("Compressed file size"), text_handler=humanFilesize, type=(int, long)))
    meta.register(Data("compr_rate", 313, _("Compression rate"), text_handler=humanComprRate, filter=NumberFilter(MIN_COMPR_RATE, MAX_COMPR_RATE), type=(int, long, float)))

    meta.register(Data("width_dpi", 320, _("Image DPI width"), filter=NumberFilter(1, MAX_DPI_WIDTH), type=(int, long), text_handler=humanDPI))
    meta.register(Data("height_dpi", 321, _("Image DPI height"), filter=NumberFilter(1, MAX_DPI_HEIGHT), type=(int, long), text_handler=humanDPI))

    meta.register(Data("file_attr", 400, _("File attributes")))
    meta.register(Data("file_type", 401, _("File type")))
    meta.register(Data("subtitle_author", 402, _("Subtitle author"), type=unicode))

    meta.register(Data("creation_date", 500, _("Creation date"), text_handler=humanDatetime,
        filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
    meta.register(Data("last_modification", 501, _("Last modification"), text_handler=humanDatetime,
        filter=DATETIME_FILTER, type=(datetime, date), conversion=setDatetime))
    meta.register(Data("latitude", 510, _("Latitude"), type=float))
    meta.register(Data("longitude", 511, _("Longitude"), type=float))
    meta.register(Data("altitude", 511, _("Altitude"), type=float, text_handler=humanAltitude))
    meta.register(Data("location", 530, _("Location"), type=unicode))
    meta.register(Data("city", 531, _("City"), type=unicode))
    meta.register(Data("country", 532, _("Country"), type=unicode))
    meta.register(Data("charset", 540, _("Charset"), type=unicode))
    meta.register(Data("font_weight", 550, _("Font weight")))

    meta.register(Data("camera_aperture", 520, _("Camera aperture")))
    meta.register(Data("camera_focal", 521, _("Camera focal")))
    meta.register(Data("camera_exposure", 522, _("Camera exposure")))
    meta.register(Data("camera_brightness", 530, _("Camera brightness")))
    meta.register(Data("camera_model", 531, _("Camera model"), type=unicode))
    meta.register(Data("camera_manufacturer", 532, _("Camera manufacturer"), type=unicode))

    meta.register(Data("compression", 600, _("Compression")))
    meta.register(Data("copyright", 601, _("Copyright"), type=unicode))
    meta.register(Data("url", 602, _("URL"), type=unicode))
    meta.register(Data("frame_rate", 603, _("Frame rate"), text_handler=humanFrameRate,
        filter=NumberFilter(1, MAX_FRAME_RATE), type=(int, long, float)))
    meta.register(Data("bit_rate", 604, _("Bit rate"), text_handler=humanBitRate,
        filter=NumberFilter(1, MAX_BIT_RATE), type=(int, long, float)))
    meta.register(Data("aspect_ratio", 604, _("Aspect ratio"), type=(int, long, float)))

    meta.register(Data("os", 900, _("OS"), type=unicode))
    meta.register(Data("producer", 901, _("Producer"), type=unicode))
    meta.register(Data("comment", 902, _("Comment"), type=unicode))
    meta.register(Data("format_version", 950, _("Format version"), type=unicode))
    meta.register(Data("mime_type", 951, _("MIME type"), type=unicode))
    meta.register(Data("endian", 952, _("Endianness"), type=unicode))
"""
Extract metadata from RIFF file format: AVI video and WAV sound.
"""

from hachoir_metadata.metadata import Metadata, MultipleMetadata, registerExtractor
from hachoir_metadata.safe import fault_tolerant, getValue
from hachoir_parser.container.riff import RiffFile
from hachoir_parser.video.fourcc import UNCOMPRESSED_AUDIO
from hachoir_core.tools import humanFilesize, makeUnicode, timedelta2seconds
from hachoir_core.i18n import _
from hachoir_metadata.audio import computeComprRate as computeAudioComprRate
from datetime import timedelta

class RiffMetadata(MultipleMetadata):
    """Metadata extractor for RIFF containers: WAV, AVI and ANI cursors."""
    # RIFF INFO chunk tags -> metadata attribute names
    TAG_TO_KEY = {
        "INAM": "title",
        "IART": "artist",
        "ICMT": "comment",
        "ICOP": "copyright",
        "IENG": "author",        # (engineer)
        "ISFT": "producer",
        "ICRD": "creation_date",
        "IDIT": "creation_date",
    }

    def extract(self, riff):
        """Dispatch on the RIFF type: "WAVE", "AVI " or "ACON" (animation)."""
        type = riff["type"].value
        if type == "WAVE":
            self.extractWAVE(riff)
            size = getValue(riff, "audio_data/size")
            if size:
                # size is in bytes, the helper expects bits
                computeAudioComprRate(self, size*8)
        elif type == "AVI ":
            if "headers" in riff:
                self.extractAVI(riff["headers"])
                self.extractInfo(riff["headers"])
        elif type == "ACON":
            self.extractAnim(riff)
        # INFO chunks may appear for any RIFF type
        if "info" in riff:
            self.extractInfo(riff["info"])

    def processChunk(self, chunk):
        """Map one INFO sub-chunk (tag + text) onto a metadata attribute."""
        if "text" not in chunk:
            return
        value = chunk["text"].value
        tag = chunk["tag"].value
        if tag not in self.TAG_TO_KEY:
            self.warning("Skip RIFF metadata %s: %s" % (tag, value))
            return
        key = self.TAG_TO_KEY[tag]
        setattr(self, key, value)

    @fault_tolerant
    def extractWAVE(self, wav):
        format = wav["format"]

        # Number of channel, bits/sample, sample rate
        self.nb_channel = format["nb_channel"].value
        self.bits_per_sample = format["bit_per_sample"].value
        self.sample_rate = format["sample_per_sec"].value

        self.compression = format["codec"].display
        if "nb_sample/nb_sample" in wav \
        and 0 < format["sample_per_sec"].value:
            self.duration = timedelta(seconds=float(wav["nb_sample/nb_sample"].value) / format["sample_per_sec"].value)
        if format["codec"].value in UNCOMPRESSED_AUDIO:
            # Codec with fixed bit rate
            self.bit_rate = format["nb_channel"].value * format["bit_per_sample"].value * format["sample_per_sec"].value
            if not self.has("duration") \
            and "audio_data/size" in wav \
            and self.has("bit_rate"):
                # Derive duration from payload size and fixed bit rate
                duration = float(wav["audio_data/size"].value)*8 / self.get('bit_rate')
                self.duration = timedelta(seconds=duration)

    def extractInfo(self, fieldset):
        """Recursively walk LIST chunks and process INFO entries."""
        for field in fieldset:
            if not field.is_field_set:
                continue
            if "tag" in field:
                if field["tag"].value == "LIST":
                    self.extractInfo(field)
                else:
                    self.processChunk(field)

    @fault_tolerant
    def extractAVIVideo(self, header, meta):
        """Fill *meta* (a per-stream Metadata) from an AVI video stream header."""
        meta.compression = "%s (fourcc:\"%s\")" \
            % (header["fourcc"].display, makeUnicode(header["fourcc"].value))
        if header["rate"].value and header["scale"].value:
            fps = float(header["rate"].value) / header["scale"].value
            meta.frame_rate = fps
            if 0 < fps:
                # length is a frame count; also promote duration to the container
                self.duration = meta.duration = timedelta(seconds=float(header["length"].value) / fps)

        if "../stream_fmt/width" in header:
            # Prefer the stream format (BITMAPINFO-like) dimensions
            format = header["../stream_fmt"]
            meta.width = format["width"].value
            meta.height = format["height"].value
            meta.bits_per_pixel = format["depth"].value
        else:
            # Fall back to the destination rectangle from the stream header
            meta.width = header["right"].value - header["left"].value
            meta.height = header["bottom"].value - header["top"].value

    @fault_tolerant
    def extractAVIAudio(self, format, meta):
        """Fill *meta* from an AVI audio stream format chunk."""
        meta.nb_channel = format["channel"].value
        meta.sample_rate = format["sample_rate"].value
        # bit_rate field is presumably bytes/sec (hence *8) -- confirm
        # against the parser's WAVEFORMAT definition
        meta.bit_rate = format["bit_rate"].value * 8
        if format["bits_per_sample"].value:
            meta.bits_per_sample = format["bits_per_sample"].value
        if "../stream_hdr" in format:
            header = format["../stream_hdr"]
            if header["rate"].value and header["scale"].value:
                frame_rate = float(header["rate"].value) / header["scale"].value
                meta.duration = timedelta(seconds=float(header["length"].value) / frame_rate)
            if header["fourcc"].value != "":
                meta.compression = "%s (fourcc:\"%s\")" \
                    % (format["codec"].display, header["fourcc"].value)
        if not meta.has("compression"):
            meta.compression = format["codec"].display

        self.computeAudioComprRate(meta)

    @fault_tolerant
    def computeAudioComprRate(self, meta):
        # NOTE: method shadows the module-level computeAudioComprRate import;
        # this one compares the stored bit rate against the uncompressed rate.
        uncompr = meta.get('bit_rate', 0)
        if not uncompr:
            return
        compr = meta.get('nb_channel') * meta.get('sample_rate') * meta.get('bits_per_sample', default=16)
        if not compr:
            return
        meta.compr_rate = float(compr) / uncompr

    @fault_tolerant
    def useAviHeader(self, header):
        """Fallback values from the main AVI header (avih chunk)."""
        microsec = header["microsec_per_frame"].value
        if microsec:
            self.frame_rate = 1000000.0 / microsec
            total_frame = getValue(header, "total_frame")
            if total_frame and not self.has("duration"):
                self.duration = timedelta(microseconds=total_frame * microsec)
        self.width = header["width"].value
        self.height = header["height"].value

    def extractAVI(self, headers):
        """Create one metadata group per video/audio stream."""
        audio_index = 1
        for stream in headers.array("stream"):
            if "stream_hdr/stream_type" not in stream:
                continue
            stream_type = stream["stream_hdr/stream_type"].value
            if stream_type == "vids":
                if "stream_hdr" in stream:
                    meta = Metadata(self)
                    self.extractAVIVideo(stream["stream_hdr"], meta)
                    self.addGroup("video", meta, "Video stream")
            elif stream_type == "auds":
                if "stream_fmt" in stream:
                    meta = Metadata(self)
                    self.extractAVIAudio(stream["stream_fmt"], meta)
                    self.addGroup("audio[%u]" % audio_index, meta, "Audio stream")
                    audio_index += 1
        if "avi_hdr" in headers:
            self.useAviHeader(headers["avi_hdr"])

        # Compute global bit rate
        if self.has("duration") and "/movie/size" in headers:
            self.bit_rate = float(headers["/movie/size"].value) * 8 / timedelta2seconds(self.get('duration'))

        # Video has index?
        if "/index" in headers:
            self.comment = _("Has audio/video index (%s)") \
                % humanFilesize(headers["/index"].size/8)

    @fault_tolerant
    def extractAnim(self, riff):
        """Estimate the frame rate of an ANI animated cursor."""
        if "anim_rate/rate[0]" in riff:
            count = 0
            total = 0
            for rate in riff.array("anim_rate/rate"):
                count += 1
                # Cap the number of sampled rates at ~100 entries
                if 100 < count:
                    break
                # Rates are in jiffies (1/60 s)
                total += rate.value / 60.0
            if count and total:
                self.frame_rate = count / total
        if not self.has("frame_rate") and "anim_hdr/jiffie_rate" in riff:
            self.frame_rate = 60.0 / riff["anim_hdr/jiffie_rate"].value

registerExtractor(RiffFile, RiffMetadata)

# --- libs/hachoir_metadata/safe.py ---

from hachoir_core.error import HACHOIR_ERRORS, warning

def fault_tolerant(func, *args):
    """
    Decorator: run func, turning any HACHOIR_ERRORS into a warning.
    The wrapper discards func's return value, so decorated functions are
    for side effects only.
    NOTE(review): the *args parameter of fault_tolerant itself is unused.
    """
    def safe_func(*args, **kw):
        try:
            func(*args, **kw)
        except HACHOIR_ERRORS, err:
            warning("Error when calling function %s(): %s" % (
                func.__name__, err))
    return safe_func

def getFieldAttribute(fieldset, key, attrname):
    """Return fieldset[key].<attrname>, or None on error/missing value."""
    try:
        field = fieldset[key]
        if field.hasValue():
            return getattr(field, attrname)
    except HACHOIR_ERRORS, err:
        warning("Unable to get %s of field %s/%s: %s" % (
            attrname, fieldset.path, key, err))
    return None

def getValue(fieldset, key):
    """Safe fieldset[key].value (None on error)."""
    return getFieldAttribute(fieldset, key, "value")

def getDisplay(fieldset, key):
    """Safe fieldset[key].display (None on error)."""
    return getFieldAttribute(fieldset, key, "display")
from datetime import date, datetime
import re
from hachoir_core.language import Language
from locale import setlocale, LC_ALL
from time import strptime
from hachoir_metadata.timezone import createTimezone
from hachoir_metadata import config

# Separator characters (dash, slash, dot, colon, space) are collapsed to a
# single "~" before matching against the patterns below.
NORMALIZE_REGEX = re.compile("[-/.: ]+")
YEAR_REGEX1 = re.compile("^([0-9]{4})$")

# Date regex: YYYY-MM-DD (US format)
DATE_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})$")

# Date regex: YYYY-MM-DD HH:MM:SS (US format)
DATETIME_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")

# Datetime regex: "MM-DD-YYYY HH:MM:SS" (FR format)
DATETIME_REGEX2 = re.compile("^([01]?[0-9])~([0-9]{2})~([0-9]{4})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")

# Timezone regex: "(...) +0200"
TIMEZONE_REGEX = re.compile("^(.*)~([+-][0-9]{2})00$")

# Timestamp: 'February 2007'
MONTH_YEAR = "%B~%Y"

# Timestamp: 'Sun Feb 24 15:51:09 2008'
RIFF_TIMESTAMP = "%a~%b~%d~%H~%M~%S~%Y"

# Timestamp: 'Thu, 19 Jul 2007 09:03:57'
ISO_TIMESTAMP = "%a,~%d~%b~%Y~%H~%M~%S"

def parseDatetime(value):
    """
    Parse a date/datetime string with flexible separators.

    Year only (returns a (date, text) tuple):
    >>> parseDatetime("2000")
    (datetime.date(2000, 1, 1), u'2000')

    Date:
    >>> parseDatetime("2004-01-02")
    datetime.date(2004, 1, 2)

    Timestamp:
    >>> parseDatetime("2004-01-02 18:10:45")
    datetime.datetime(2004, 1, 2, 18, 10, 45)

    Timestamps with a trailing timezone such as "+0200" return an aware
    datetime using hachoir_metadata.timezone.createTimezone().

    Returns None if nothing matches.
    """
    value = NORMALIZE_REGEX.sub("~", value.strip())
    regs = YEAR_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            return (date(year, 1, 1), unicode(year))
        except ValueError:
            pass
    regs = DATE_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            return date(year, month, day)
        except ValueError:
            pass
    regs = DATETIME_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass
    regs = DATETIME_REGEX2.match(value)
    if regs:
        try:
            month = int(regs.group(1))
            day = int(regs.group(2))
            year = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass
    # strptime() with %a/%b/%B is locale dependent: force "C" while matching
    current_locale = setlocale(LC_ALL, "C")
    try:
        match = TIMEZONE_REGEX.match(value)
        if match:
            without_timezone = match.group(1)
            delta = int(match.group(2))
            delta = createTimezone(delta)
        else:
            without_timezone = value
            delta = None
        try:
            timestamp = strptime(without_timezone, ISO_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass

        try:
            timestamp = strptime(without_timezone, RIFF_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass

        try:
            # Month/year only: day defaults to 1 via strptime
            timestamp = strptime(value, MONTH_YEAR)
            arguments = list(timestamp[0:3])
            return date(*arguments)
        except ValueError:
            pass
    finally:
        setlocale(LC_ALL, current_locale)
    return None

def setDatetime(meta, key, value):
    """Conversion callback: accept strings (parsed) or date/datetime as-is."""
    if isinstance(value, (str, unicode)):
        return parseDatetime(value)
    elif isinstance(value, (date, datetime)):
        return value
    return None

def setLanguage(meta, key, value):
    """
    Conversion callback: wrap an ISO-639 code (e.g. "fre", u"ger") into a
    hachoir_core Language object.
    """
    return Language(value)

def setTrackTotal(meta, key, total):
    """
    Conversion callback: coerce the track total to int.
    >>> setTrackTotal(None, None, "10")
    10
    """
    try:
        return int(total)
    except ValueError:
        meta.warning("Invalid track total: %r" % total)
        return None

def setTrackNumber(meta, key, number):
    """
    Conversion callback for track numbers; a "number/total" string also
    feeds the track_total item (which converts the string via setTrackTotal).
    """
    if isinstance(number, (int, long)):
        return number
    if "/" in number:
        number, total = number.split("/", 1)
        meta.track_total = total
    try:
        return int(number)
    except ValueError:
        meta.warning("Invalid track number: %r" % number)
        return None

def normalizeString(text):
    """Strip surrounding whitespace/NULs unless raw output is configured."""
    if config.RAW_OUTPUT:
        return text
    return text.strip(" \t\v\n\r\0")
from datetime import tzinfo, timedelta

class TimezoneUTC(tzinfo):
    """UTC timezone: fixed zero offset, no DST."""
    ZERO = timedelta(0)

    def utcoffset(self, dt):
        return TimezoneUTC.ZERO

    def tzname(self, dt):
        return u"UTC"

    def dst(self, dt):
        return TimezoneUTC.ZERO

    def __repr__(self):
        # BUG FIX: the repr format string had been lost (the original body
        # read `return ""`, presumably stripped angle-bracket text); restore
        # an informative representation.
        return "<TimezoneUTC delta=0>"

class Timezone(TimezoneUTC):
    """Fixed offset in hour from UTC."""
    def __init__(self, offset):
        # offset: whole number of hours, may be negative (e.g. -5 -> "-0500")
        self._offset = timedelta(minutes=offset*60)
        # "%+03u" behaves like "%+03d": sign, zero-padded to width 3
        self._name = u"%+03u00" % offset

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        return self._name

    def __repr__(self):
        # BUG FIX: original read `return "" % (self._offset, self._name)`,
        # which raises TypeError ("not all arguments converted during string
        # formatting") -- the format string had been stripped. Restore it.
        return "<Timezone delta=%s, name='%s'>" % (
            self._offset, self._name)

# Shared singleton for the zero offset
UTC = TimezoneUTC()

def createTimezone(offset):
    """Return a tzinfo for the given hour offset; reuse UTC for offset 0."""
    if offset:
        return Timezone(offset)
    else:
        return UTC
hachoir_parser.container import MkvFile +from hachoir_parser.container.mkv import dateToDatetime +from hachoir_core.i18n import _ +from hachoir_core.tools import makeUnicode, makePrintable, timedelta2seconds +from datetime import timedelta + +class MkvMetadata(MultipleMetadata): + tag_key = { + "TITLE": "title", + "URL": "url", + "COPYRIGHT": "copyright", + + # TODO: use maybe another name? + # Its value may be different than (...)/Info/DateUTC/date + "DATE_RECORDED": "creation_date", + + # TODO: Extract subtitle metadata + "SUBTITLE": "subtitle_author", + } + + def extract(self, mkv): + for segment in mkv.array("Segment"): + self.processSegment(segment) + + def processSegment(self, segment): + for field in segment: + if field.name.startswith("Info["): + self.processInfo(field) + elif field.name.startswith("Tags["): + for tag in field.array("Tag"): + self.processTag(tag) + elif field.name.startswith("Tracks["): + self.processTracks(field) + elif field.name.startswith("Cluster["): + if self.quality < QUALITY_GOOD: + return + + def processTracks(self, tracks): + for entry in tracks.array("TrackEntry"): + self.processTrack(entry) + + def processTrack(self, track): + if "TrackType/enum" not in track: + return + if track["TrackType/enum"].display == "video": + self.processVideo(track) + elif track["TrackType/enum"].display == "audio": + self.processAudio(track) + elif track["TrackType/enum"].display == "subtitle": + self.processSubtitle(track) + + def trackCommon(self, track, meta): + if "Name/unicode" in track: + meta.title = track["Name/unicode"].value + if "Language/string" in track \ + and track["Language/string"].value not in ("mis", "und"): + meta.language = track["Language/string"].value + + def processVideo(self, track): + video = Metadata(self) + self.trackCommon(track, video) + try: + video.compression = track["CodecID/string"].value + if "Video" in track: + video.width = track["Video/PixelWidth/unsigned"].value + video.height = 
track["Video/PixelHeight/unsigned"].value + except MissingField: + pass + self.addGroup("video[]", video, "Video stream") + + def getDouble(self, field, parent): + float_key = '%s/float' % parent + if float_key in field: + return field[float_key].value + double_key = '%s/double' % parent + if double_key in field: + return field[double_key].value + return None + + def processAudio(self, track): + audio = Metadata(self) + self.trackCommon(track, audio) + if "Audio" in track: + frequency = self.getDouble(track, "Audio/SamplingFrequency") + if frequency is not None: + audio.sample_rate = frequency + if "Audio/Channels/unsigned" in track: + audio.nb_channel = track["Audio/Channels/unsigned"].value + if "Audio/BitDepth/unsigned" in track: + audio.bits_per_sample = track["Audio/BitDepth/unsigned"].value + if "CodecID/string" in track: + audio.compression = track["CodecID/string"].value + self.addGroup("audio[]", audio, "Audio stream") + + def processSubtitle(self, track): + sub = Metadata(self) + self.trackCommon(track, sub) + try: + sub.compression = track["CodecID/string"].value + except MissingField: + pass + self.addGroup("subtitle[]", sub, "Subtitle") + + def processTag(self, tag): + for field in tag.array("SimpleTag"): + self.processSimpleTag(field) + + def processSimpleTag(self, tag): + if "TagName/unicode" not in tag \ + or "TagString/unicode" not in tag: + return + name = tag["TagName/unicode"].value + if name not in self.tag_key: + return + key = self.tag_key[name] + value = tag["TagString/unicode"].value + setattr(self, key, value) + + def processInfo(self, info): + if "TimecodeScale/unsigned" in info: + duration = self.getDouble(info, "Duration") + if duration is not None: + try: + seconds = duration * info["TimecodeScale/unsigned"].value * 1e-9 + self.duration = timedelta(seconds=seconds) + except OverflowError: + # Catch OverflowError for timedelta (long int too large + # to be converted to an int) + pass + if "DateUTC/date" in info: + try: + 
self.creation_date = dateToDatetime(info["DateUTC/date"].value) + except OverflowError: + pass + if "WritingApp/unicode" in info: + self.producer = info["WritingApp/unicode"].value + if "MuxingApp/unicode" in info: + self.producer = info["MuxingApp/unicode"].value + if "Title/unicode" in info: + self.title = info["Title/unicode"].value + +class FlvMetadata(MultipleMetadata): + def extract(self, flv): + if "video[0]" in flv: + meta = Metadata(self) + self.extractVideo(flv["video[0]"], meta) + self.addGroup("video", meta, "Video stream") + if "audio[0]" in flv: + meta = Metadata(self) + self.extractAudio(flv["audio[0]"], meta) + self.addGroup("audio", meta, "Audio stream") + # TODO: Computer duration + # One technic: use last video/audio chunk and use timestamp + # But this is very slow + self.format_version = flv.description + + if "metadata/entry[1]" in flv: + self.extractAMF(flv["metadata/entry[1]"]) + if self.has('duration'): + self.bit_rate = flv.size / timedelta2seconds(self.get('duration')) + + @fault_tolerant + def extractAudio(self, audio, meta): + if audio["codec"].display == "MP3" and "music_data" in audio: + meta.compression = audio["music_data"].description + else: + meta.compression = audio["codec"].display + meta.sample_rate = audio.getSampleRate() + if audio["is_16bit"].value: + meta.bits_per_sample = 16 + else: + meta.bits_per_sample = 8 + if audio["is_stereo"].value: + meta.nb_channel = 2 + else: + meta.nb_channel = 1 + + @fault_tolerant + def extractVideo(self, video, meta): + meta.compression = video["codec"].display + + def extractAMF(self, amf): + for entry in amf.array("item"): + self.useAmfEntry(entry) + + @fault_tolerant + def useAmfEntry(self, entry): + key = entry["key"].value + if key == "duration": + self.duration = timedelta(seconds=entry["value"].value) + elif key == "creator": + self.producer = entry["value"].value + elif key == "audiosamplerate": + self.sample_rate = entry["value"].value + elif key == "framerate": + self.frame_rate = 
entry["value"].value + elif key == "metadatacreator": + self.producer = entry["value"].value + elif key == "metadatadate": + self.creation_date = entry.value + elif key == "width": + self.width = int(entry["value"].value) + elif key == "height": + self.height = int(entry["value"].value) + +class MovMetadata(RootMetadata): + def extract(self, mov): + for atom in mov: + if "movie" in atom: + self.processMovie(atom["movie"]) + + @fault_tolerant + def processMovieHeader(self, hdr): + self.creation_date = hdr["creation_date"].value + self.last_modification = hdr["lastmod_date"].value + self.duration = timedelta(seconds=float(hdr["duration"].value) / hdr["time_scale"].value) + self.comment = _("Play speed: %.1f%%") % (hdr["play_speed"].value*100) + self.comment = _("User volume: %.1f%%") % (float(hdr["volume"].value)*100//255) + + @fault_tolerant + def processTrackHeader(self, hdr): + width = int(hdr["frame_size_width"].value) + height = int(hdr["frame_size_height"].value) + if width and height: + self.width = width + self.height = height + + def processTrack(self, atom): + for field in atom: + if "track_hdr" in field: + self.processTrackHeader(field["track_hdr"]) + + def processMovie(self, atom): + for field in atom: + if "track" in field: + self.processTrack(field["track"]) + if "movie_hdr" in field: + self.processMovieHeader(field["movie_hdr"]) + + +class AsfMetadata(MultipleMetadata): + EXT_DESC_TO_ATTR = { + "Encoder": "producer", + "ToolName": "producer", + "AlbumTitle": "album", + "Track": "track_number", + "TrackNumber": "track_total", + "Year": "creation_date", + "AlbumArtist": "author", + } + SKIP_EXT_DESC = set(( + # Useless informations + "WMFSDKNeeded", "WMFSDKVersion", + "Buffer Average", "VBR Peak", "EncodingTime", + "MediaPrimaryClassID", "UniqueFileIdentifier", + )) + + def extract(self, asf): + if "header/content" in asf: + self.processHeader(asf["header/content"]) + + def processHeader(self, header): + compression = [] + is_vbr = None + + if 
"ext_desc/content" in header: + # Extract all data from ext_desc + data = {} + for desc in header.array("ext_desc/content/descriptor"): + self.useExtDescItem(desc, data) + + # Have ToolName and ToolVersion? If yes, group them to producer key + if "ToolName" in data and "ToolVersion" in data: + self.producer = "%s (version %s)" % (data["ToolName"], data["ToolVersion"]) + del data["ToolName"] + del data["ToolVersion"] + + # "IsVBR" key + if "IsVBR" in data: + is_vbr = (data["IsVBR"] == 1) + del data["IsVBR"] + + # Store data + for key, value in data.iteritems(): + if key in self.EXT_DESC_TO_ATTR: + key = self.EXT_DESC_TO_ATTR[key] + else: + if isinstance(key, str): + key = makePrintable(key, "ISO-8859-1", to_unicode=True) + value = "%s=%s" % (key, value) + key = "comment" + setattr(self, key, value) + + if "file_prop/content" in header: + self.useFileProp(header["file_prop/content"], is_vbr) + + if "codec_list/content" in header: + for codec in header.array("codec_list/content/codec"): + if "name" in codec: + text = codec["name"].value + if "desc" in codec and codec["desc"].value: + text = "%s (%s)" % (text, codec["desc"].value) + compression.append(text) + + audio_index = 1 + video_index = 1 + for index, stream_prop in enumerate(header.array("stream_prop")): + if "content/audio_header" in stream_prop: + meta = Metadata(self) + self.streamProperty(header, index, meta) + self.streamAudioHeader(stream_prop["content/audio_header"], meta) + if self.addGroup("audio[%u]" % audio_index, meta, "Audio stream #%u" % audio_index): + audio_index += 1 + elif "content/video_header" in stream_prop: + meta = Metadata(self) + self.streamProperty(header, index, meta) + self.streamVideoHeader(stream_prop["content/video_header"], meta) + if self.addGroup("video[%u]" % video_index, meta, "Video stream #%u" % video_index): + video_index += 1 + + if "metadata/content" in header: + info = header["metadata/content"] + try: + self.title = info["title"].value + self.author = 
info["author"].value + self.copyright = info["copyright"].value + except MissingField: + pass + + @fault_tolerant + def streamAudioHeader(self, audio, meta): + if not meta.has("compression"): + meta.compression = audio["twocc"].display + meta.nb_channel = audio["channels"].value + meta.sample_rate = audio["sample_rate"].value + meta.bits_per_sample = audio["bits_per_sample"].value + + @fault_tolerant + def streamVideoHeader(self, video, meta): + meta.width = video["width"].value + meta.height = video["height"].value + if "bmp_info" in video: + bmp_info = video["bmp_info"] + if not meta.has("compression"): + meta.compression = bmp_info["codec"].display + meta.bits_per_pixel = bmp_info["bpp"].value + + @fault_tolerant + def useExtDescItem(self, desc, data): + if desc["type"].value == ASF_Descriptor.TYPE_BYTE_ARRAY: + # Skip binary data + return + key = desc["name"].value + if "/" in key: + # Replace "WM/ToolName" with "ToolName" + key = key.split("/", 1)[1] + if key in self.SKIP_EXT_DESC: + # Skip some keys + return + value = desc["value"].value + if not value: + return + value = makeUnicode(value) + data[key] = value + + @fault_tolerant + def useFileProp(self, prop, is_vbr): + self.creation_date = prop["creation_date"].value + self.duration = prop["play_duration"].value + if prop["seekable"].value: + self.comment = u"Is seekable" + value = prop["max_bitrate"].value + text = prop["max_bitrate"].display + if is_vbr is True: + text = "VBR (%s max)" % text + elif is_vbr is False: + text = "%s (CBR)" % text + else: + text = "%s (max)" % text + self.bit_rate = (value, text) + + def streamProperty(self, header, index, meta): + key = "bit_rates/content/bit_rate[%u]/avg_bitrate" % index + if key in header: + meta.bit_rate = header[key].value + + # TODO: Use codec list + # It doesn't work when the video uses /header/content/bitrate_mutex + # since the codec list are shared between streams but... how is it + # shared? 
+# key = "codec_list/content/codec[%u]" % index +# if key in header: +# codec = header[key] +# if "name" in codec: +# text = codec["name"].value +# if "desc" in codec and codec["desc"].value: +# meta.compression = "%s (%s)" % (text, codec["desc"].value) +# else: +# meta.compression = text + +registerExtractor(MovFile, MovMetadata) +registerExtractor(AsfFile, AsfMetadata) +registerExtractor(FlvFile, FlvMetadata) +registerExtractor(MkvFile, MkvMetadata) + diff --git a/libs/hachoir_parser/__init__.py b/libs/hachoir_parser/__init__.py new file mode 100644 index 0000000..1b9860a --- /dev/null +++ b/libs/hachoir_parser/__init__.py @@ -0,0 +1,7 @@ +from hachoir_parser.version import __version__ +from hachoir_parser.parser import ValidateError, HachoirParser, Parser +from hachoir_parser.parser_list import ParserList, HachoirParserList +from hachoir_parser.guess import (QueryParser, guessParser, createParser) +from hachoir_parser import (archive, audio, container, + file_system, image, game, misc, network, program, video) + diff --git a/libs/hachoir_parser/archive/__init__.py b/libs/hachoir_parser/archive/__init__.py new file mode 100644 index 0000000..ecd09e8 --- /dev/null +++ b/libs/hachoir_parser/archive/__init__.py @@ -0,0 +1,12 @@ +from hachoir_parser.archive.ace import AceFile +from hachoir_parser.archive.ar import ArchiveFile +from hachoir_parser.archive.bzip2_parser import Bzip2Parser +from hachoir_parser.archive.cab import CabFile +from hachoir_parser.archive.gzip_parser import GzipParser +from hachoir_parser.archive.tar import TarFile +from hachoir_parser.archive.zip import ZipFile +from hachoir_parser.archive.rar import RarFile +from hachoir_parser.archive.rpm import RpmFile +from hachoir_parser.archive.sevenzip import SevenZipParser +from hachoir_parser.archive.mar import MarFile + diff --git a/libs/hachoir_parser/archive/ace.py b/libs/hachoir_parser/archive/ace.py new file mode 100644 index 0000000..0365292 --- /dev/null +++ b/libs/hachoir_parser/archive/ace.py 
@@ -0,0 +1,267 @@ +""" +ACE parser + +From wotsit.org and the SDK header (bitflags) + +Partial study of a new block type (5) I've called "new_recovery", as its +syntax is very close to the former one (of type 2). + +Status: can only read totally file and header blocks. +Author: Christophe Gisquet +Creation date: 19 january 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (StaticFieldSet, FieldSet, + Bit, Bits, NullBits, RawBytes, Enum, + UInt8, UInt16, UInt32, + PascalString8, PascalString16, String, + TimeDateMSDOS32) +from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.common.msdos import MSDOSFileAttr32 + +MAGIC = "**ACE**" + +OS_MSDOS = 0 +OS_WIN32 = 2 +HOST_OS = { + 0: "MS-DOS", + 1: "OS/2", + 2: "Win32", + 3: "Unix", + 4: "MAC-OS", + 5: "Win NT", + 6: "Primos", + 7: "APPLE GS", + 8: "ATARI", + 9: "VAX VMS", + 10: "AMIGA", + 11: "NEXT", +} + +COMPRESSION_TYPE = { + 0: "Store", + 1: "Lempel-Ziv 77", + 2: "ACE v2.0", +} + +COMPRESSION_MODE = { + 0: "fastest", + 1: "fast", + 2: "normal", + 3: "good", + 4: "best", +} + +# TODO: Computing the CRC16 would also prove useful +#def markerValidate(self): +# return not self["extend"].value and self["signature"].value == MAGIC and \ +# self["host_os"].value<12 + +class MarkerFlags(StaticFieldSet): + format = ( + (Bit, "extend", "Whether the header is extended"), + (Bit, "has_comment", "Whether the archive has a comment"), + (NullBits, "unused", 7, "Reserved bits"), + (Bit, "sfx", "SFX"), + (Bit, "limited_dict", "Junior SFX with 256K dictionary"), + (Bit, "multi_volume", "Part of a set of ACE archives"), + (Bit, "has_av_string", "This header holds an AV-string"), + (Bit, "recovery_record", "Recovery record preset"), + (Bit, "locked", "Archive is locked"), + (Bit, "solid", "Archive uses solid compression") + ) + +def markerFlags(self): + yield MarkerFlags(self, "flags", "Marker flags") + +def 
markerHeader(self): + yield String(self, "signature", 7, "Signature") + yield UInt8(self, "ver_extract", "Version needed to extract archive") + yield UInt8(self, "ver_created", "Version used to create archive") + yield Enum(UInt8(self, "host_os", "OS where the files were compressed"), HOST_OS) + yield UInt8(self, "vol_num", "Volume number") + yield TimeDateMSDOS32(self, "time", "Date and time (MS DOS format)") + yield Bits(self, "reserved", 64, "Reserved size for future extensions") + flags = self["flags"] + if flags["has_av_string"].value: + yield PascalString8(self, "av_string", "AV String") + if flags["has_comment"].value: + size = filesizeHandler(UInt16(self, "comment_size", "Comment size")) + yield size + if size.value > 0: + yield RawBytes(self, "compressed_comment", size.value, \ + "Compressed comment") + +class FileFlags(StaticFieldSet): + format = ( + (Bit, "extend", "Whether the header is extended"), + (Bit, "has_comment", "Presence of file comment"), + (Bits, "unused", 10, "Unused bit flags"), + (Bit, "encrypted", "File encrypted with password"), + (Bit, "previous", "File continued from previous volume"), + (Bit, "next", "File continues on the next volume"), + (Bit, "solid", "File compressed using previously archived files") + ) + +def fileFlags(self): + yield FileFlags(self, "flags", "File flags") + +def fileHeader(self): + yield filesizeHandler(UInt32(self, "compressed_size", "Size of the compressed file")) + yield filesizeHandler(UInt32(self, "uncompressed_size", "Uncompressed file size")) + yield TimeDateMSDOS32(self, "ftime", "Date and time (MS DOS format)") + if self["/header/host_os"].value in (OS_MSDOS, OS_WIN32): + yield MSDOSFileAttr32(self, "file_attr", "File attributes") + else: + yield textHandler(UInt32(self, "file_attr", "File attributes"), hexadecimal) + yield textHandler(UInt32(self, "file_crc32", "CRC32 checksum over the compressed file)"), hexadecimal) + yield Enum(UInt8(self, "compression_type", "Type of compression"), 
COMPRESSION_TYPE) + yield Enum(UInt8(self, "compression_mode", "Quality of compression"), COMPRESSION_MODE) + yield textHandler(UInt16(self, "parameters", "Compression parameters"), hexadecimal) + yield textHandler(UInt16(self, "reserved", "Reserved data"), hexadecimal) + # Filename + yield PascalString16(self, "filename", "Filename") + # Comment + if self["flags/has_comment"].value: + yield filesizeHandler(UInt16(self, "comment_size", "Size of the compressed comment")) + if self["comment_size"].value > 0: + yield RawBytes(self, "comment_data", self["comment_size"].value, "Comment data") + +def fileBody(self): + size = self["compressed_size"].value + if size > 0: + yield RawBytes(self, "compressed_data", size, "Compressed data") + +def fileDesc(self): + return "File entry: %s (%s)" % (self["filename"].value, self["compressed_size"].display) + +def recoveryHeader(self): + yield filesizeHandler(UInt32(self, "rec_blk_size", "Size of recovery data")) + self.body_size = self["rec_blk_size"].size + yield String(self, "signature", 7, "Signature, normally '**ACE**'") + yield textHandler(UInt32(self, "relative_start", + "Relative start (to this block) of the data this block is mode of"), + hexadecimal) + yield UInt32(self, "num_blocks", "Number of blocks the data is split into") + yield UInt32(self, "size_blocks", "Size of these blocks") + yield UInt16(self, "crc16_blocks", "CRC16 over recovery data") + # size_blocks blocks of size size_blocks follow + # The ultimate data is the xor data of all those blocks + size = self["size_blocks"].value + for index in xrange(self["num_blocks"].value): + yield RawBytes(self, "data[]", size, "Recovery block %i" % index) + yield RawBytes(self, "xor_data", size, "The XOR value of the above data blocks") + +def recoveryDesc(self): + return "Recovery block, size=%u" % self["body_size"].display + +def newRecoveryHeader(self): + """ + This header is described nowhere + """ + if self["flags/extend"].value: + yield filesizeHandler(UInt32(self, 
"body_size", "Size of the unknown body following")) + self.body_size = self["body_size"].value + yield textHandler(UInt32(self, "unknown[]", "Unknown field, probably 0"), + hexadecimal) + yield String(self, "signature", 7, "Signature, normally '**ACE**'") + yield textHandler(UInt32(self, "relative_start", + "Offset (=crc16's) of this block in the file"), hexadecimal) + yield textHandler(UInt32(self, "unknown[]", + "Unknown field, probably 0"), hexadecimal) + +class BaseFlags(StaticFieldSet): + format = ( + (Bit, "extend", "Whether the header is extended"), + (NullBits, "unused", 15, "Unused bit flags") + ) + +def parseFlags(self): + yield BaseFlags(self, "flags", "Unknown flags") + +def parseHeader(self): + if self["flags/extend"].value: + yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following")) + self.body_size = self["body_size"].value + +def parseBody(self): + if self.body_size > 0: + yield RawBytes(self, "body_data", self.body_size, "Body data, unhandled") + +class Block(FieldSet): + TAG_INFO = { + 0: ("header", "Archiver header", markerFlags, markerHeader, None), + 1: ("file[]", fileDesc, fileFlags, fileHeader, fileBody), + 2: ("recovery[]", recoveryDesc, recoveryHeader, None, None), + 5: ("new_recovery[]", None, None, newRecoveryHeader, None) + } + + def __init__(self, parent, name, description=None): + FieldSet.__init__(self, parent, name, description) + self.body_size = 0 + self.desc_func = None + type = self["block_type"].value + if type in self.TAG_INFO: + self._name, desc, self.parseFlags, self.parseHeader, self.parseBody = self.TAG_INFO[type] + if desc: + if isinstance(desc, str): + self._description = desc + else: + self.desc_func = desc + else: + self.warning("Processing as unknown block block of type %u" % type) + if not self.parseFlags: + self.parseFlags = parseFlags + if not self.parseHeader: + self.parseHeader = parseHeader + if not self.parseBody: + self.parseBody = parseBody + + def createFields(self): + yield 
textHandler(UInt16(self, "crc16", "Archive CRC16 (from byte 4 on)"), hexadecimal) + yield filesizeHandler(UInt16(self, "head_size", "Block size (from byte 4 on)")) + yield UInt8(self, "block_type", "Block type") + + # Flags + for flag in self.parseFlags(self): + yield flag + + # Rest of the header + for field in self.parseHeader(self): + yield field + size = self["head_size"].value - (self.current_size//8) + (2+2) + if size > 0: + yield RawBytes(self, "extra_data", size, "Extra header data, unhandled") + + # Body in itself + for field in self.parseBody(self): + yield field + + def createDescription(self): + if self.desc_func: + return self.desc_func(self) + else: + return "Block: %s" % self["type"].display + +class AceFile(Parser): + endian = LITTLE_ENDIAN + PARSER_TAGS = { + "id": "ace", + "category": "archive", + "file_ext": ("ace",), + "mime": (u"application/x-ace-compressed",), + "min_size": 50*8, + "description": "ACE archive" + } + + def validate(self): + if self.stream.readBytes(7*8, len(MAGIC)) != MAGIC: + return "Invalid magic" + return True + + def createFields(self): + while not self.eof: + yield Block(self, "block[]") + diff --git a/libs/hachoir_parser/archive/ar.py b/libs/hachoir_parser/archive/ar.py new file mode 100644 index 0000000..421cdc5 --- /dev/null +++ b/libs/hachoir_parser/archive/ar.py @@ -0,0 +1,52 @@ +""" +GNU ar archive : archive file (.a) and Debian (.deb) archive. 
+""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + String, RawBytes, UnixLine) +from hachoir_core.endian import BIG_ENDIAN + +class ArchiveFileEntry(FieldSet): + def createFields(self): + yield UnixLine(self, "header", "Header") + info = self["header"].value.split() + if len(info) != 7: + raise ParserError("Invalid file entry header") + size = int(info[5]) + if 0 < size: + yield RawBytes(self, "content", size, "File data") + + def createDescription(self): + return "File entry (%s)" % self["header"].value.split()[0] + +class ArchiveFile(Parser): + endian = BIG_ENDIAN + MAGIC = '!\n' + PARSER_TAGS = { + "id": "unix_archive", + "category": "archive", + "file_ext": ("a", "deb"), + "mime": + (u"application/x-debian-package", + u"application/x-archive", + u"application/x-dpkg"), + "min_size": (8 + 13)*8, # file signature + smallest file as possible + "magic": ((MAGIC, 0),), + "description": "Unix archive" + } + + def validate(self): + if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC: + return "Invalid magic string" + return True + + def createFields(self): + yield String(self, "id", 8, "Unix archive identifier (\"\")", charset="ASCII") + while not self.eof: + data = self.stream.readBytes(self.current_size, 1) + if data == "\n": + yield RawBytes(self, "empty_line[]", 1, "Empty line") + else: + yield ArchiveFileEntry(self, "file[]", "File") + diff --git a/libs/hachoir_parser/archive/bzip2_parser.py b/libs/hachoir_parser/archive/bzip2_parser.py new file mode 100644 index 0000000..bec1d0e --- /dev/null +++ b/libs/hachoir_parser/archive/bzip2_parser.py @@ -0,0 +1,85 @@ +""" +BZIP2 archive file + +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (ParserError, String, + Bytes, Character, UInt8, UInt32, CompressedField) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal + +try: + from bz2 import BZ2Decompressor + + 
class Bunzip2: + def __init__(self, stream): + self.bzip2 = BZ2Decompressor() + + def __call__(self, size, data=''): + try: + return self.bzip2.decompress(data) + except EOFError: + return '' + + has_deflate = True +except ImportError: + has_deflate = False + +class Bzip2Parser(Parser): + PARSER_TAGS = { + "id": "bzip2", + "category": "archive", + "file_ext": ("bz2",), + "mime": (u"application/x-bzip2",), + "min_size": 10*8, + "magic": (('BZh', 0),), + "description": "bzip2 archive" + } + endian = LITTLE_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 3) != 'BZh': + return "Wrong file signature" + if not("1" <= self["blocksize"].value <= "9"): + return "Wrong blocksize" + return True + + def createFields(self): + yield String(self, "id", 3, "Identifier (BZh)", charset="ASCII") + yield Character(self, "blocksize", "Block size (KB of memory needed to uncompress)") + + yield UInt8(self, "blockheader", "Block header") + if self["blockheader"].value == 0x17: + yield String(self, "id2", 4, "Identifier2 (re8P)", charset="ASCII") + yield UInt8(self, "id3", "Identifier3 (0x90)") + elif self["blockheader"].value == 0x31: + yield String(self, "id2", 5, "Identifier 2 (AY&SY)", charset="ASCII") + if self["id2"].value != "AY&SY": + raise ParserError("Invalid identifier 2 (AY&SY)!") + else: + raise ParserError("Invalid block header!") + yield textHandler(UInt32(self, "crc32", "CRC32"), hexadecimal) + + if self._size is None: # TODO: is it possible to handle piped input? 
+ raise NotImplementedError + + size = (self._size - self.current_size)/8 + if size: + for tag, filename in self.stream.tags: + if tag == "filename" and filename.endswith(".bz2"): + filename = filename[:-4] + break + else: + filename = None + data = Bytes(self, "file", size) + if has_deflate: + CompressedField(self, Bunzip2) + def createInputStream(**args): + if filename: + args.setdefault("tags",[]).append(("filename", filename)) + return self._createInputStream(**args) + data._createInputStream = createInputStream + yield data + diff --git a/libs/hachoir_parser/archive/cab.py b/libs/hachoir_parser/archive/cab.py new file mode 100644 index 0000000..856b01e --- /dev/null +++ b/libs/hachoir_parser/archive/cab.py @@ -0,0 +1,125 @@ +""" +Microsoft Cabinet (CAB) archive. + +Author: Victor Stinner +Creation date: 31 january 2007 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, Enum, + CString, String, + UInt16, UInt32, Bit, Bits, PaddingBits, NullBits, + DateTimeMSDOS32, RawBytes) +from hachoir_parser.common.msdos import MSDOSFileAttr16 +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler +from hachoir_core.endian import LITTLE_ENDIAN + +MAX_NB_FOLDER = 30 + +COMPRESSION_NONE = 0 +COMPRESSION_NAME = { + 0: "Uncompressed", + 1: "Deflate", + 2: "Quantum", + 3: "LZX", +} + +class Folder(FieldSet): + def createFields(self): + yield UInt32(self, "off_data", "Offset of data") + yield UInt16(self, "cf_data") + yield Enum(Bits(self, "compr_method", 4, "Compression method"), COMPRESSION_NAME) + yield Bits(self, "compr_level", 5, "Compression level") + yield PaddingBits(self, "padding", 7) + + def createDescription(self): + text= "Folder: compression %s" % self["compr_method"].display + if self["compr_method"].value != COMPRESSION_NONE: + text += " (level %u)" % self["compr_level"].value + return text + +class File(FieldSet): + def createFields(self): + yield filesizeHandler(UInt32(self, "filesize", "Uncompressed 
file size")) + yield UInt32(self, "offset", "File offset after decompression") + yield UInt16(self, "iFolder", "file control id") + yield DateTimeMSDOS32(self, "timestamp") + yield MSDOSFileAttr16(self, "attributes") + yield CString(self, "filename", charset="ASCII") + + def createDescription(self): + return "File %s (%s)" % ( + self["filename"].display, self["filesize"].display) + +class Reserved(FieldSet): + def createFields(self): + yield UInt32(self, "size") + size = self["size"].value + if size: + yield RawBytes(self, "data", size) + +class Flags(FieldSet): + static_size = 16 + def createFields(self): + yield Bit(self, "has_previous") + yield Bit(self, "has_next") + yield Bit(self, "has_reserved") + yield NullBits(self, "padding", 13) + +class CabFile(Parser): + endian = LITTLE_ENDIAN + MAGIC = "MSCF" + PARSER_TAGS = { + "id": "cab", + "category": "archive", + "file_ext": ("cab",), + "mime": (u"application/vnd.ms-cab-compressed",), + "magic": ((MAGIC, 0),), + "min_size": 1*8, # header + file entry + "description": "Microsoft Cabinet archive" + } + + def validate(self): + if self.stream.readBytes(0, 4) != self.MAGIC: + return "Invalid magic" + if self["cab_version"].value != 0x0103: + return "Unknown version (%s)" % self["cab_version"].display + if not (1 <= self["nb_folder"].value <= MAX_NB_FOLDER): + return "Invalid number of folder (%s)" % self["nb_folder"].value + return True + + def createFields(self): + yield String(self, "magic", 4, "Magic (MSCF)", charset="ASCII") + yield textHandler(UInt32(self, "hdr_checksum", "Header checksum (0 if not used)"), hexadecimal) + yield filesizeHandler(UInt32(self, "filesize", "Cabinet file size")) + yield textHandler(UInt32(self, "fld_checksum", "Folders checksum (0 if not used)"), hexadecimal) + yield UInt32(self, "off_file", "Offset of first file") + yield textHandler(UInt32(self, "files_checksum", "Files checksum (0 if not used)"), hexadecimal) + yield textHandler(UInt16(self, "cab_version", "Cabinet version"), 
hexadecimal) + yield UInt16(self, "nb_folder", "Number of folders") + yield UInt16(self, "nb_files", "Number of files") + yield Flags(self, "flags") + yield UInt16(self, "setid") + yield UInt16(self, "number", "Zero-based cabinet number") + + # --- TODO: Support flags + if self["flags/has_reserved"].value: + yield Reserved(self, "reserved") + #(3) Previous cabinet name, if CAB_HEADER.flags & CAB_FLAG_HASPREV + #(4) Previous disk name, if CAB_HEADER.flags & CAB_FLAG_HASPREV + #(5) Next cabinet name, if CAB_HEADER.flags & CAB_FLAG_HASNEXT + #(6) Next disk name, if CAB_HEADER.flags & CAB_FLAG_HASNEXT + # ---- + + for index in xrange(self["nb_folder"].value): + yield Folder(self, "folder[]") + for index in xrange(self["nb_files"].value): + yield File(self, "file[]") + + end = self.seekBit(self.size, "endraw") + if end: + yield end + + def createContentSize(self): + return self["filesize"].value * 8 + diff --git a/libs/hachoir_parser/archive/gzip_parser.py b/libs/hachoir_parser/archive/gzip_parser.py new file mode 100644 index 0000000..c082033 --- /dev/null +++ b/libs/hachoir_parser/archive/gzip_parser.py @@ -0,0 +1,129 @@ +""" +GZIP archive parser. 
+ +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import ( + UInt8, UInt16, UInt32, Enum, TimestampUnix32, + Bit, CString, SubFile, + NullBits, Bytes, RawBytes) +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.common.deflate import Deflate + +class GzipParser(Parser): + endian = LITTLE_ENDIAN + PARSER_TAGS = { + "id": "gzip", + "category": "archive", + "file_ext": ("gz",), + "mime": (u"application/x-gzip",), + "min_size": 18*8, + #"magic": (('\x1F\x8B\x08', 0),), + "magic_regex": ( + # (magic, compression=deflate, , , ) + ('\x1F\x8B\x08.{5}[\0\2\4\6][\x00-\x0D]', 0), + ), + "description": u"gzip archive", + } + os_name = { + 0: u"FAT filesystem", + 1: u"Amiga", + 2: u"VMS (or OpenVMS)", + 3: u"Unix", + 4: u"VM/CMS", + 5: u"Atari TOS", + 6: u"HPFS filesystem (OS/2, NT)", + 7: u"Macintosh", + 8: u"Z-System", + 9: u"CP/M", + 10: u"TOPS-20", + 11: u"NTFS filesystem (NT)", + 12: u"QDOS", + 13: u"Acorn RISCOS", + } + COMPRESSION_NAME = { + 8: u"deflate", + } + + def validate(self): + if self["signature"].value != '\x1F\x8B': + return "Invalid signature" + if self["compression"].value not in self.COMPRESSION_NAME: + return "Unknown compression method (%u)" % self["compression"].value + if self["reserved[0]"].value != 0: + return "Invalid reserved[0] value" + if self["reserved[1]"].value != 0: + return "Invalid reserved[1] value" + if self["reserved[2]"].value != 0: + return "Invalid reserved[2] value" + return True + + def createFields(self): + # Gzip header + yield Bytes(self, "signature", 2, r"GZip file signature (\x1F\x8B)") + yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME) + + # Flags + yield Bit(self, "is_text", "File content is probably ASCII text") + yield Bit(self, "has_crc16", "Header CRC16") + yield Bit(self, "has_extra", "Extra informations (variable size)") + yield Bit(self, 
"has_filename", "Contains filename?") + yield Bit(self, "has_comment", "Contains comment?") + yield NullBits(self, "reserved[]", 3) + yield TimestampUnix32(self, "mtime", "Modification time") + + # Extra flags + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "slowest", "Compressor used maximum compression (slowest)") + yield Bit(self, "fastest", "Compressor used the fastest compression") + yield NullBits(self, "reserved[]", 5) + yield Enum(UInt8(self, "os", "Operating system"), self.os_name) + + # Optional fields + if self["has_extra"].value: + yield UInt16(self, "extra_length", "Extra length") + yield RawBytes(self, "extra", self["extra_length"].value, "Extra") + if self["has_filename"].value: + yield CString(self, "filename", "Filename", charset="ISO-8859-1") + if self["has_comment"].value: + yield CString(self, "comment", "Comment") + if self["has_crc16"].value: + yield textHandler(UInt16(self, "hdr_crc16", "CRC16 of the header"), + hexadecimal) + + if self._size is None: # TODO: is it possible to handle piped input? 
+ raise NotImplementedError() + + # Read file + size = (self._size - self.current_size) // 8 - 8 # -8: crc32+size + if 0 < size: + if self["has_filename"].value: + filename = self["filename"].value + else: + for tag, filename in self.stream.tags: + if tag == "filename" and filename.endswith(".gz"): + filename = filename[:-3] + break + else: + filename = None + yield Deflate(SubFile(self, "file", size, filename=filename)) + + # Footer + yield textHandler(UInt32(self, "crc32", + "Uncompressed data content CRC32"), hexadecimal) + yield filesizeHandler(UInt32(self, "size", "Uncompressed size")) + + def createDescription(self): + desc = u"gzip archive" + info = [] + if "filename" in self: + info.append('filename "%s"' % self["filename"].value) + if "size" in self: + info.append("was %s" % self["size"].display) + if self["mtime"].value: + info.append(self["mtime"].display) + return "%s: %s" % (desc, ", ".join(info)) + diff --git a/libs/hachoir_parser/archive/mar.py b/libs/hachoir_parser/archive/mar.py new file mode 100644 index 0000000..6a7e31a --- /dev/null +++ b/libs/hachoir_parser/archive/mar.py @@ -0,0 +1,67 @@ +""" +Microsoft Archive parser + +Author: Victor Stinner +Creation date: 2007-03-04 +""" + +MAX_NB_FILE = 100000 + +from hachoir_parser import Parser +from hachoir_core.field import FieldSet, String, UInt32, SubFile +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal + +class FileIndex(FieldSet): + static_size = 68*8 + + def createFields(self): + yield String(self, "filename", 56, truncate="\0", charset="ASCII") + yield filesizeHandler(UInt32(self, "filesize")) + yield textHandler(UInt32(self, "crc32"), hexadecimal) + yield UInt32(self, "offset") + + def createDescription(self): + return "File %s (%s) at %s" % ( + self["filename"].value, self["filesize"].display, self["offset"].value) + +class MarFile(Parser): + MAGIC = "MARC" + PARSER_TAGS = { + "id": "mar", + "category": "archive", + 
"file_ext": ("mar",), + "min_size": 80*8, # At least one file index + "magic": ((MAGIC, 0),), + "description": "Microsoft Archive", + } + endian = LITTLE_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 4) != self.MAGIC: + return "Invalid magic" + if self["version"].value != 3: + return "Invalid version" + if not(1 <= self["nb_file"].value <= MAX_NB_FILE): + return "Invalid number of file" + return True + + def createFields(self): + yield String(self, "magic", 4, "File signature (MARC)", charset="ASCII") + yield UInt32(self, "version") + yield UInt32(self, "nb_file") + files = [] + for index in xrange(self["nb_file"].value): + item = FileIndex(self, "file[]") + yield item + if item["filesize"].value: + files.append(item) + files.sort(key=lambda item: item["offset"].value) + for index in files: + padding = self.seekByte(index["offset"].value) + if padding: + yield padding + size = index["filesize"].value + desc = "File %s" % index["filename"].value + yield SubFile(self, "data[]", size, desc, filename=index["filename"].value) + diff --git a/libs/hachoir_parser/archive/rar.py b/libs/hachoir_parser/archive/rar.py new file mode 100644 index 0000000..2be5887 --- /dev/null +++ b/libs/hachoir_parser/archive/rar.py @@ -0,0 +1,353 @@ +""" +RAR parser + +Status: can only read higher-level attructures +Author: Christophe Gisquet +""" + +from hachoir_parser import Parser +from hachoir_core.field import (StaticFieldSet, FieldSet, + Bit, Bits, Enum, + UInt8, UInt16, UInt32, UInt64, + String, TimeDateMSDOS32, + NullBytes, NullBits, RawBytes) +from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.common.msdos import MSDOSFileAttr32 + +MAX_FILESIZE = 1000 * 1024 * 1024 + +BLOCK_NAME = { + 0x72: "Marker", + 0x73: "Archive", + 0x74: "File", + 0x75: "Comment", + 0x76: "Extra info", + 0x77: "Subblock", + 0x78: "Recovery record", + 0x79: "Archive authenticity", + 0x7A: "New-format 
subblock", + 0x7B: "Archive end", +} + +COMPRESSION_NAME = { + 0x30: "Storing", + 0x31: "Fastest compression", + 0x32: "Fast compression", + 0x33: "Normal compression", + 0x34: "Good compression", + 0x35: "Best compression" +} + +OS_MSDOS = 0 +OS_WIN32 = 2 +OS_NAME = { + 0: "MS DOS", + 1: "OS/2", + 2: "Win32", + 3: "Unix", +} + +DICTIONARY_SIZE = { + 0: "Dictionary size 64 Kb", + 1: "Dictionary size 128 Kb", + 2: "Dictionary size 256 Kb", + 3: "Dictionary size 512 Kb", + 4: "Dictionary size 1024 Kb", + 7: "File is a directory", +} + +def formatRARVersion(field): + """ + Decodes the RAR version stored on 1 byte + """ + return "%u.%u" % divmod(field.value, 10) + +def commonFlags(s): + yield Bit(s, "has_added_size", "Additional field indicating additional size") + yield Bit(s, "is_ignorable", "Old versions of RAR should ignore this block when copying data") + +class ArchiveFlags(StaticFieldSet): + format = ( + (Bit, "vol", "Archive volume"), + (Bit, "has_comment", "Whether there is a comment"), + (Bit, "is_locked", "Archive volume"), + (Bit, "is_solid", "Whether files can be extracted separately"), + (Bit, "new_numbering", "New numbering, or compressed comment"), # From unrar + (Bit, "has_authenticity_information", "The integrity/authenticity of the archive can be checked"), + (Bit, "is_protected", "The integrity/authenticity of the archive can be checked"), + (Bit, "is_passworded", "Needs a password to be decrypted"), + (Bit, "is_first_vol", "Whether it is the first volume"), + (Bit, "is_encrypted", "Whether the encryption version is present"), + (NullBits, "internal", 6, "Reserved for 'internal use'") + ) + +def archiveFlags(s): + yield ArchiveFlags(s, "flags", "Archiver block flags") + +def archiveHeader(s): + yield NullBytes(s, "reserved[]", 2, "Reserved word") + yield NullBytes(s, "reserved[]", 4, "Reserved dword") + +def commentHeader(s): + yield filesizeHandler(UInt16(s, "total_size", "Comment header size + comment size")) + yield filesizeHandler(UInt16(s, 
"uncompressed_size", "Uncompressed comment size")) + yield UInt8(s, "required_version", "RAR version needed to extract comment") + yield UInt8(s, "packing_method", "Comment packing method") + yield UInt16(s, "comment_crc16", "Comment CRC") + +def commentBody(s): + size = s["total_size"].value - s.current_size + if size > 0: + yield RawBytes(s, "comment_data", size, "Compressed comment data") + +def signatureHeader(s): + yield TimeDateMSDOS32(s, "creation_time") + yield filesizeHandler(UInt16(s, "arc_name_size")) + yield filesizeHandler(UInt16(s, "user_name_size")) + +def recoveryHeader(s): + yield filesizeHandler(UInt32(s, "total_size")) + yield textHandler(UInt8(s, "version"), hexadecimal) + yield UInt16(s, "rec_sectors") + yield UInt32(s, "total_blocks") + yield RawBytes(s, "mark", 8) + +def avInfoHeader(s): + yield filesizeHandler(UInt16(s, "total_size", "Total block size")) + yield UInt8(s, "version", "Version needed to decompress", handler=hexadecimal) + yield UInt8(s, "method", "Compression method", handler=hexadecimal) + yield UInt8(s, "av_version", "Version for AV", handler=hexadecimal) + yield UInt32(s, "av_crc", "AV info CRC32", handler=hexadecimal) + +def avInfoBody(s): + size = s["total_size"].value - s.current_size + if size > 0: + yield RawBytes(s, "av_info_data", size, "AV info") + +class FileFlags(FieldSet): + static_size = 16 + def createFields(self): + yield Bit(self, "continued_from", "File continued from previous volume") + yield Bit(self, "continued_in", "File continued in next volume") + yield Bit(self, "is_encrypted", "File encrypted with password") + yield Bit(self, "has_comment", "File comment present") + yield Bit(self, "is_solid", "Information from previous files is used (solid flag)") + # The 3 following lines are what blocks more staticity + yield Enum(Bits(self, "dictionary_size", 3, "Dictionary size"), DICTIONARY_SIZE) + for bit in commonFlags(self): + yield bit + yield Bit(self, "is_large", "file64 operations needed") + yield 
Bit(self, "is_unicode", "Filename also encoded using Unicode") + yield Bit(self, "has_salt", "Has salt for encryption") + yield Bit(self, "uses_file_version", "File versioning is used") + yield Bit(self, "has_ext_time", "Extra time ??") + yield Bit(self, "has_ext_flags", "Extra flag ??") + +def fileFlags(s): + yield FileFlags(s, "flags", "File block flags") + +class ExtTime(FieldSet): + def createFields(self): + yield textHandler(UInt16(self, "time_flags", "Flags for extended time"), hexadecimal) + flags = self["time_flags"].value + for index in xrange(4): + rmode = flags >> ((3-index)*4) + if rmode & 8: + if index: + yield TimeDateMSDOS32(self, "dos_time[]", "DOS Time") + if rmode & 3: + yield RawBytes(self, "remainder[]", rmode & 3, "Time remainder") + +def specialHeader(s, is_file): + yield filesizeHandler(UInt32(s, "compressed_size", "Compressed size (bytes)")) + yield filesizeHandler(UInt32(s, "uncompressed_size", "Uncompressed size (bytes)")) + yield Enum(UInt8(s, "host_os", "Operating system used for archiving"), OS_NAME) + yield textHandler(UInt32(s, "crc32", "File CRC32"), hexadecimal) + yield TimeDateMSDOS32(s, "ftime", "Date and time (MS DOS format)") + yield textHandler(UInt8(s, "version", "RAR version needed to extract file"), formatRARVersion) + yield Enum(UInt8(s, "method", "Packing method"), COMPRESSION_NAME) + yield filesizeHandler(UInt16(s, "filename_length", "File name size")) + if s["host_os"].value in (OS_MSDOS, OS_WIN32): + yield MSDOSFileAttr32(s, "file_attr", "File attributes") + else: + yield textHandler(UInt32(s, "file_attr", "File attributes"), hexadecimal) + + # Start additional field from unrar + if s["flags/is_large"].value: + yield filesizeHandler(UInt64(s, "large_size", "Extended 64bits filesize")) + + # End additional field + size = s["filename_length"].value + if size > 0: + if s["flags/is_unicode"].value: + charset = "UTF-8" + else: + charset = "ISO-8859-15" + yield String(s, "filename", size, "Filename", charset=charset) + # 
Start additional fields from unrar - file only + if is_file: + if s["flags/has_salt"].value: + yield textHandler(UInt8(s, "salt", "Salt"), hexadecimal) + if s["flags/has_ext_time"].value: + yield ExtTime(s, "extra_time", "Extra time info") + +def fileHeader(s): + return specialHeader(s, True) + +def fileBody(s): + # File compressed data + size = s["compressed_size"].value + if s["flags/is_large"].value: + size += s["large_size"].value + if size > 0: + yield RawBytes(s, "compressed_data", size, "File compressed data") + +def fileDescription(s): + return "File entry: %s (%s)" % \ + (s["filename"].display, s["compressed_size"].display) + +def newSubHeader(s): + return specialHeader(s, False) + +class EndFlags(StaticFieldSet): + format = ( + (Bit, "has_next_vol", "Whether there is another next volume"), + (Bit, "has_data_crc", "Whether a CRC value is present"), + (Bit, "rev_space"), + (Bit, "has_vol_number", "Whether the volume number is present"), + (Bits, "unused[]", 4), + (Bit, "has_added_size", "Additional field indicating additional size"), + (Bit, "is_ignorable", "Old versions of RAR should ignore this block when copying data"), + (Bits, "unused[]", 6), + ) + +def endFlags(s): + yield EndFlags(s, "flags", "End block flags") + +class BlockFlags(FieldSet): + static_size = 16 + + def createFields(self): + yield textHandler(Bits(self, "unused[]", 8, "Unused flag bits"), hexadecimal) + yield Bit(self, "has_added_size", "Additional field indicating additional size") + yield Bit(self, "is_ignorable", "Old versions of RAR should ignore this block when copying data") + yield Bits(self, "unused[]", 6) + +class Block(FieldSet): + BLOCK_INFO = { + # None means 'use default function' + 0x72: ("marker", "Archive header", None, None, None), + 0x73: ("archive_start", "Archive info", archiveFlags, archiveHeader, None), + 0x74: ("file[]", fileDescription, fileFlags, fileHeader, fileBody), + 0x75: ("comment[]", "Stray comment", None, commentHeader, commentBody), + 0x76: 
("av_info[]", "Extra information", None, avInfoHeader, avInfoBody), + 0x77: ("sub_block[]", "Stray subblock", None, newSubHeader, fileBody), + 0x78: ("recovery[]", "Recovery block", None, recoveryHeader, None), + 0x79: ("signature", "Signature block", None, signatureHeader, None), + 0x7A: ("new_sub_block[]", "Stray new-format subblock", fileFlags, + newSubHeader, fileBody), + 0x7B: ("archive_end", "Archive end block", endFlags, None, None), + } + + def __init__(self, parent, name): + FieldSet.__init__(self, parent, name) + t = self["block_type"].value + if t in self.BLOCK_INFO: + self._name, desc, parseFlags, parseHeader, parseBody = self.BLOCK_INFO[t] + if callable(desc): + self.createDescription = lambda: desc(self) + elif desc: + self._description = desc + if parseFlags : self.parseFlags = lambda: parseFlags(self) + if parseHeader : self.parseHeader = lambda: parseHeader(self) + if parseBody : self.parseBody = lambda: parseBody(self) + else: + self.info("Processing as unknown block block of type %u" % type) + + self._size = 8*self["block_size"].value + if t == 0x74 or t == 0x7A: + self._size += 8*self["compressed_size"].value + if "is_large" in self["flags"] and self["flags/is_large"].value: + self._size += 8*self["large_size"].value + elif "has_added_size" in self: + self._size += 8*self["added_size"].value + # TODO: check if any other member is needed here + + def createFields(self): + yield textHandler(UInt16(self, "crc16", "Block CRC16"), hexadecimal) + yield textHandler(UInt8(self, "block_type", "Block type"), hexadecimal) + + # Parse flags + for field in self.parseFlags(): + yield field + + # Get block size + yield filesizeHandler(UInt16(self, "block_size", "Block size")) + + # Parse remaining header + for field in self.parseHeader(): + yield field + + # Finish header with stuff of unknow size + size = self["block_size"].value - (self.current_size//8) + if size > 0: + yield RawBytes(self, "unknown", size, "Unknow data (UInt32 probably)") + + # Parse body + 
for field in self.parseBody(): + yield field + + def createDescription(self): + return "Block entry: %s" % self["type"].display + + def parseFlags(self): + yield BlockFlags(self, "flags", "Block header flags") + + def parseHeader(self): + if "has_added_size" in self["flags"] and \ + self["flags/has_added_size"].value: + yield filesizeHandler(UInt32(self, "added_size", + "Supplementary block size")) + + def parseBody(self): + """ + Parse what is left of the block + """ + size = self["block_size"].value - (self.current_size//8) + if "has_added_size" in self["flags"] and self["flags/has_added_size"].value: + size += self["added_size"].value + if size > 0: + yield RawBytes(self, "body", size, "Body data") + +class RarFile(Parser): + MAGIC = "Rar!\x1A\x07\x00" + PARSER_TAGS = { + "id": "rar", + "category": "archive", + "file_ext": ("rar",), + "mime": (u"application/x-rar-compressed", ), + "min_size": 7*8, + "magic": ((MAGIC, 0),), + "description": "Roshal archive (RAR)", + } + endian = LITTLE_ENDIAN + + def validate(self): + magic = self.MAGIC + if self.stream.readBytes(0, len(magic)) != magic: + return "Invalid magic" + return True + + def createFields(self): + while not self.eof: + yield Block(self, "block[]") + + def createContentSize(self): + start = 0 + end = MAX_FILESIZE * 8 + pos = self.stream.searchBytes("\xC4\x3D\x7B\x00\x40\x07\x00", start, end) + if pos is not None: + return pos + 7*8 + return None + diff --git a/libs/hachoir_parser/archive/rpm.py b/libs/hachoir_parser/archive/rpm.py new file mode 100644 index 0000000..ccb8d2e --- /dev/null +++ b/libs/hachoir_parser/archive/rpm.py @@ -0,0 +1,267 @@ +""" +RPM archive parser. + +Author: Victor Stinner, 1st December 2005. 
+""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, UInt32, UInt64, Enum, + NullBytes, Bytes, RawBytes, SubFile, + Character, CString, String) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_parser.archive.gzip_parser import GzipParser +from hachoir_parser.archive.bzip2_parser import Bzip2Parser + +class ItemContent(FieldSet): + format_type = { + 0: UInt8, + 1: Character, + 2: UInt8, + 3: UInt16, + 4: UInt32, + 5: UInt64, + 6: CString, + 7: RawBytes, + 8: CString, + 9: CString + } + + def __init__(self, parent, name, item): + FieldSet.__init__(self, parent, name, item.description) + self.related_item = item + self._name = "content_%s" % item.name + + def createFields(self): + item = self.related_item + type = item["type"].value + + cls = self.format_type[type] + count = item["count"].value + if cls is RawBytes: # or type == 8: + if cls is RawBytes: + args = (self, "value", count) + else: + args = (self, "value") # cls is CString + count = 1 + else: + if 1 < count: + args = (self, "value[]") + else: + args = (self, "value") + for index in xrange(count): + yield cls(*args) + +class Item(FieldSet): + type_name = { + 0: "NULL", + 1: "CHAR", + 2: "INT8", + 3: "INT16", + 4: "INT32", + 5: "INT64", + 6: "CSTRING", + 7: "BIN", + 8: "CSTRING_ARRAY", + 9: "CSTRING?" 
+ } + tag_name = { + 1000: "File size", + 1001: "(Broken) MD5 signature", + 1002: "PGP 2.6.3 signature", + 1003: "(Broken) MD5 signature", + 1004: "MD5 signature", + 1005: "GnuPG signature", + 1006: "PGP5 signature", + 1007: "Uncompressed payload size (bytes)", + 256+8: "Broken SHA1 header digest", + 256+9: "Broken SHA1 header digest", + 256+13: "Broken SHA1 header digest", + 256+11: "DSA header signature", + 256+12: "RSA header signature" + } + + def __init__(self, parent, name, description=None, tag_name_dict=None): + FieldSet.__init__(self, parent, name, description) + if tag_name_dict is None: + tag_name_dict = Item.tag_name + self.tag_name_dict = tag_name_dict + + def createFields(self): + yield Enum(UInt32(self, "tag", "Tag"), self.tag_name_dict) + yield Enum(UInt32(self, "type", "Type"), Item.type_name) + yield UInt32(self, "offset", "Offset") + yield UInt32(self, "count", "Count") + + def createDescription(self): + return "Item: %s (%s)" % (self["tag"].display, self["type"].display) + +class ItemHeader(Item): + tag_name = { + 61: "Current image", + 62: "Signatures", + 63: "Immutable", + 64: "Regions", + 100: "I18N string locales", + 1000: "Name", + 1001: "Version", + 1002: "Release", + 1003: "Epoch", + 1004: "Summary", + 1005: "Description", + 1006: "Build time", + 1007: "Build host", + 1008: "Install time", + 1009: "Size", + 1010: "Distribution", + 1011: "Vendor", + 1012: "Gif", + 1013: "Xpm", + 1014: "Licence", + 1015: "Packager", + 1016: "Group", + 1017: "Changelog", + 1018: "Source", + 1019: "Patch", + 1020: "Url", + 1021: "OS", + 1022: "Arch", + 1023: "Prein", + 1024: "Postin", + 1025: "Preun", + 1026: "Postun", + 1027: "Old filenames", + 1028: "File sizes", + 1029: "File states", + 1030: "File modes", + 1031: "File uids", + 1032: "File gids", + 1033: "File rdevs", + 1034: "File mtimes", + 1035: "File MD5s", + 1036: "File link to's", + 1037: "File flags", + 1038: "Root", + 1039: "File username", + 1040: "File groupname", + 1043: "Icon", + 1044: "Source 
rpm", + 1045: "File verify flags", + 1046: "Archive size", + 1047: "Provide name", + 1048: "Require flags", + 1049: "Require name", + 1050: "Require version", + 1051: "No source", + 1052: "No patch", + 1053: "Conflict flags", + 1054: "Conflict name", + 1055: "Conflict version", + 1056: "Default prefix", + 1057: "Build root", + 1058: "Install prefix", + 1059: "Exclude arch", + 1060: "Exclude OS", + 1061: "Exclusive arch", + 1062: "Exclusive OS", + 1064: "RPM version", + 1065: "Trigger scripts", + 1066: "Trigger name", + 1067: "Trigger version", + 1068: "Trigger flags", + 1069: "Trigger index", + 1079: "Verify script", + #TODO: Finish the list (id 1070..1162 using rpm library source code) + } + + def __init__(self, parent, name, description=None): + Item.__init__(self, parent, name, description, self.tag_name) + +def sortRpmItem(a,b): + return int( a["offset"].value - b["offset"].value ) + +class PropertySet(FieldSet): + def __init__(self, parent, name, *args): + FieldSet.__init__(self, parent, name, *args) + self._size = self["content_item[1]"].address + self["size"].value * 8 + + def createFields(self): + # Read chunk header + yield Bytes(self, "signature", 3, r"Property signature (\x8E\xAD\xE8)") + if self["signature"].value != "\x8E\xAD\xE8": + raise ParserError("Invalid property signature") + yield UInt8(self, "version", "Signature version") + yield NullBytes(self, "reserved", 4, "Reserved") + yield UInt32(self, "count", "Count") + yield UInt32(self, "size", "Size") + + # Read item header + items = [] + for i in range(0, self["count"].value): + item = ItemHeader(self, "item[]") + yield item + items.append(item) + + # Sort items by their offset + items.sort( sortRpmItem ) + + # Read item content + start = self.current_size/8 + for item in items: + offset = item["offset"].value + diff = offset - (self.current_size/8 - start) + if 0 < diff: + yield NullBytes(self, "padding[]", diff) + yield ItemContent(self, "content[]", item) + size = start + self["size"].value - 
self.current_size/8 + if 0 < size: + yield NullBytes(self, "padding[]", size) + +class RpmFile(Parser): + PARSER_TAGS = { + "id": "rpm", + "category": "archive", + "file_ext": ("rpm",), + "mime": (u"application/x-rpm",), + "min_size": (96 + 16 + 16)*8, # file header + checksum + content header + "magic": (('\xED\xAB\xEE\xDB', 0),), + "description": "RPM package" + } + TYPE_NAME = { + 0: "Binary", + 1: "Source" + } + endian = BIG_ENDIAN + + def validate(self): + if self["signature"].value != '\xED\xAB\xEE\xDB': + return "Invalid signature" + if self["major_ver"].value != 3: + return "Unknown major version (%u)" % self["major_ver"].value + if self["type"].value not in self.TYPE_NAME: + return "Invalid RPM type" + return True + + def createFields(self): + yield Bytes(self, "signature", 4, r"RPM file signature (\xED\xAB\xEE\xDB)") + yield UInt8(self, "major_ver", "Major version") + yield UInt8(self, "minor_ver", "Minor version") + yield Enum(UInt16(self, "type", "RPM type"), RpmFile.TYPE_NAME) + yield UInt16(self, "architecture", "Architecture") + yield String(self, "name", 66, "Archive name", strip="\0", charset="ASCII") + yield UInt16(self, "os", "OS") + yield UInt16(self, "signature_type", "Type of signature") + yield NullBytes(self, "reserved", 16, "Reserved") + yield PropertySet(self, "checksum", "Checksum (signature)") + yield PropertySet(self, "header", "Header") + + if self._size is None: # TODO: is it possible to handle piped input? 
+ raise NotImplementedError + + size = (self._size - self.current_size) // 8 + if size: + if 3 <= size and self.stream.readBytes(self.current_size, 3) == "BZh": + yield SubFile(self, "content", size, "bzip2 content", parser=Bzip2Parser) + else: + yield SubFile(self, "content", size, "gzip content", parser=GzipParser) + diff --git a/libs/hachoir_parser/archive/sevenzip.py b/libs/hachoir_parser/archive/sevenzip.py new file mode 100644 index 0000000..7a0148f --- /dev/null +++ b/libs/hachoir_parser/archive/sevenzip.py @@ -0,0 +1,401 @@ +""" +7zip file parser + +Informations: +- File 7zformat.txt of 7-zip SDK: + http://www.7-zip.org/sdk.html + +Author: Olivier SCHWAB +Creation date: 6 december 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (Field, FieldSet, ParserError, + GenericVector, + Enum, UInt8, UInt32, UInt64, + Bytes, RawBytes) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler + +class SZUInt64(Field): + """ + Variable length UInt64, where the first byte gives both the number of bytes + needed and the upper byte value. 
+ """ + def __init__(self, parent, name, max_size=None, description=None): + Field.__init__(self, parent, name, size=8, description=description) + value = 0 + addr = self.absolute_address + mask = 0x80 + firstByte = parent.stream.readBits(addr, 8, LITTLE_ENDIAN) + for i in xrange(8): + addr += 8 + if not (firstByte & mask): + value += ((firstByte & (mask-1)) << (8*i)) + break + value |= (parent.stream.readBits(addr, 8, LITTLE_ENDIAN) << (8*i)) + mask >>= 1 + self._size += 8 + self.createValue = lambda: value + +ID_END, ID_HEADER, ID_ARCHIVE_PROPS, ID_ADD_STREAM_INFO, ID_MAIN_STREAM_INFO, \ +ID_FILES_INFO, ID_PACK_INFO, ID_UNPACK_INFO, ID_SUBSTREAMS_INFO, ID_SIZE, \ +ID_CRC, ID_FOLDER, ID_CODERS_UNPACK_SIZE, ID_NUM_UNPACK_STREAMS, \ +ID_EMPTY_STREAM, ID_EMPTY_FILE, ID_ANTI, ID_NAME, ID_CREATION_TIME, \ +ID_LAST_ACCESS_TIME, ID_LAST_WRITE_TIME, ID_WIN_ATTR, ID_COMMENT, \ +ID_ENCODED_HEADER = xrange(24) + +ID_INFO = { + ID_END : "End", + ID_HEADER : "Header embedding another one", + ID_ARCHIVE_PROPS : "Archive Properties", + ID_ADD_STREAM_INFO : "Additional Streams Info", + ID_MAIN_STREAM_INFO : "Main Streams Info", + ID_FILES_INFO : "Files Info", + ID_PACK_INFO : "Pack Info", + ID_UNPACK_INFO : "Unpack Info", + ID_SUBSTREAMS_INFO : "Substreams Info", + ID_SIZE : "Size", + ID_CRC : "CRC", + ID_FOLDER : "Folder", + ID_CODERS_UNPACK_SIZE: "Coders Unpacked size", + ID_NUM_UNPACK_STREAMS: "Number of Unpacked Streams", + ID_EMPTY_STREAM : "Empty Stream", + ID_EMPTY_FILE : "Empty File", + ID_ANTI : "Anti", + ID_NAME : "Name", + ID_CREATION_TIME : "Creation Time", + ID_LAST_ACCESS_TIME : "Last Access Time", + ID_LAST_WRITE_TIME : "Last Write Time", + ID_WIN_ATTR : "Win Attributes", + ID_COMMENT : "Comment", + ID_ENCODED_HEADER : "Header holding encoded data info", +} + +class SkippedData(FieldSet): + def createFields(self): + yield Enum(UInt8(self, "id[]"), ID_INFO) + size = SZUInt64(self, "size") + yield size + if size.value > 0: + yield RawBytes(self, "data", size.value) + 
+def waitForID(s, wait_id, wait_name="waited_id[]"): + while not s.eof: + addr = s.absolute_address+s.current_size + uid = s.stream.readBits(addr, 8, LITTLE_ENDIAN) + if uid == wait_id: + yield Enum(UInt8(s, wait_name), ID_INFO) + s.info("Found ID %s (%u)" % (ID_INFO[uid], uid)) + return + s.info("Skipping ID %u!=%u" % (uid, wait_id)) + yield SkippedData(s, "skipped_id[]", "%u != %u" % (uid, wait_id)) + +class HashDigest(FieldSet): + def __init__(self, parent, name, num_digests, desc=None): + FieldSet.__init__(self, parent, name, desc) + self.num_digests = num_digests + def createFields(self): + yield Enum(UInt8(self, "id"), ID_INFO) + bytes = self.stream.readBytes(self.absolute_address, self.num_digests) + if self.num_digests > 0: + yield GenericVector(self, "defined[]", self.num_digests, UInt8, "bool") + for index in xrange(self.num_digests): + if bytes[index]: + yield textHandler(UInt32(self, "hash[]", + "Hash for digest %u" % index), hexadecimal) + +class PackInfo(FieldSet): + def createFields(self): + yield Enum(UInt8(self, "id"), ID_INFO) + # Very important, helps determine where the data is + yield SZUInt64(self, "pack_pos", "Position of the packs") + num = SZUInt64(self, "num_pack_streams") + yield num + num = num.value + + for field in waitForID(self, ID_SIZE, "size_marker"): + yield field + + for size in xrange(num): + yield SZUInt64(self, "pack_size[]") + + while not self.eof: + addr = self.absolute_address+self.current_size + uid = self.stream.readBits(addr, 8, LITTLE_ENDIAN) + if uid == ID_END: + yield Enum(UInt8(self, "end_marker"), ID_INFO) + break + elif uid == ID_CRC: + yield HashDigest(self, "hash_digest", size) + else: + yield SkippedData(self, "skipped_data") + +def lzmaParams(value): + param = value.value + remainder = param / 9 + # Literal coder context bits + lc = param % 9 + # Position state bits + pb = remainder / 5 + # Literal coder position bits + lp = remainder % 5 + return "lc=%u pb=%u lp=%u" % (lc, lp, pb) + +class CoderID(FieldSet): + 
CODECS = { + # Only 2 methods ... and what about PPMD ? + "\0" : "copy", + "\3\1\1": "lzma", + } + def createFields(self): + byte = UInt8(self, "id_size") + yield byte + byte = byte.value + self.info("ID=%u" % byte) + size = byte & 0xF + if size > 0: + name = self.stream.readBytes(self.absolute_address+self.current_size, size) + if name in self.CODECS: + name = self.CODECS[name] + self.info("Codec is %s" % name) + else: + self.info("Undetermined codec %s" % name) + name = "unknown" + yield RawBytes(self, name, size) + #yield textHandler(Bytes(self, "id", size), lambda: name) + if byte & 0x10: + yield SZUInt64(self, "num_stream_in") + yield SZUInt64(self, "num_stream_out") + self.info("Streams: IN=%u OUT=%u" % \ + (self["num_stream_in"].value, self["num_stream_out"].value)) + if byte & 0x20: + size = SZUInt64(self, "properties_size[]") + yield size + if size.value == 5: + #LzmaDecodeProperties@LZMAStateDecode.c + yield textHandler(UInt8(self, "parameters"), lzmaParams) + yield filesizeHandler(UInt32(self, "dictionary_size")) + elif size.value > 0: + yield RawBytes(self, "properties[]", size.value) + +class CoderInfo(FieldSet): + def __init__(self, parent, name, desc=None): + FieldSet.__init__(self, parent, name, desc) + self.in_streams = 1 + self.out_streams = 1 + def createFields(self): + # The real ID + addr = self.absolute_address + self.current_size + b = self.parent.stream.readBits(addr, 8, LITTLE_ENDIAN) + cid = CoderID(self, "coder_id") + yield cid + if b&0x10: # Work repeated, ... 
+ self.in_streams = cid["num_stream_in"].value + self.out_streams = cid["num_stream_out"].value + + # Skip other IDs + while b&0x80: + addr = self.absolute_address + self.current_size + b = self.parent.stream.readBits(addr, 8, LITTLE_ENDIAN) + yield CoderID(self, "unused_codec_id[]") + +class BindPairInfo(FieldSet): + def createFields(self): + # 64 bits values then cast to 32 in fact + yield SZUInt64(self, "in_index") + yield SZUInt64(self, "out_index") + self.info("Indexes: IN=%u OUT=%u" % \ + (self["in_index"].value, self["out_index"].value)) + +class FolderItem(FieldSet): + def __init__(self, parent, name, desc=None): + FieldSet.__init__(self, parent, name, desc) + self.in_streams = 0 + self.out_streams = 0 + + def createFields(self): + yield SZUInt64(self, "num_coders") + num = self["num_coders"].value + self.info("Folder: %u codecs" % num) + + # Coders info + for index in xrange(num): + ci = CoderInfo(self, "coder_info[]") + yield ci + self.in_streams += ci.in_streams + self.out_streams += ci.out_streams + + # Bin pairs + self.info("out streams: %u" % self.out_streams) + for index in xrange(self.out_streams-1): + yield BindPairInfo(self, "bind_pair[]") + + # Packed streams + # @todo: Actually find mapping + packed_streams = self.in_streams - self.out_streams + 1 + if packed_streams == 1: + pass + else: + for index in xrange(packed_streams): + yield SZUInt64(self, "pack_stream[]") + + +class UnpackInfo(FieldSet): + def createFields(self): + yield Enum(UInt8(self, "id"), ID_INFO) + # Wait for synch + for field in waitForID(self, ID_FOLDER, "folder_marker"): + yield field + yield SZUInt64(self, "num_folders") + + # Get generic info + num = self["num_folders"].value + self.info("%u folders" % num) + yield UInt8(self, "is_external") + + # Read folder items + for folder_index in xrange(num): + yield FolderItem(self, "folder_item[]") + + # Get unpack sizes for each coder of each folder + for field in waitForID(self, ID_CODERS_UNPACK_SIZE, "coders_unpsize_marker"): + 
yield field + for folder_index in xrange(num): + folder_item = self["folder_item[%u]" % folder_index] + for index in xrange(folder_item.out_streams): + #yield UInt8(self, "unpack_size[]") + yield SZUInt64(self, "unpack_size[]") + + # Extract digests + while not self.eof: + addr = self.absolute_address+self.current_size + uid = self.stream.readBits(addr, 8, LITTLE_ENDIAN) + if uid == ID_END: + yield Enum(UInt8(self, "end_marker"), ID_INFO) + break + elif uid == ID_CRC: + yield HashDigest(self, "hash_digest", num) + else: + yield SkippedData(self, "skip_data") + +class SubStreamInfo(FieldSet): + def createFields(self): + yield Enum(UInt8(self, "id"), ID_INFO) + raise ParserError("SubStreamInfo not implemented yet") + +class EncodedHeader(FieldSet): + def createFields(self): + yield Enum(UInt8(self, "id"), ID_INFO) + while not self.eof: + addr = self.absolute_address+self.current_size + uid = self.stream.readBits(addr, 8, LITTLE_ENDIAN) + if uid == ID_END: + yield Enum(UInt8(self, "end_marker"), ID_INFO) + break + elif uid == ID_PACK_INFO: + yield PackInfo(self, "pack_info", ID_INFO[ID_PACK_INFO]) + elif uid == ID_UNPACK_INFO: + yield UnpackInfo(self, "unpack_info", ID_INFO[ID_UNPACK_INFO]) + elif uid == ID_SUBSTREAMS_INFO: + yield SubStreamInfo(self, "substreams_info", ID_INFO[ID_SUBSTREAMS_INFO]) + else: + self.info("Unexpected ID (%i)" % uid) + break + +class IDHeader(FieldSet): + def createFields(self): + yield Enum(UInt8(self, "id"), ID_INFO) + ParserError("IDHeader not implemented") + +class NextHeader(FieldSet): + def __init__(self, parent, name, desc="Next header"): + FieldSet.__init__(self, parent, name, desc) + self._size = 8*self["/signature/start_hdr/next_hdr_size"].value + # Less work, as much interpretable information as the other + # version... 
what an obnoxious format + def createFields2(self): + yield Enum(UInt8(self, "header_type"), ID_INFO) + yield RawBytes(self, "header_data", self._size-1) + def createFields(self): + uid = self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN) + if uid == ID_HEADER: + yield IDHeader(self, "header", ID_INFO[ID_HEADER]) + elif uid == ID_ENCODED_HEADER: + yield EncodedHeader(self, "encoded_hdr", ID_INFO[ID_ENCODED_HEADER]) + # Game Over: this is usually encoded using LZMA, not copy + # See SzReadAndDecodePackedStreams/SzDecode being called with the + # data position from "/next_hdr/encoded_hdr/pack_info/pack_pos" + # We should process further, yet we can't... + else: + ParserError("Unexpected ID %u" % uid) + size = self._size - self.current_size + if size > 0: + yield RawBytes(self, "next_hdr_data", size//8, "Next header's data") + +class Body(FieldSet): + def __init__(self, parent, name, desc="Body data"): + FieldSet.__init__(self, parent, name, desc) + self._size = 8*self["/signature/start_hdr/next_hdr_offset"].value + def createFields(self): + if "encoded_hdr" in self["/next_hdr/"]: + pack_size = sum([s.value for s in self.array("/next_hdr/encoded_hdr/pack_info/pack_size")]) + body_size = self["/next_hdr/encoded_hdr/pack_info/pack_pos"].value + yield RawBytes(self, "compressed_data", body_size, "Compressed data") + # Here we could check if copy method was used to "compress" it, + # but this never happens, so just output "compressed file info" + yield RawBytes(self, "compressed_file_info", pack_size, + "Compressed file information") + size = (self._size//8) - pack_size - body_size + if size > 0: + yield RawBytes(self, "unknown_data", size) + elif "header" in self["/next_hdr"]: + yield RawBytes(self, "compressed_data", self._size//8, "Compressed data") + +class StartHeader(FieldSet): + static_size = 160 + def createFields(self): + yield textHandler(UInt64(self, "next_hdr_offset", + "Next header offset"), hexadecimal) + yield UInt64(self, "next_hdr_size", "Next 
header size") + yield textHandler(UInt32(self, "next_hdr_crc", + "Next header CRC"), hexadecimal) + +class SignatureHeader(FieldSet): + static_size = 96 + StartHeader.static_size + def createFields(self): + yield Bytes(self, "signature", 6, "Signature Header") + yield UInt8(self, "major_ver", "Archive major version") + yield UInt8(self, "minor_ver", "Archive minor version") + yield textHandler(UInt32(self, "start_hdr_crc", + "Start header CRC"), hexadecimal) + yield StartHeader(self, "start_hdr", "Start header") + +class SevenZipParser(Parser): + PARSER_TAGS = { + "id": "7zip", + "category": "archive", + "file_ext": ("7z",), + "mime": (u"application/x-7z-compressed",), + "min_size": 32*8, + "magic": (("7z\xbc\xaf\x27\x1c", 0),), + "description": "Compressed archive in 7z format" + } + endian = LITTLE_ENDIAN + + def createFields(self): + yield SignatureHeader(self, "signature", "Signature Header") + yield Body(self, "body_data") + yield NextHeader(self, "next_hdr") + + def validate(self): + if self.stream.readBytes(0,6) != "7z\xbc\xaf'\x1c": + return "Invalid signature" + return True + + def createContentSize(self): + size = self["/signature/start_hdr/next_hdr_offset"].value + size += self["/signature/start_hdr/next_hdr_size"].value + size += 12 # Signature size + size += 20 # Start header size + return size*8 diff --git a/libs/hachoir_parser/archive/tar.py b/libs/hachoir_parser/archive/tar.py new file mode 100644 index 0000000..08a9040 --- /dev/null +++ b/libs/hachoir_parser/archive/tar.py @@ -0,0 +1,124 @@ +""" +Tar archive parser. + +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + Enum, UInt8, SubFile, String, NullBytes) +from hachoir_core.tools import humanFilesize, paddingSize, timestampUNIX +from hachoir_core.endian import BIG_ENDIAN +import re + +class FileEntry(FieldSet): + type_name = { + # 48 is "0", 49 is "1", ... 
+ 0: u"Normal disk file (old format)", + 48: u"Normal disk file", + 49: u"Link to previously dumped file", + 50: u"Symbolic link", + 51: u"Character special file", + 52: u"Block special file", + 53: u"Directory", + 54: u"FIFO special file", + 55: u"Contiguous file" + } + + def getOctal(self, name): + return self.octal2int(self[name].value) + + def getDatetime(self): + """ + Create modification date as Unicode string, may raise ValueError. + """ + timestamp = self.getOctal("mtime") + return timestampUNIX(timestamp) + + def createFields(self): + yield String(self, "name", 100, "Name", strip="\0", charset="ISO-8859-1") + yield String(self, "mode", 8, "Mode", strip=" \0", charset="ASCII") + yield String(self, "uid", 8, "User ID", strip=" \0", charset="ASCII") + yield String(self, "gid", 8, "Group ID", strip=" \0", charset="ASCII") + yield String(self, "size", 12, "Size", strip=" \0", charset="ASCII") + yield String(self, "mtime", 12, "Modification time", strip=" \0", charset="ASCII") + yield String(self, "check_sum", 8, "Check sum", strip=" \0", charset="ASCII") + yield Enum(UInt8(self, "type", "Type"), self.type_name) + yield String(self, "lname", 100, "Link name", strip=" \0", charset="ISO-8859-1") + yield String(self, "magic", 8, "Magic", strip=" \0", charset="ASCII") + yield String(self, "uname", 32, "User name", strip=" \0", charset="ISO-8859-1") + yield String(self, "gname", 32, "Group name", strip=" \0", charset="ISO-8859-1") + yield String(self, "devmajor", 8, "Dev major", strip=" \0", charset="ASCII") + yield String(self, "devminor", 8, "Dev minor", strip=" \0", charset="ASCII") + yield NullBytes(self, "padding", 167, "Padding (zero)") + + filesize = self.getOctal("size") + if filesize: + yield SubFile(self, "content", filesize, filename=self["name"].value) + + size = paddingSize(self.current_size//8, 512) + if size: + yield NullBytes(self, "padding_end", size, "Padding (512 align)") + + def convertOctal(self, chunk): + return self.octal2int(chunk.value) + + 
def isEmpty(self): + return self["name"].value == "" + + def octal2int(self, text): + try: + return int(text, 8) + except ValueError: + return 0 + + def createDescription(self): + if self.isEmpty(): + desc = "(terminator, empty header)" + else: + filename = self["name"].value + filesize = humanFilesize(self.getOctal("size")) + desc = "(%s: %s, %s)" % \ + (filename, self["type"].display, filesize) + return "Tar File " + desc + +class TarFile(Parser): + endian = BIG_ENDIAN + PARSER_TAGS = { + "id": "tar", + "category": "archive", + "file_ext": ("tar",), + "mime": (u"application/x-tar", u"application/x-gtar"), + "min_size": 512*8, + "magic": (("ustar \0", 257*8),), + "subfile": "skip", + "description": "TAR archive", + } + _sign = re.compile("ustar *\0|[ \0]*$") + + def validate(self): + if not self._sign.match(self.stream.readBytes(257*8, 8)): + return "Invalid magic number" + if self[0].name == "terminator": + return "Don't contain any file" + try: + int(self["file[0]/uid"].value, 8) + int(self["file[0]/gid"].value, 8) + int(self["file[0]/size"].value, 8) + except ValueError: + return "Invalid file size" + return True + + def createFields(self): + while not self.eof: + field = FileEntry(self, "file[]") + if field.isEmpty(): + yield NullBytes(self, "terminator", 512) + break + yield field + if self.current_size < self._size: + yield self.seekBit(self._size, "end") + + def createContentSize(self): + return self["terminator"].address + self["terminator"].size + diff --git a/libs/hachoir_parser/archive/zip.py b/libs/hachoir_parser/archive/zip.py new file mode 100644 index 0000000..b3cd54a --- /dev/null +++ b/libs/hachoir_parser/archive/zip.py @@ -0,0 +1,427 @@ +""" +Zip splitter. 
+ +Status: can read most important headers +Authors: Christophe Gisquet and Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + Bit, Bits, Enum, + TimeDateMSDOS32, SubFile, + UInt8, UInt16, UInt32, UInt64, + String, PascalString16, + RawBytes) +from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal +from hachoir_core.error import HACHOIR_ERRORS +from hachoir_core.tools import makeUnicode +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.common.deflate import Deflate + +MAX_FILESIZE = 1000 * 1024 * 1024 + +COMPRESSION_DEFLATE = 8 +COMPRESSION_METHOD = { + 0: u"no compression", + 1: u"Shrunk", + 2: u"Reduced (factor 1)", + 3: u"Reduced (factor 2)", + 4: u"Reduced (factor 3)", + 5: u"Reduced (factor 4)", + 6: u"Imploded", + 7: u"Tokenizing", + 8: u"Deflate", + 9: u"Deflate64", + 10: u"PKWARE Imploding", + 11: u"Reserved by PKWARE", + 12: u"File is compressed using BZIP2 algorithm", + 13: u"Reserved by PKWARE", + 14: u"LZMA (EFS)", + 15: u"Reserved by PKWARE", + 16: u"Reserved by PKWARE", + 17: u"Reserved by PKWARE", + 18: u"File is compressed using IBM TERSE (new)", + 19: u"IBM LZ77 z Architecture (PFS)", + 98: u"PPMd version I, Rev 1", +} + +def ZipRevision(field): + return "%u.%u" % divmod(field.value, 10) + +class ZipVersion(FieldSet): + static_size = 16 + HOST_OS = { + 0: u"FAT file system (DOS, OS/2, NT)", + 1: u"Amiga", + 2: u"VMS (VAX or Alpha AXP)", + 3: u"Unix", + 4: u"VM/CMS", + 5: u"Atari", + 6: u"HPFS file system (OS/2, NT 3.x)", + 7: u"Macintosh", + 8: u"Z-System", + 9: u"CP/M", + 10: u"TOPS-20", + 11: u"NTFS file system (NT)", + 12: u"SMS/QDOS", + 13: u"Acorn RISC OS", + 14: u"VFAT file system (Win95, NT)", + 15: u"MVS", + 16: u"BeOS (BeBox or PowerMac)", + 17: u"Tandem", + } + def createFields(self): + yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision) + yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS) + +class 
ZipGeneralFlags(FieldSet): + static_size = 16 + def createFields(self): + # Need the compression info from the parent, and that is the byte following + method = self.stream.readBits(self.absolute_address+16, 16, LITTLE_ENDIAN) + + yield Bits(self, "unused[]", 2, "Unused") + yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked") + yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.") + yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8") + yield Bits(self, "unused[]", 4, "Unused") + yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)") + yield Bit(self, "is_patched", "File is compressed with patched data?") + yield Bit(self, "enhanced_deflate", "Reserved for use with method 8") + yield Bit(self, "has_descriptor", + "Compressed data followed by descriptor?") + if method == 6: + yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)") + yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)") + elif method in (8, 9): + NAME = { + 0: "Normal compression", + 1: "Maximum compression", + 2: "Fast compression", + 3: "Super Fast compression" + } + yield Enum(Bits(self, "method", 2), NAME) + elif method == 14: #LZMA + yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker") + yield Bit(self, "unused[]") + else: + yield Bits(self, "compression_info", 2) + yield Bit(self, "is_encrypted", "File is encrypted?") + +class ExtraField(FieldSet): + EXTRA_FIELD_ID = { + 0x0007: "AV Info", + 0x0009: "OS/2 extended attributes (also Info-ZIP)", + 0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented! + 0x000c: "PKWARE VAX/VMS (also Info-ZIP)", + 0x000d: "PKWARE Unix", + 0x000f: "Patch Descriptor", + 0x07c8: "Info-ZIP Macintosh (old, J. Lee)", + 0x2605: "ZipIt Macintosh (first version)", + 0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)", + 0x334d: "Info-ZIP Macintosh (new, D. 
Haase Mac3 field)", + 0x4341: "Acorn/SparkFS (David Pilling)", + 0x4453: "Windows NT security descriptor (binary ACL)", + 0x4704: "VM/CMS", + 0x470f: "MVS", + 0x4b46: "FWKCS MD5 (third party, see below)", + 0x4c41: "OS/2 access control list (text ACL)", + 0x4d49: "Info-ZIP VMS (VAX or Alpha)", + 0x5356: "AOS/VS (binary ACL)", + 0x5455: "extended timestamp", + 0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)", + 0x6542: "BeOS (BeBox, PowerMac, etc.)", + 0x756e: "ASi Unix", + 0x7855: "Info-ZIP Unix (new)", + 0xfb4a: "SMS/QDOS", + } + def createFields(self): + yield Enum(UInt16(self, "field_id", "Extra field ID"), + self.EXTRA_FIELD_ID) + size = UInt16(self, "field_data_size", "Extra field data size") + yield size + if size.value > 0: + yield RawBytes(self, "field_data", size, "Unknown field data") + +def ZipStartCommonFields(self): + yield ZipVersion(self, "version_needed", "Version needed") + yield ZipGeneralFlags(self, "flags", "General purpose flag") + yield Enum(UInt16(self, "compression", "Compression method"), + COMPRESSION_METHOD) + yield TimeDateMSDOS32(self, "last_mod", "Last modification file time") + yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal) + yield UInt32(self, "compressed_size", "Compressed size") + yield UInt32(self, "uncompressed_size", "Uncompressed size") + yield UInt16(self, "filename_length", "Filename length") + yield UInt16(self, "extra_length", "Extra fields length") + +def zipGetCharset(self): + if self["flags/uses_unicode"].value: + return "UTF-8" + else: + return "ISO-8859-15" + +class ZipCentralDirectory(FieldSet): + HEADER = 0x02014b50 + def createFields(self): + yield ZipVersion(self, "version_made_by", "Version made by") + for field in ZipStartCommonFields(self): + yield field + + # Check unicode status + charset = zipGetCharset(self) + + yield UInt16(self, "comment_length", "Comment length") + yield UInt16(self, "disk_number_start", "Disk number start") + yield UInt16(self, "internal_attr", "Internal file 
attributes") + yield UInt32(self, "external_attr", "External file attributes") + yield UInt32(self, "offset_header", "Relative offset of local header") + yield String(self, "filename", self["filename_length"].value, + "Filename", charset=charset) + if 0 < self["extra_length"].value: + yield RawBytes(self, "extra", self["extra_length"].value, + "Extra fields") + if 0 < self["comment_length"].value: + yield String(self, "comment", self["comment_length"].value, + "Comment", charset=charset) + + def createDescription(self): + return "Central directory: %s" % self["filename"].display + +class Zip64EndCentralDirectory(FieldSet): + HEADER = 0x06064b50 + def createFields(self): + yield UInt64(self, "zip64_end_size", + "Size of zip64 end of central directory record") + yield ZipVersion(self, "version_made_by", "Version made by") + yield ZipVersion(self, "version_needed", "Version needed to extract") + yield UInt32(self, "number_disk", "Number of this disk") + yield UInt32(self, "number_disk2", + "Number of the disk with the start of the central directory") + yield UInt64(self, "number_entries", + "Total number of entries in the central directory on this disk") + yield UInt64(self, "number_entries2", + "Total number of entries in the central directory") + yield UInt64(self, "size", "Size of the central directory") + yield UInt64(self, "offset", "Offset of start of central directory") + if 0 < self["zip64_end_size"].value: + yield RawBytes(self, "data_sector", self["zip64_end_size"].value, + "zip64 extensible data sector") + +class ZipEndCentralDirectory(FieldSet): + HEADER = 0x06054b50 + def createFields(self): + yield UInt16(self, "number_disk", "Number of this disk") + yield UInt16(self, "number_disk2", "Number in the central dir") + yield UInt16(self, "total_number_disk", + "Total number of entries in this disk") + yield UInt16(self, "total_number_disk2", + "Total number of entries in the central dir") + yield UInt32(self, "size", "Size of the central directory") + yield 
UInt32(self, "offset", "Offset of start of central directory") + yield PascalString16(self, "comment", "ZIP comment") + +class ZipDataDescriptor(FieldSet): + HEADER_STRING = "\x50\x4B\x07\x08" + HEADER = 0x08074B50 + static_size = 96 + def createFields(self): + yield textHandler(UInt32(self, "file_crc32", + "Checksum (CRC32)"), hexadecimal) + yield filesizeHandler(UInt32(self, "file_compressed_size", + "Compressed size (bytes)")) + yield filesizeHandler(UInt32(self, "file_uncompressed_size", + "Uncompressed size (bytes)")) + +class FileEntry(FieldSet): + HEADER = 0x04034B50 + filename = None + + def data(self, size): + compression = self["compression"].value + if compression == 0: + return SubFile(self, "data", size, filename=self.filename) + compressed = SubFile(self, "compressed_data", size, filename=self.filename) + if compression == COMPRESSION_DEFLATE: + return Deflate(compressed) + else: + return compressed + + def resync(self): + # Non-seekable output, search the next data descriptor + size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False, + self.absolute_address+self.current_size) + if size <= 0: + raise ParserError("Couldn't resync to %s" % + ZipDataDescriptor.HEADER_STRING) + yield self.data(size) + yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal) + data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor") + #self.info("Resynced!") + yield data_desc + # The above could be checked anytime, but we prefer trying parsing + # than aborting + if self["crc32"].value == 0 and \ + data_desc["file_compressed_size"].value != size: + raise ParserError("Bad resync: position=>%i but data_desc=>%i" % + (size, data_desc["file_compressed_size"].value)) + + def createFields(self): + for field in ZipStartCommonFields(self): + yield field + length = self["filename_length"].value + + + if length: + filename = String(self, "filename", length, "Filename", + charset=zipGetCharset(self)) + yield filename + self.filename = 
filename.value + if self["extra_length"].value: + yield RawBytes(self, "extra", self["extra_length"].value, "Extra") + size = self["compressed_size"].value + if size > 0: + yield self.data(size) + elif self["flags/incomplete"].value: + for field in self.resync(): + yield field + if self["flags/has_descriptor"].value: + yield ZipDataDescriptor(self, "data_desc", "Data descriptor") + + def createDescription(self): + return "File entry: %s (%s)" % \ + (self["filename"].value, self["compressed_size"].display) + + def validate(self): + if self["compression"].value not in COMPRESSION_METHOD: + return "Unknown compression method (%u)" % self["compression"].value + return "" + +class ZipSignature(FieldSet): + HEADER = 0x05054B50 + def createFields(self): + yield PascalString16(self, "signature", "Signature") + +class Zip64EndCentralDirectoryLocator(FieldSet): + HEADER = 0x07064b50 + def createFields(self): + yield UInt32(self, "disk_number", \ + "Number of the disk with the start of the zip64 end of central directory") + yield UInt64(self, "relative_offset", \ + "Relative offset of the zip64 end of central directory record") + yield UInt32(self, "disk_total_number", "Total number of disks") + + +class ZipFile(Parser): + endian = LITTLE_ENDIAN + MIME_TYPES = { + # Default ZIP archive + u"application/zip": "zip", + u"application/x-zip": "zip", + + # Java archive (JAR) + u"application/x-jar": "jar", + u"application/java-archive": "jar", + + # OpenOffice 1.0 + u"application/vnd.sun.xml.calc": "sxc", + u"application/vnd.sun.xml.draw": "sxd", + u"application/vnd.sun.xml.impress": "sxi", + u"application/vnd.sun.xml.writer": "sxw", + u"application/vnd.sun.xml.math": "sxm", + + # OpenOffice 1.0 (template) + u"application/vnd.sun.xml.calc.template": "stc", + u"application/vnd.sun.xml.draw.template": "std", + u"application/vnd.sun.xml.impress.template": "sti", + u"application/vnd.sun.xml.writer.template": "stw", + u"application/vnd.sun.xml.writer.global": "sxg", + + # OpenDocument + 
u"application/vnd.oasis.opendocument.chart": "odc", + u"application/vnd.oasis.opendocument.image": "odi", + u"application/vnd.oasis.opendocument.database": "odb", + u"application/vnd.oasis.opendocument.formula": "odf", + u"application/vnd.oasis.opendocument.graphics": "odg", + u"application/vnd.oasis.opendocument.presentation": "odp", + u"application/vnd.oasis.opendocument.spreadsheet": "ods", + u"application/vnd.oasis.opendocument.text": "odt", + u"application/vnd.oasis.opendocument.text-master": "odm", + + # OpenDocument (template) + u"application/vnd.oasis.opendocument.graphics-template": "otg", + u"application/vnd.oasis.opendocument.presentation-template": "otp", + u"application/vnd.oasis.opendocument.spreadsheet-template": "ots", + u"application/vnd.oasis.opendocument.text-template": "ott", + } + PARSER_TAGS = { + "id": "zip", + "category": "archive", + "file_ext": tuple(MIME_TYPES.itervalues()), + "mime": tuple(MIME_TYPES.iterkeys()), + "magic": (("PK\3\4", 0),), + "subfile": "skip", + "min_size": (4 + 26)*8, # header + file entry + "description": "ZIP archive" + } + + def validate(self): + if self["header[0]"].value != FileEntry.HEADER: + return "Invalid magic" + try: + file0 = self["file[0]"] + except HACHOIR_ERRORS, err: + return "Unable to get file #0" + err = file0.validate() + if err: + return "File #0: %s" % err + return True + + def createFields(self): + # File data + self.signature = None + self.central_directory = [] + while not self.eof: + header = textHandler(UInt32(self, "header[]", "Header"), hexadecimal) + yield header + header = header.value + if header == FileEntry.HEADER: + yield FileEntry(self, "file[]") + elif header == ZipDataDescriptor.HEADER: + yield ZipDataDescriptor(self, "spanning[]") + elif header == 0x30304b50: + yield ZipDataDescriptor(self, "temporary_spanning[]") + elif header == ZipCentralDirectory.HEADER: + yield ZipCentralDirectory(self, "central_directory[]") + elif header == ZipEndCentralDirectory.HEADER: + yield 
ZipEndCentralDirectory(self, "end_central_directory", "End of central directory") + elif header == Zip64EndCentralDirectory.HEADER: + yield Zip64EndCentralDirectory(self, "end64_central_directory", "ZIP64 end of central directory") + elif header == ZipSignature.HEADER: + yield ZipSignature(self, "signature", "Signature") + elif header == Zip64EndCentralDirectoryLocator.HEADER: + yield Zip64EndCentralDirectoryLocator(self, "end_locator", "ZIP64 Enf of central directory locator") + else: + raise ParserError("Error, unknown ZIP header (0x%08X)." % header) + + def createMimeType(self): + if self["file[0]/filename"].value == "mimetype": + return makeUnicode(self["file[0]/data"].value) + else: + return u"application/zip" + + def createFilenameSuffix(self): + if self["file[0]/filename"].value == "mimetype": + mime = self["file[0]/compressed_data"].value + if mime in self.MIME_TYPES: + return "." + self.MIME_TYPES[mime] + return ".zip" + + def createContentSize(self): + start = 0 + end = MAX_FILESIZE * 8 + end = self.stream.searchBytes("PK\5\6", start, end) + if end is not None: + return end + 22*8 + return None + diff --git a/libs/hachoir_parser/audio/__init__.py b/libs/hachoir_parser/audio/__init__.py new file mode 100644 index 0000000..1cc33a2 --- /dev/null +++ b/libs/hachoir_parser/audio/__init__.py @@ -0,0 +1,12 @@ +from hachoir_parser.audio.aiff import AiffFile +from hachoir_parser.audio.au import AuFile +from hachoir_parser.audio.itunesdb import ITunesDBFile +from hachoir_parser.audio.midi import MidiFile +from hachoir_parser.audio.mpeg_audio import MpegAudioFile +from hachoir_parser.audio.real_audio import RealAudioFile +from hachoir_parser.audio.xm import XMModule +from hachoir_parser.audio.s3m import S3MModule +from hachoir_parser.audio.s3m import PTMModule +from hachoir_parser.audio.mod import AmigaModule +from hachoir_parser.audio.flac import FlacParser + diff --git a/libs/hachoir_parser/audio/aiff.py b/libs/hachoir_parser/audio/aiff.py new file mode 100644 
index 0000000..d8f4169 --- /dev/null +++ b/libs/hachoir_parser/audio/aiff.py @@ -0,0 +1,127 @@ +""" +Audio Interchange File Format (AIFF) parser. + +Author: Victor Stinner +Creation: 27 december 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt16, UInt32, Float80, TimestampMac32, + RawBytes, NullBytes, + String, Enum, PascalString32) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import filesizeHandler +from hachoir_core.tools import alignValue +from hachoir_parser.audio.id3 import ID3v2 + +CODEC_NAME = { + 'ACE2': u"ACE 2-to-1", + 'ACE8': u"ACE 8-to-3", + 'MAC3': u"MAC 3-to-1", + 'MAC6': u"MAC 6-to-1", + 'NONE': u"None", + 'sowt': u"Little-endian, no compression", +} + +class Comment(FieldSet): + def createFields(self): + yield TimestampMac32(self, "timestamp") + yield PascalString32(self, "text") + +def parseText(self): + yield String(self, "text", self["size"].value) + +def parseID3(self): + yield ID3v2(self, "id3v2", size=self["size"].value*8) + +def parseComment(self): + yield UInt16(self, "nb_comment") + for index in xrange(self["nb_comment"].value): + yield Comment(self, "comment[]") + +def parseCommon(self): + yield UInt16(self, "nb_channel") + yield UInt32(self, "nb_sample") + yield UInt16(self, "sample_size") + yield Float80(self, "sample_rate") + yield Enum(String(self, "codec", 4, strip="\0", charset="ASCII"), CODEC_NAME) + +def parseVersion(self): + yield TimestampMac32(self, "timestamp") + +def parseSound(self): + yield UInt32(self, "offset") + yield UInt32(self, "block_size") + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "data", size) + +class Chunk(FieldSet): + TAG_INFO = { + 'COMM': ('common', "Common chunk", parseCommon), + 'COMT': ('comment', "Comment", parseComment), + 'NAME': ('name', "Name", parseText), + 'AUTH': ('author', "Author", parseText), + 'FVER': ('version', "Version", parseVersion), + 'SSND': ('sound', "Sound data", 
parseSound), + 'ID3 ': ('id3', "ID3", parseID3), + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = (8 + alignValue(self["size"].value, 2)) * 8 + tag = self["type"].value + if tag in self.TAG_INFO: + self._name, self._description, self._parser = self.TAG_INFO[tag] + else: + self._parser = None + + def createFields(self): + yield String(self, "type", 4, "Signature (FORM)", charset="ASCII") + yield filesizeHandler(UInt32(self, "size")) + size = self["size"].value + if size: + if self._parser: + for field in self._parser(self): + yield field + if size % 2: + yield NullBytes(self, "padding", 1) + else: + yield RawBytes(self, "data", size) + +class AiffFile(Parser): + PARSER_TAGS = { + "id": "aiff", + "category": "audio", + "file_ext": ("aif", "aiff", "aifc"), + "mime": (u"audio/x-aiff",), + "magic_regex": (("FORM.{4}AIF[CF]", 0),), + "min_size": 12*8, + "description": "Audio Interchange File Format (AIFF)" + } + endian = BIG_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 4) != "FORM": + return "Invalid signature" + if self.stream.readBytes(8*8, 4) not in ("AIFF", "AIFC"): + return "Invalid type" + return True + + def createFields(self): + yield String(self, "signature", 4, "Signature (FORM)", charset="ASCII") + yield filesizeHandler(UInt32(self, "filesize")) + yield String(self, "type", 4, "Form type (AIFF or AIFC)", charset="ASCII") + while not self.eof: + yield Chunk(self, "chunk[]") + + def createDescription(self): + if self["type"].value == "AIFC": + return "Audio Interchange File Format Compressed (AIFC)" + else: + return "Audio Interchange File Format (AIFF)" + + def createContentSize(self): + return self["filesize"].value * 8 + diff --git a/libs/hachoir_parser/audio/au.py b/libs/hachoir_parser/audio/au.py new file mode 100644 index 0000000..ab9d9c1 --- /dev/null +++ b/libs/hachoir_parser/audio/au.py @@ -0,0 +1,88 @@ +""" +AU audio file parser + +Author: Victor Stinner +Creation: 12 july 2006 +""" + +from 
hachoir_parser import Parser +from hachoir_core.field import UInt32, Enum, String, RawBytes +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import displayHandler, filesizeHandler +from hachoir_core.tools import createDict, humanFrequency + +class AuFile(Parser): + PARSER_TAGS = { + "id": "sun_next_snd", + "category": "audio", + "file_ext": ("au", "snd"), + "mime": (u"audio/basic",), + "min_size": 24*8, + "magic": ((".snd", 0),), + "description": "Sun/NeXT audio" + } + endian = BIG_ENDIAN + + CODEC_INFO = { + 1: (8, u"8-bit ISDN u-law"), + 2: (8, u"8-bit linear PCM"), + 3: (16, u"16-bit linear PCM"), + 4: (24, u"24-bit linear PCM"), + 5: (32, u"32-bit linear PCM"), + 6: (32, u"32-bit IEEE floating point"), + 7: (64, u"64-bit IEEE floating point"), + 8: (None, u"Fragmented sample data"), + 9: (None, u"DSP program"), + 10: (8, u"8-bit fixed point"), + 11: (16, u"16-bit fixed point"), + 12: (24, u"24-bit fixed point"), + 13: (32, u"32-bit fixed point"), + 18: (16, u"16-bit linear with emphasis"), + 19: (16, u"16-bit linear compressed"), + 20: (16, u"16-bit linear with emphasis and compression"), + 21: (None, u"Music kit DSP commands"), + 23: (None, u"4-bit ISDN u-law compressed (CCITT G.721 ADPCM)"), + 24: (None, u"ITU-T G.722 ADPCM"), + 25: (None, u"ITU-T G.723 3-bit ADPCM"), + 26: (None, u"ITU-T G.723 5-bit ADPCM"), + 27: (8, u"8-bit ISDN A-law"), + } + + # Create bit rate and codec name dictionnaries + BITS_PER_SAMPLE = createDict(CODEC_INFO, 0) + CODEC_NAME = createDict(CODEC_INFO, 1) + + VALID_NB_CHANNEL = set((1,2)) # FIXME: 4, 5, 7, 8 channels are supported? + + def validate(self): + if self.stream.readBytes(0, 4) != ".snd": + return "Wrong file signature" + if self["channels"].value not in self.VALID_NB_CHANNEL: + return "Invalid number of channel" + return True + + def getBitsPerSample(self): + """ + Get bit rate (number of bit per sample per channel), + may returns None if you unable to compute it. 
+ """ + return self.BITS_PER_SAMPLE.get(self["codec"].value) + + def createFields(self): + yield String(self, "signature", 4, 'Format signature (".snd")', charset="ASCII") + yield UInt32(self, "data_ofs", "Data offset") + yield filesizeHandler(UInt32(self, "data_size", "Data size")) + yield Enum(UInt32(self, "codec", "Audio codec"), self.CODEC_NAME) + yield displayHandler(UInt32(self, "sample_rate", "Number of samples/second"), humanFrequency) + yield UInt32(self, "channels", "Number of interleaved channels") + + size = self["data_ofs"].value - self.current_size // 8 + if 0 < size: + yield String(self, "info", size, "Information", strip=" \0", charset="ISO-8859-1") + + size = min(self["data_size"].value, (self.size - self.current_size) // 8) + yield RawBytes(self, "audio_data", size, "Audio data") + + def createContentSize(self): + return (self["data_ofs"].value + self["data_size"].value) * 8 + diff --git a/libs/hachoir_parser/audio/flac.py b/libs/hachoir_parser/audio/flac.py new file mode 100644 index 0000000..f739ff7 --- /dev/null +++ b/libs/hachoir_parser/audio/flac.py @@ -0,0 +1,157 @@ +""" +FLAC (audio) parser + +Documentation: + + * http://flac.sourceforge.net/format.html + +Author: Esteban Loiseau +Creation date: 2008-04-09 +""" + +from hachoir_parser import Parser +from hachoir_core.field import FieldSet, String, Bit, Bits, UInt16, UInt24, RawBytes, Enum, NullBytes +from hachoir_core.stream import BIG_ENDIAN, LITTLE_ENDIAN +from hachoir_core.tools import createDict +from hachoir_parser.container.ogg import parseVorbisComment + +class VorbisComment(FieldSet): + endian = LITTLE_ENDIAN + createFields = parseVorbisComment + +class StreamInfo(FieldSet): + static_size = 34*8 + def createFields(self): + yield UInt16(self, "min_block_size", "The minimum block size (in samples) used in the stream") + yield UInt16(self, "max_block_size", "The maximum block size (in samples) used in the stream") + yield UInt24(self, "min_frame_size", "The minimum frame size (in bytes) 
used in the stream") + yield UInt24(self, "max_frame_size", "The maximum frame size (in bytes) used in the stream") + yield Bits(self, "sample_hertz", 20, "Sample rate in Hertz") + yield Bits(self, "nb_channel", 3, "Number of channels minus one") + yield Bits(self, "bits_per_sample", 5, "Bits per sample minus one") + yield Bits(self, "total_samples", 36, "Total samples in stream") + yield RawBytes(self, "md5sum", 16, "MD5 signature of the unencoded audio data") + +class SeekPoint(FieldSet): + def createFields(self): + yield Bits(self, "sample_number", 64, "Sample number") + yield Bits(self, "offset", 64, "Offset in bytes") + yield Bits(self, "nb_sample", 16) + +class SeekTable(FieldSet): + def createFields(self): + while not self.eof: + yield SeekPoint(self, "point[]") + +class MetadataBlock(FieldSet): + "Metadata block field: http://flac.sourceforge.net/format.html#metadata_block" + + BLOCK_TYPES = { + 0: ("stream_info", u"Stream info", StreamInfo), + 1: ("padding[]", u"Padding", None), + 2: ("application[]", u"Application", None), + 3: ("seek_table", u"Seek table", SeekTable), + 4: ("comment", u"Vorbis comment", VorbisComment), + 5: ("cue_sheet[]", u"Cue sheet", None), + 6: ("picture[]", u"Picture", None), + } + BLOCK_TYPE_DESC = createDict(BLOCK_TYPES, 1) + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = 32 + self["metadata_length"].value * 8 + try: + key = self["block_type"].value + self._name, self._description, self.handler = self.BLOCK_TYPES[key] + except KeyError: + self.handler = None + + def createFields(self): + yield Bit(self, "last_metadata_block", "True if this is the last metadata block") + yield Enum(Bits(self, "block_type", 7, "Metadata block header type"), self.BLOCK_TYPE_DESC) + yield UInt24(self, "metadata_length", "Length of following metadata in bytes (doesn't include this header)") + + block_type = self["block_type"].value + size = self["metadata_length"].value + if not size: + return + try: + handler 
= self.BLOCK_TYPES[block_type][2] + except KeyError: + handler = None + if handler: + yield handler(self, "content", size=size*8) + elif self["block_type"].value == 1: + yield NullBytes(self, "padding", size) + else: + yield RawBytes(self, "rawdata", size) + +class Metadata(FieldSet): + def createFields(self): + while not self.eof: + field = MetadataBlock(self,"metadata_block[]") + yield field + if field["last_metadata_block"].value: + break + +class Frame(FieldSet): + SAMPLE_RATES = { + 0: "get from STREAMINFO metadata block", + 1: "88.2kHz", + 2: "176.4kHz", + 3: "192kHz", + 4: "8kHz", + 5: "16kHz", + 6: "22.05kHz", + 7: "24kHz", + 8: "32kHz", + 9: "44.1kHz", + 10: "48kHz", + 11: "96kHz", + 12: "get 8 bit sample rate (in kHz) from end of header", + 13: "get 16 bit sample rate (in Hz) from end of header", + 14: "get 16 bit sample rate (in tens of Hz) from end of header", + } + + def createFields(self): + yield Bits(self, "sync", 14, "Sync code: 11111111111110") + yield Bit(self, "reserved[]") + yield Bit(self, "blocking_strategy") + yield Bits(self, "block_size", 4) + yield Enum(Bits(self, "sample_rate", 4), self.SAMPLE_RATES) + yield Bits(self, "channel_assign", 4) + yield Bits(self, "sample_size", 3) + yield Bit(self, "reserved[]") + # FIXME: Finish frame header parser + +class Frames(FieldSet): + def createFields(self): + while not self.eof: + yield Frame(self, "frame[]") + # FIXME: Parse all frames + return + +class FlacParser(Parser): + "Parse FLAC audio files: FLAC is a lossless audio codec" + MAGIC = "fLaC\x00" + PARSER_TAGS = { + "id": "flac", + "category": "audio", + "file_ext": ("flac",), + "mime": (u"audio/x-flac",), + "magic": ((MAGIC, 0),), + "min_size": 4*8, + "description": "FLAC audio", + } + endian = BIG_ENDIAN + + def validate(self): + if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC: + return u"Invalid magic string" + return True + + def createFields(self): + yield String(self, "signature", 4,charset="ASCII", description="FLAC 
signature: fLaC string") + yield Metadata(self,"metadata") + yield Frames(self,"frames") + diff --git a/libs/hachoir_parser/audio/id3.py b/libs/hachoir_parser/audio/id3.py new file mode 100644 index 0000000..3cfda25 --- /dev/null +++ b/libs/hachoir_parser/audio/id3.py @@ -0,0 +1,507 @@ +""" +ID3 metadata parser, supported versions: 1.O, 2.2, 2.3 and 2.4 + +Informations: http://www.id3.org/ + +Author: Victor Stinner +""" + +from hachoir_core.field import (FieldSet, MatchError, ParserError, + Enum, UInt8, UInt24, UInt32, + CString, String, RawBytes, + Bit, Bits, NullBytes, NullBits) +from hachoir_core.text_handler import textHandler +from hachoir_core.tools import humanDuration +from hachoir_core.endian import NETWORK_ENDIAN + +class ID3v1(FieldSet): + static_size = 128 * 8 + GENRE_NAME = { + 0: u"Blues", + 1: u"Classic Rock", + 2: u"Country", + 3: u"Dance", + 4: u"Disco", + 5: u"Funk", + 6: u"Grunge", + 7: u"Hip-Hop", + 8: u"Jazz", + 9: u"Metal", + 10: u"New Age", + 11: u"Oldies", + 12: u"Other", + 13: u"Pop", + 14: u"R&B", + 15: u"Rap", + 16: u"Reggae", + 17: u"Rock", + 18: u"Techno", + 19: u"Industrial", + 20: u"Alternative", + 21: u"Ska", + 22: u"Death Metal", + 23: u"Pranks", + 24: u"Soundtrack", + 25: u"Euro-Techno", + 26: u"Ambient", + 27: u"Trip-Hop", + 28: u"Vocal", + 29: u"Jazz+Funk", + 30: u"Fusion", + 31: u"Trance", + 32: u"Classical", + 33: u"Instrumental", + 34: u"Acid", + 35: u"House", + 36: u"Game", + 37: u"Sound Clip", + 38: u"Gospel", + 39: u"Noise", + 40: u"AlternRock", + 41: u"Bass", + 42: u"Soul", + 43: u"Punk", + 44: u"Space", + 45: u"Meditative", + 46: u"Instrumental Pop", + 47: u"Instrumental Rock", + 48: u"Ethnic", + 49: u"Gothic", + 50: u"Darkwave", + 51: u"Techno-Industrial", + 52: u"Electronic", + 53: u"Pop-Folk", + 54: u"Eurodance", + 55: u"Dream", + 56: u"Southern Rock", + 57: u"Comedy", + 58: u"Cult", + 59: u"Gangsta", + 60: u"Top 40", + 61: u"Christian Rap", + 62: u"Pop/Funk", + 63: u"Jungle", + 64: u"Native American", + 65: 
u"Cabaret", + 66: u"New Wave", + 67: u"Psychadelic", + 68: u"Rave", + 69: u"Showtunes", + 70: u"Trailer", + 71: u"Lo-Fi", + 72: u"Tribal", + 73: u"Acid Punk", + 74: u"Acid Jazz", + 75: u"Polka", + 76: u"Retro", + 77: u"Musical", + 78: u"Rock & Roll", + 79: u"Hard Rock", + # Following are winamp extentions + 80: u"Folk", + 81: u"Folk-Rock", + 82: u"National Folk", + 83: u"Swing", + 84: u"Fast Fusion", + 85: u"Bebob", + 86: u"Latin", + 87: u"Revival", + 88: u"Celtic", + 89: u"Bluegrass", + 90: u"Avantgarde", + 91: u"Gothic Rock", + 92: u"Progressive Rock", + 93: u"Psychedelic Rock", + 94: u"Symphonic Rock", + 95: u"Slow Rock", + 96: u"Big Band", + 97: u"Chorus", + 98: u"Easy Listening", + 99: u"Acoustic", + 100: u"Humour", + 101: u"Speech", + 102: u"Chanson", + 103: u"Opera", + 104: u"Chamber Music", + 105: u"Sonata", + 106: u"Symphony", + 107: u"Booty Bass", + 108: u"Primus", + 109: u"Porn Groove", + 110: u"Satire", + 111: u"Slow Jam", + 112: u"Club", + 113: u"Tango", + 114: u"Samba", + 115: u"Folklore", + 116: u"Ballad", + 117: u"Power Ballad", + 118: u"Rhythmic Soul", + 119: u"Freestyle", + 120: u"Duet", + 121: u"Punk Rock", + 122: u"Drum Solo", + 123: u"A capella", + 124: u"Euro-House", + 125: u"Dance Hall", + 126: u"Goa", + 127: u"Drum & Bass", + 128: u"Club-House", + 129: u"Hardcore", + 130: u"Terror", + 131: u"Indie", + 132: u"Britpop", + 133: u"Negerpunk", + 134: u"Polsk Punk", + 135: u"Beat", + 136: u"Christian Gangsta Rap", + 137: u"Heavy Metal", + 138: u"Black Metal", + 139: u"Crossover", + 140: u"Contemporary Christian", + 141: u"Christian Rock ", + 142: u"Merengue", + 143: u"Salsa", + 144: u"Trash Metal", + 145: u"Anime", + 146: u"JPop", + 147: u"Synthpop" + } + + def createFields(self): + yield String(self, "signature", 3, "IDv1 signature (\"TAG\")", charset="ASCII") + if self["signature"].value != "TAG": + raise MatchError("Stream doesn't look like ID3v1 (wrong signature)!") + # TODO: Charset of below strings? 
+ yield String(self, "song", 30, "Song title", strip=" \0", charset="ISO-8859-1") + yield String(self, "author", 30, "Author", strip=" \0", charset="ISO-8859-1") + yield String(self, "album", 30, "Album title", strip=" \0", charset="ISO-8859-1") + yield String(self, "year", 4, "Year", strip=" \0", charset="ISO-8859-1") + + # TODO: Write better algorithm to guess ID3v1 version + version = self.getVersion() + if version in ("v1.1", "v1.1b"): + if version == "v1.1b": + # ID3 v1.1b + yield String(self, "comment", 29, "Comment", strip=" \0", charset="ISO-8859-1") + yield UInt8(self, "track_nb", "Track number") + else: + # ID3 v1.1 + yield String(self, "comment", 30, "Comment", strip=" \0", charset="ISO-8859-1") + yield Enum(UInt8(self, "genre", "Genre"), self.GENRE_NAME) + else: + # ID3 v1.0 + yield String(self, "comment", 31, "Comment", strip=" \0", charset="ISO-8859-1") + + def getVersion(self): + addr = self.absolute_address + 126*8 + bytes = self.stream.readBytes(addr, 2) + + # last byte (127) is not space? + if bytes[1] != ' ': + # byte 126 is nul? + if bytes[0] == 0x00: + return "v1.1" + else: + return "v1.1b" + else: + return "1.0" + + def createDescription(self): + version = self.getVersion() + return "ID3 %s: author=%s, song=%s" % ( + version, self["author"].value, self["song"].value) + +def getCharset(field): + try: + key = field.value + return ID3_StringCharset.charset_name[key] + except KeyError: + raise ParserError("ID3v2: Invalid charset (%s)." 
class ID3_String(FieldSet):
    """Frame payload that is a single ISO-8859-1 string (no charset byte)."""
    STRIP = " \0"
    def createFields(self):
        yield String(self, "text", self._size/8, "Text", charset="ISO-8859-1", strip=self.STRIP)

class ID3_StringCharset(ID3_String):
    """Frame payload: one charset byte followed by text in that charset."""
    STRIP = " \0"
    # Human-readable labels for the charset byte (used by the Enum display).
    charset_desc = {
        0: "ISO-8859-1",
        1: "UTF-16 with BOM",
        2: "UTF-16 (big endian)",
        3: "UTF-8"
    }
    # Python codec names for the same values (used to decode the text).
    charset_name = {
        0: "ISO-8859-1",
        1: "UTF-16",
        2: "UTF-16-BE",
        3: "UTF-8"
    }
    def createFields(self):
        yield Enum(UInt8(self, "charset"), self.charset_desc)
        size = (self.size - self.current_size)/8
        if not size:
            return
        charset = getCharset(self["charset"])
        yield String(self, "text", size, "Text", charset=charset, strip=self.STRIP)

class ID3_GEOB(ID3_StringCharset):
    """GEOB frame: general encapsulated object (mime, filename, description, data)."""
    def createFields(self):
        yield Enum(UInt8(self, "charset"), self.charset_desc)
        charset = getCharset(self["charset"])
        yield CString(self, "mime", "MIME type", charset=charset)
        yield CString(self, "filename", "File name", charset=charset)
        yield CString(self, "description", "Content description", charset=charset)
        size = (self.size - self.current_size) // 8
        if not size:
            return
        yield String(self, "text", size, "Text", charset=charset)

class ID3_Comment(ID3_StringCharset):
    """COMM frame: language code, short title, then the comment text."""
    def createFields(self):
        yield Enum(UInt8(self, "charset"), self.charset_desc)
        yield String(self, "lang", 3, "Language", charset="ASCII")
        charset = getCharset(self["charset"])
        yield CString(self, "title", "Title", charset=charset, strip=self.STRIP)
        size = (self.size - self.current_size) // 8
        if not size:
            return
        yield String(self, "text", size, "Text", charset=charset, strip=self.STRIP)

class ID3_StringTitle(ID3_StringCharset):
    """Frame with a NUL-terminated title followed by free text (TXXX/WXXX)."""
    def createFields(self):
        yield Enum(UInt8(self, "charset"), self.charset_desc)
        if self.current_size == self.size:
            return
        charset = getCharset(self["charset"])
        yield CString(self, "title", "Title", charset=charset, strip=self.STRIP)
        size = (self.size - self.current_size)/8
        if not size:
            return
        yield String(self, "text", size, "Text", charset=charset, strip=self.STRIP)

class ID3_Private(FieldSet):
    """PRIV frame: opaque owner-defined data."""
    def createFields(self):
        size = self._size/8
        # TODO: Strings charset?
        if self.stream.readBytes(self.absolute_address, 9) == "PeakValue":
            yield String(self, "text", 9, "Text")
            size -= 9
        yield String(self, "content", size, "Content")

class ID3_TrackLength(FieldSet):
    """TLEN frame: track length in milliseconds as an ASCII decimal string."""
    def createFields(self):
        yield NullBytes(self, "zero", 1)
        yield textHandler(String(self, "length", self._size/8 - 1,
            "Length in ms", charset="ASCII"), self.computeLength)

    def computeLength(self, field):
        # Best-effort display helper: fall back to the raw text if the field
        # does not contain a valid integer.
        # BUGFIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; int() failure raises ValueError.
        try:
            ms = int(field.value)
            return humanDuration(ms)
        except ValueError:
            return field.value

class ID3_Picture23(FieldSet):
    """APIC/PIC frame, ID3 v2.2/2.3 layout (3-char image format)."""
    pict_type_name = {
        0x00: "Other",
        0x01: "32x32 pixels 'file icon' (PNG only)",
        0x02: "Other file icon",
        0x03: "Cover (front)", 0x04: "Cover (back)",
        0x05: "Leaflet page", 0x06: "Media (e.g. lable side of CD)",
        0x07: "Lead artist/lead performer/soloist", 0x08: "Artist/performer",
        0x09: "Conductor", 0x0A: "Band/Orchestra", 0x0B: "Composer",
        0x0C: "Lyricist/text writer", 0x0D: "Recording Location",
        0x0E: "During recording", 0x0F: "During performance",
        0x10: "Movie/video screen capture", 0x11: "A bright coloured fish",
        0x12: "Illustration", 0x13: "Band/artist logotype",
        0x14: "Publisher/Studio logotype"
    }
    def createFields(self):
        yield Enum(UInt8(self, "charset"), ID3_StringCharset.charset_desc)
        charset = getCharset(self["charset"])
        yield String(self, "img_fmt", 3, charset="ASCII")
        yield Enum(UInt8(self, "pict_type"), self.pict_type_name)
        yield CString(self, "text", "Text", charset=charset, strip=" \0")
        size = (self._size - self._current_size) / 8
        if size:
            yield RawBytes(self, "img_data", size)

class ID3_Picture24(FieldSet):
    """APIC frame, ID3 v2.4 layout (NUL-terminated MIME type)."""
    def createFields(self):
        yield Enum(UInt8(self, "charset"), ID3_StringCharset.charset_desc)
        charset = getCharset(self["charset"])
        yield CString(self, "mime", "MIME type", charset=charset)
        yield Enum(UInt8(self, "pict_type"), ID3_Picture23.pict_type_name)
        yield CString(self, "description", charset=charset)
        size = (self._size - self._current_size) / 8
        if size:
            yield RawBytes(self, "img_data", size)
type", charset=charset) + yield Enum(UInt8(self, "pict_type"), ID3_Picture23.pict_type_name) + yield CString(self, "description", charset=charset) + size = (self._size - self._current_size) / 8 + if size: + yield RawBytes(self, "img_data", size) + +class ID3_Chunk(FieldSet): + endian = NETWORK_ENDIAN + tag22_name = { + "TT2": "Track title", + "TP1": "Artist", + "TRK": "Track number", + "COM": "Comment", + "TCM": "Composer", + "TAL": "Album", + "TYE": "Year", + "TEN": "Encoder", + "TCO": "Content type", + "PIC": "Picture" + } + tag23_name = { + "COMM": "Comment", + "GEOB": "Encapsulated object", + "PRIV": "Private", + "TPE1": "Artist", + "TCOP": "Copyright", + "TALB": "Album", + "TENC": "Encoder", + "TYER": "Year", + "TSSE": "Encoder settings", + "TCOM": "Composer", + "TRCK": "Track number", + "PCNT": "Play counter", + "TCON": "Content type", + "TLEN": "Track length", + "TIT2": "Track title", + "WXXX": "User defined URL" + } + handler = { + "COMM": ID3_Comment, + "COM": ID3_Comment, + "GEOB": ID3_GEOB, + "PIC": ID3_Picture23, + "APIC": ID3_Picture24, + "PRIV": ID3_Private, + "TXXX": ID3_StringTitle, + "WOAR": ID3_String, + "WXXX": ID3_StringTitle, + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + if 3 <= self["../ver_major"].value: + self._size = (10 + self["size"].value) * 8 + else: + self._size = (self["size"].value + 6) * 8 + + def createFields(self): + if 3 <= self["../ver_major"].value: + # ID3 v2.3 and 2.4 + yield Enum(String(self, "tag", 4, "Tag", charset="ASCII", strip="\0"), ID3_Chunk.tag23_name) + if 4 <= self["../ver_major"].value: + yield ID3_Size(self, "size") # ID3 v2.4 + else: + yield UInt32(self, "size") # ID3 v2.3 + + yield Bit(self, "tag_alter", "Tag alter preservation") + yield Bit(self, "file_alter", "Tag alter preservation") + yield Bit(self, "rd_only", "Read only?") + yield NullBits(self, "padding[]", 5) + + yield Bit(self, "compressed", "Frame is compressed?") + yield Bit(self, "encrypted", "Frame is encrypted?") + yield 
Bit(self, "group", "Grouping identity") + yield NullBits(self, "padding[]", 5) + size = self["size"].value + is_compressed = self["compressed"].value + else: + # ID3 v2.2 + yield Enum(String(self, "tag", 3, "Tag", charset="ASCII", strip="\0"), ID3_Chunk.tag22_name) + yield UInt24(self, "size") + size = self["size"].value - self.current_size/8 + 6 + is_compressed = False + + if size: + cls = None + if not(is_compressed): + tag = self["tag"].value + if tag in ID3_Chunk.handler: + cls = ID3_Chunk.handler[tag] + elif tag[0] == "T": + cls = ID3_StringCharset + if cls: + yield cls(self, "content", "Content", size=size*8) + else: + yield RawBytes(self, "content", size, "Raw data content") + + def createDescription(self): + if self["size"].value != 0: + return "ID3 Chunk: %s" % self["tag"].display + else: + return "ID3 Chunk: (terminator)" + +class ID3_Size(Bits): + static_size = 32 + + def __init__(self, parent, name, description=None): + Bits.__init__(self, parent, name, 32, description) + + def createValue(self): + data = self.parent.stream.readBytes(self.absolute_address, 4) + # TODO: Check that bit #7 of each byte is nul: not(ord(data[i]) & 127) + return reduce(lambda x, y: x*128 + y, (ord(item) for item in data )) + +class ID3v2(FieldSet): + endian = NETWORK_ENDIAN + VALID_MAJOR_VERSIONS = (2, 3, 4) + + def __init__(self, parent, name, size=None): + FieldSet.__init__(self, parent, name, size=size) + if not self._size: + self._size = (self["size"].value + 10) * 8 + + def createDescription(self): + return "ID3 v2.%s.%s" % \ + (self["ver_major"].value, self["ver_minor"].value) + + def createFields(self): + # Signature + version + yield String(self, "header", 3, "Header (ID3)", charset="ASCII") + yield UInt8(self, "ver_major", "Version (major)") + yield UInt8(self, "ver_minor", "Version (minor)") + + # Check format + if self["header"].value != "ID3": + raise MatchError("Signature error, should be \"ID3\".") + if self["ver_major"].value not in self.VALID_MAJOR_VERSIONS \ 
+ or self["ver_minor"].value != 0: + raise MatchError( + "Unknown ID3 metadata version (2.%u.%u)" + % (self["ver_major"].value, self["ver_minor"].value)) + + # Flags + yield Bit(self, "unsync", "Unsynchronisation is used?") + yield Bit(self, "ext", "Extended header is used?") + yield Bit(self, "exp", "Experimental indicator") + yield NullBits(self, "padding[]", 5) + + # Size + yield ID3_Size(self, "size") + + # All tags + while self.current_size < self._size: + field = ID3_Chunk(self, "field[]") + yield field + if field["size"].value == 0: + break + + # Search first byte of the MPEG file + padding = self.seekBit(self._size) + if padding: + yield padding + diff --git a/libs/hachoir_parser/audio/itunesdb.py b/libs/hachoir_parser/audio/itunesdb.py new file mode 100644 index 0000000..3472d2d --- /dev/null +++ b/libs/hachoir_parser/audio/itunesdb.py @@ -0,0 +1,433 @@ +""" +iPod iTunesDB parser. + +Documentation: +- http://ipodlinux.org/ITunesDB + +Author: Romain HERAULT +Creation date: 19 august 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt8, UInt16, UInt32, UInt64, TimestampMac32, + String, Float32, NullBytes, Enum) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.tools import humanDuration +from hachoir_core.text_handler import displayHandler, filesizeHandler + +list_order={ + 1 : "playlist order (manual sort order)", + 2 : "???", + 3 : "songtitle", + 4 : "album", + 5 : "artist", + 6 : "bitrate", + 7 : "genre", + 8 : "kind", + 9 : "date modified", + 10 : "track number", + 11 : "size", + 12 : "time", + 13 : "year", + 14 : "sample rate", + 15 : "comment", + 16 : "date added", + 17 : "equalizer", + 18 : "composer", + 19 : "???", + 20 : "play count", + 21 : "last played", + 22 : "disc number", + 23 : "my rating", + 24 : "release date", + 25 : "BPM", + 26 : "grouping", + 27 : "category", + 28 : "description", + 29 : "show", + 30 : "season", + 31 : "episode number" + } + +class DataObject(FieldSet): + 
type_name={ + 1:"Title", + 2:"Location", + 3:"Album", + 4:"Artist", + 5:"Genre", + 6:"Filetype", + 7:"EQ Setting", + 8:"Comment", + 9:"Category", + 12:"Composer", + 13:"Grouping", + 14:"Description text", + 15:"Podcast Enclosure URL", + 16:"Podcast RSS URL", + 17:"Chapter data", + 18:"Subtitle", + 19:"Show (for TV Shows only)", + 20:"Episode", + 21:"TV Network", + 50:"Smart Playlist Data", + 51:"Smart Playlist Rules", + 52:"Library Playlist Index", + 100:"Column info", + } + + mhod52_sort_index_type_name={ + 3:"Title", + 4:"Album, then Disk/Tracknumber, then Title", + 5:"Artist, then Album, then Disc/Tracknumber, then Title", + 7:"Genre, then Artist, then Album, then Disc/Tracknumber, then Title", + 8:"Composer, then Title" + } + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = self["entry_length"].value *8 + + def createFields(self): + yield String(self, "header_id", 4, "Data Object Header Markup (\"mhod\")", charset="ISO-8859-1") + yield UInt32(self, "header_length", "Header Length") + yield UInt32(self, "entry_length", "Entry Length") + yield Enum(UInt32(self, "type", "type"),self.type_name) + if(self["type"].value<15): + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "position", "Position") + yield UInt32(self, "length", "String Length in bytes") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield String(self, "string", self["length"].value, "String Data", charset="UTF-16-LE") + elif (self["type"].value<17): + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield String(self, "string", self._size/8-self["header_length"].value, "String Data", charset="UTF-8") + elif (self["type"].value == 52): + yield UInt32(self, "unknown[]", "unk1") + yield UInt32(self, "unknown[]", "unk2") + yield Enum(UInt32(self, "sort_index_type", "Sort Index Type"),self.mhod52_sort_index_type_name) + yield UInt32(self, "entry_count", "Entry Count") + indexes_size = 
self["entry_count"].value*4 + padding_offset = self["entry_length"].value - indexes_size + padding = self.seekByte(padding_offset, "header padding") + if padding: + yield padding + for i in xrange(self["entry_count"].value): + yield UInt32(self, "index["+str(i)+"]", "Index of the "+str(i)+"nth mhit") + else: + padding = self.seekByte(self["header_length"].value, "header padding") + if padding: + yield padding + padding = self.seekBit(self._size, "entry padding") + if padding: + yield padding + +class TrackItem(FieldSet): + x1_type_name={ + 0:"AAC or CBR MP3", + 1:"VBR MP3" + } + x2_type_name={ + 0:"AAC", + 1:"MP3" + } + media_type_name={ + 0x00:"Audio/Video", + 0x01:"Audio", + 0x02:"Video", + 0x04:"Podcast", + 0x06:"Video Podcast", + 0x08:"Audiobook", + 0x20:"Music Video", + 0x40:"TV Show", + 0X60:"TV Show (Music lists)", + } + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = self["entry_length"].value *8 + + def createFields(self): + yield String(self, "header_id", 4, "Track Item Header Markup (\"mhit\")", charset="ISO-8859-1") + yield UInt32(self, "header_length", "Header Length") + yield UInt32(self, "entry_length", "Entry Length") + yield UInt32(self, "string_number", "Number of Strings") + yield UInt32(self, "unique_id", "Unique ID") + yield UInt32(self, "visible_tag", "Visible Tag") + yield String(self, "file_type", 4, "File Type") + yield Enum(UInt8(self, "x1_type", "Extended Type 1"),self.x1_type_name) + yield Enum(UInt8(self, "x2_type", "Extended type 2"),self.x2_type_name) + yield UInt8(self, "compilation_flag", "Compilation Flag") + yield UInt8(self, "rating", "Rating") + yield TimestampMac32(self, "added_date", "Date when the item was added") + yield filesizeHandler(UInt32(self, "size", "Track size in bytes")) + yield displayHandler(UInt32(self, "length", "Track length in milliseconds"), humanDuration) + yield UInt32(self, "track_number", "Number of this track") + yield UInt32(self, "total_track", "Total number of 
tracks") + yield UInt32(self, "year", "Year of the track") + yield UInt32(self, "bitrate", "Bitrate") + yield UInt32(self, "samplerate", "Sample Rate") + yield UInt32(self, "volume", "volume") + yield UInt32(self, "start_time", "Start playing at, in milliseconds") + yield UInt32(self, "stop_time", "Stop playing at, in milliseconds") + yield UInt32(self, "soundcheck", "SoundCheck preamp") + yield UInt32(self, "playcount_1", "Play count of the track") + yield UInt32(self, "playcount_2", "Play count of the track (identical to playcount_1)") + yield UInt32(self, "last_played_time", "Time the song was last played") + yield UInt32(self, "disc_number", "disc number in multi disc sets") + yield UInt32(self, "total_discs", "Total number of discs in the disc set") + yield UInt32(self, "userid", "User ID in the DRM scheme") + yield TimestampMac32(self, "last_modified", "Time of the last modification of the track") + yield UInt32(self, "bookmark_time", "Bookmark time for AudioBook") + yield UInt64(self, "dbid", "Unique DataBase ID for the song (identical in mhit and in mhii)") + yield UInt8(self, "checked", "song is checked") + yield UInt8(self, "application_rating", "Last Rating before change") + yield UInt16(self, "BPM", "BPM of the track") + yield UInt16(self, "artwork_count", "number of artworks fo this item") + yield UInt16(self, "unknown[]") + yield UInt32(self, "artwork_size", "Total size of artworks in bytes") + yield UInt32(self, "unknown[]") + yield Float32(self, "sample_rate_2", "Sample Rate express in float") + yield UInt32(self, "released_date", "Date of release in Music Store or in Podcast") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt8(self, "has_artwork", "0x01 for track with artwork, 0x02 otherwise") + yield UInt8(self, "skip_wen_shuffling", "Skip that track when shuffling") + yield UInt8(self, "remember_playback_position", 
"Remember playback position") + yield UInt8(self, "flag4", "Flag 4") + yield UInt64(self, "dbid2", "Unique DataBase ID for the song (identical as above)") + yield UInt8(self, "lyrics_flag", "Lyrics Flag") + yield UInt8(self, "movie_file_flag", "Movie File Flag") + yield UInt8(self, "played_mark", "Track has been played") + yield UInt8(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "sample_count", "Number of samples in the song (only for WAV and AAC files)") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield Enum(UInt32(self, "media_type", "Media Type for video iPod"),self.media_type_name) + yield UInt32(self, "season_number", "Season Number") + yield UInt32(self, "episode_number", "Episode Number") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + padding = self.seekByte(self["header_length"].value, "header padding") + if padding: + yield padding + + #while ((self.stream.readBytes(0, 4) == 'mhod') and ((self.current_size/8) < self["entry_length"].value)): + for i in xrange(self["string_number"].value): + yield DataObject(self, "data[]") + padding = self.seekBit(self._size, "entry padding") + if padding: + yield padding + +class TrackList(FieldSet): + def createFields(self): + yield String(self, "header_id", 4, "Track List Header Markup (\"mhlt\")", charset="ISO-8859-1") + yield UInt32(self, "header_length", "Header Length") + yield UInt32(self, "track_number", "Number of Tracks") + + padding = self.seekByte(self["header_length"].value, "header padding") + if padding: + yield padding + + for i in xrange(self["track_number"].value): + yield TrackItem(self, "track[]") + +class PlaylistItem(FieldSet): + def __init__(self, *args, **kw): + FieldSet.__init__(self, 
class Playlist(FieldSet):
    """An "mhyp" record: a playlist (regular or master) with its items."""
    is_master_pl_name={
        0:"Regular playlist",
        1:"Master playlist"
    }

    is_podcast_name={
        0:"Normal Playlist List",
        1:"Podcast Playlist List"
    }

    list_sort_order_name={
        1:"Manual Sort Order", 2:"???", 3:"Song Title", 4:"Album",
        5:"Artist", 6:"Bitrate", 7:"Genre", 8:"Kind", 9:"Date Modified",
        10:"Track Number", 11:"Size", 12:"Time", 13:"Year",
        14:"Sample Rate", 15:"Comment", 16:"Date Added", 17:"Equalizer",
        18:"Composer", 19:"???", 20:"Play Count", 21:"Last Played",
        22:"Disc Number", 23:"My Rating", 24:"Release Date", 25:"BPM",
        26:"Grouping", 27:"Category", 28:"Description", 29:"Show",
        30:"Season", 31:"Episode Number"
    }

    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        self._size = self["entry_length"].value *8

    def createFields(self):
        yield String(self, "header_id", 4, "Playlist List Header Markup (\"mhyp\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield UInt32(self, "data_object_child_count", "Number of Child Data Objects")
        yield UInt32(self, "playlist_count", "Number of Playlist Items")
        yield Enum(UInt8(self, "type", "Normal or master playlist?"), self.is_master_pl_name)
        yield UInt8(self, "XXX1", "XXX1")
        yield UInt8(self, "XXX2", "XXX2")
        yield UInt8(self, "XXX3", "XXX3")
        yield TimestampMac32(self, "creation_date", "Date when the playlist was created")
        yield UInt64(self, "playlistid", "Persistent Playlist ID")
        yield UInt32(self, "unk3", "unk3")
        yield UInt16(self, "string_mhod_count", "Number of string MHODs for this playlist")
        yield Enum(UInt16(self, "is_podcast", "Playlist or Podcast List?"), self.is_podcast_name)
        yield Enum(UInt32(self, "sort_order", "Playlist Sort Order"), self.list_sort_order_name)

        padding = self.seekByte(self["header_length"].value, "entry padding")
        if padding:
            yield padding

        # Title/sort mhods first, then the playlist entries themselves.
        for i in xrange(self["data_object_child_count"].value):
            yield DataObject(self, "mhod[]")

        for i in xrange(self["playlist_count"].value):
            yield PlaylistItem(self, "playlist_item[]")



class PlaylistList(FieldSet):
    """An "mhlp" record: the list of all playlists."""
    def createFields(self):
        yield String(self, "header_id", 4, "Playlist List Header Markup (\"mhlp\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "playlist_number", "Number of Playlists")

        padding = self.seekByte(self["header_length"].value, "header padding")
        if padding:
            yield padding

        for i in xrange(self["playlist_number"].value):
            yield Playlist(self, "playlist[]")

class DataSet(FieldSet):
    """An "mhsd" record: wraps either the track list or a playlist list."""
    type_name={
        1:"Track List",
        2:"Play List",
        3:"Podcast List"
    }
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        self._size = self["entry_length"].value *8

    def createFields(self):
        yield String(self, "header_id", 4, "DataSet Header Markup (\"mhsd\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield Enum(UInt32(self, "type", "type"),self.type_name)
        padding = self.seekByte(self["header_length"].value, "header_raw")
        if padding:
            yield padding
        if self["type"].value == 1:
            yield TrackList(self, "tracklist[]")
        if self["type"].value == 2:
            yield PlaylistList(self, "playlist_list[]");
        if self["type"].value == 3:
            yield PlaylistList(self, "podcast_list[]");
        padding = self.seekBit(self._size, "entry padding")
        if padding:
            yield padding

class DataBase(FieldSet):
    # NOTE(review): createFields is commented out and nothing visible here
    # instantiates this class (ITunesDBFile below parses the "mhbd" header
    # itself) -- it looks vestigial; confirm before removing.
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        self._size = self["entry_length"].value *8

#    def createFields(self):

class ITunesDBFile(Parser):
    """Top-level parser for the "mhbd" iTunesDB file."""
    PARSER_TAGS = {
        "id": "itunesdb",
        "category": "audio",
        "min_size": 44*8,
        "magic": (('mhbd',0),),
        "description": "iPod iTunesDB file"
    }

    endian = LITTLE_ENDIAN

    def validate(self):
        return self.stream.readBytes(0, 4) == 'mhbd'

    def createFields(self):
        yield String(self, "header_id", 4, "DataBase Header Markup (\"mhbd\")", charset="ISO-8859-1")
        yield UInt32(self, "header_length", "Header Length")
        yield UInt32(self, "entry_length", "Entry Length")
        yield UInt32(self, "unknown[]")
        yield UInt32(self, "version_number", "Version Number")
        yield UInt32(self, "child_number", "Number of Children")
        yield UInt64(self, "id", "ID for this database")
        yield UInt32(self, "unknown[]")
        yield UInt64(self, "initial_dbid", "Initial DBID")
        # Skip the rest of the header, then read the child datasets.
        size = self["header_length"].value-self.current_size/ 8
        if size>0:
            yield NullBytes(self, "padding", size)
        for i in xrange(self["child_number"].value):
            yield DataSet(self, "dataset[]")
        padding = self.seekByte(self["entry_length"].value, "entry padding")
        if padding:
            yield padding

    def createContentSize(self):
        # Size in bits of the whole database.
        return self["entry_length"].value * 8

# ==== libs/hachoir_parser/audio/midi.py (new file added by this patch) ====
+1,246 @@ +""" +Musical Instrument Digital Interface (MIDI) audio file parser. + +Documentation: + - Standard MIDI File Format, Dustin Caldwell (downloaded on wotsit.org) + +Author: Victor Stinner +Creation: 27 december 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, Bits, ParserError, + String, UInt32, UInt24, UInt16, UInt8, Enum, RawBits, RawBytes) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.tools import createDict, humanDurationNanosec +from hachoir_parser.common.tracker import NOTE_NAME + +MAX_FILESIZE = 10 * 1024 * 1024 + +class Integer(Bits): + def __init__(self, parent, name, description=None): + Bits.__init__(self, parent, name, 8, description) + stream = parent.stream + addr = self.absolute_address + value = 0 + while True: + bits = stream.readBits(addr, 8, parent.endian) + value = (value << 7) + (bits & 127) + if not(bits & 128): + break + addr += 8 + self._size += 8 + if 32 < self._size: + raise ParserError("Integer size is bigger than 32-bit") + self.createValue = lambda: value + +def parseNote(parser): + yield Enum(UInt8(parser, "note", "Note number"), NOTE_NAME) + yield UInt8(parser, "velocity") + +def parseControl(parser): + yield UInt8(parser, "control", "Controller number") + yield UInt8(parser, "value", "New value") + +def parsePatch(parser): + yield UInt8(parser, "program", "New program number") + +def parseChannel(parser, size=1): + yield UInt8(parser, "channel", "Channel number") + +def parsePitch(parser): + yield UInt8(parser, "bottom", "(least sig) 7 bits of value") + yield UInt8(parser, "top", "(most sig) 7 bits of value") + +def parseText(parser, size): + yield String(parser, "text", size) + +def parseSMPTEOffset(parser, size): + yield RawBits(parser, "padding", 1) + yield Enum(Bits(parser, "frame_rate", 2), + {0:"24 fps", 1:"25 fps", 2:"30 fps (drop frame)", 3:"30 fps"}) + yield Bits(parser, "hour", 5) + yield 
UInt8(parser, "minute") + yield UInt8(parser, "second") + yield UInt8(parser, "frame") + yield UInt8(parser, "subframe", "100 subframes per frame") + +def formatTempo(field): + return humanDurationNanosec(field.value*1000) + +def parseTempo(parser, size): + yield textHandler(UInt24(parser, "microsec_quarter", "Microseconds per quarter note"), formatTempo) + +def parseTimeSignature(parser, size): + yield UInt8(parser, "numerator", "Numerator of time signature") + yield UInt8(parser, "denominator", "denominator of time signature 2=quarter 3=eighth, etc.") + yield UInt8(parser, "nb_tick", "Number of ticks in metronome click") + yield UInt8(parser, "nb_32nd_note", "Number of 32nd notes to the quarter note") + +class Command(FieldSet): + COMMAND = {} + for channel in xrange(16): + COMMAND[0x80+channel] = ("Note off (channel %u)" % channel, parseNote) + COMMAND[0x90+channel] = ("Note on (channel %u)" % channel, parseNote) + COMMAND[0xA0+channel] = ("Key after-touch (channel %u)" % channel, parseNote) + COMMAND[0xB0+channel] = ("Control change (channel %u)" % channel, parseControl) + COMMAND[0xC0+channel] = ("Program (patch) change (channel %u)" % channel, parsePatch) + COMMAND[0xD0+channel] = ("Channel after-touch (channel %u)" % channel, parseChannel) + COMMAND[0xE0+channel] = ("Pitch wheel change (channel %u)" % channel, parsePitch) + COMMAND_DESC = createDict(COMMAND, 0) + COMMAND_PARSER = createDict(COMMAND, 1) + + META_COMMAND_TEXT = 1 + META_COMMAND_NAME = 3 + META_COMMAND = { + 0x00: ("Sets the track's sequence number", None), + 0x01: ("Text event", parseText), + 0x02: ("Copyright info", parseText), + 0x03: ("Sequence or Track name", parseText), + 0x04: ("Track instrument name", parseText), + 0x05: ("Lyric", parseText), + 0x06: ("Marker", parseText), + 0x07: ("Cue point", parseText), + 0x20: ("MIDI Channel Prefix", parseChannel), + 0x2F: ("End of the track", None), + 0x51: ("Set tempo", parseTempo), + 0x54: ("SMPTE offset", parseSMPTEOffset), + 0x58: ("Time 
Signature", parseTimeSignature), + 0x59: ("Key signature", None), + 0x7F: ("Sequencer specific information", None), + } + META_COMMAND_DESC = createDict(META_COMMAND, 0) + META_COMMAND_PARSER = createDict(META_COMMAND, 1) + + def __init__(self, *args, **kwargs): + if 'prev_command' in kwargs: + self.prev_command = kwargs['prev_command'] + del kwargs['prev_command'] + else: + self.prev_command = None + self.command = None + FieldSet.__init__(self, *args, **kwargs) + + def createFields(self): + yield Integer(self, "time", "Delta time in ticks") + next = self.stream.readBits(self.absolute_address+self.current_size, 8, self.root.endian) + if next & 0x80 == 0: + # "Running Status" command + if self.prev_command is None: + raise ParserError("Running Status command not preceded by another command.") + self.command = self.prev_command.command + else: + yield Enum(textHandler(UInt8(self, "command"), hexadecimal), self.COMMAND_DESC) + self.command = self["command"].value + if self.command == 0xFF: + yield Enum(textHandler(UInt8(self, "meta_command"), hexadecimal), self.META_COMMAND_DESC) + yield UInt8(self, "data_len") + size = self["data_len"].value + if size: + command = self["meta_command"].value + if command in self.META_COMMAND_PARSER: + parser = self.META_COMMAND_PARSER[command] + else: + parser = None + if parser: + for field in parser(self, size): + yield field + else: + yield RawBytes(self, "data", size) + else: + if self.command not in self.COMMAND_PARSER: + raise ParserError("Unknown command: %s" % self["command"].display) + parser = self.COMMAND_PARSER[self.command] + for field in parser(self): + yield field + + def createDescription(self): + if "meta_command" in self: + return self["meta_command"].display + else: + return self.COMMAND_DESC[self.command] + +class Track(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = (8 + self["size"].value) * 8 + + def createFields(self): + yield String(self, "marker", 4, "Track marker 
(MTrk)", charset="ASCII") + yield UInt32(self, "size") + cur = None + if True: + while not self.eof: + cur = Command(self, "command[]", prev_command=cur) + yield cur + else: + size = self["size"].value + if size: + yield RawBytes(self, "raw", size) + + def createDescription(self): + command = self["command[0]"] + if "meta_command" in command \ + and command["meta_command"].value in (Command.META_COMMAND_TEXT, Command.META_COMMAND_NAME) \ + and "text" in command: + return command["text"].value.strip("\r\n") + else: + return "" + +class Header(FieldSet): + static_size = 10*8 + FILE_FORMAT = { + 0: "Single track", + 1: "Multiple tracks, synchronous", + 2: "Multiple tracks, asynchronous", + } + + def createFields(self): + yield UInt32(self, "size") + yield Enum(UInt16(self, "file_format"), self.FILE_FORMAT) + yield UInt16(self, "nb_track") + yield UInt16(self, "delta_time", "Delta-time ticks per quarter note") + + def createDescription(self): + return "%s; %s tracks" % ( + self["file_format"].display, self["nb_track"].value) + +class MidiFile(Parser): + MAGIC = "MThd" + PARSER_TAGS = { + "id": "midi", + "category": "audio", + "file_ext": ["mid", "midi"], + "mime": (u"audio/mime", ), + "magic": ((MAGIC, 0),), + "min_size": 64, + "description": "MIDI audio" + } + endian = BIG_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 4) != self.MAGIC: + return "Invalid signature" + if self["header/size"].value != 6: + return "Invalid header size" + return True + + def createFields(self): + yield String(self, "signature", 4, r"MIDI signature (MThd)", charset="ASCII") + yield Header(self, "header") + while not self.eof: + yield Track(self, "track[]") + + def createDescription(self): + return "MIDI audio: %s" % self["header"].description + + def createContentSize(self): + count = self["/header/nb_track"].value - 1 + start = self["track[%u]" % count].absolute_address + # Search "End of track" of last track + end = self.stream.searchBytes("\xff\x2f\x00", start, 
MAX_FILESIZE*8) + if end is not None: + return end + 3*8 + return None + diff --git a/libs/hachoir_parser/audio/mod.py b/libs/hachoir_parser/audio/mod.py new file mode 100644 index 0000000..75025e0 --- /dev/null +++ b/libs/hachoir_parser/audio/mod.py @@ -0,0 +1,149 @@ +""" +Parser of FastTrackerII Extended Module (XM) version 1.4 + +Documents: +- Modplug source code (file modplug/soundlib/Load_mod.cpp) + http://sourceforge.net/projects/modplug +- Dumb source code (files include/dumb.h and src/it/readmod.c + http://dumb.sf.net/ +- Documents on "MOD" format on Wotsit + http://www.wotsit.org + +Compressed formats (i.e. starting with "PP20" or having "PACK" as type +are not handled. Also NoiseTracker's NST modules aren't handled, although +it might be possible: no file format and 15 samples + +Author: Christophe GISQUET +Creation: 18th February 2007 +""" + +from math import log10 +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + Bits, UInt16, UInt8, + RawBytes, String, GenericVector) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler + +# Old NoiseTracker 15-samples modules can have anything here. 
+MODULE_TYPE = { + "M.K.": ("Noise/Pro-Tracker", 4), + "M!K!": ("Noise/Pro-Tracker", 4), + "M&K&": ("Noise/Pro-Tracker", 4), + "RASP": ("StarTrekker", 4), + "FLT4": ("StarTrekker", 4), + "FLT8": ("StarTrekker", 8), + "6CHN": ("FastTracker", 6), + "8CHN": ("FastTracker", 8), + "CD81": ("Octalyser", 8), + "OCTA": ("Octalyser", 8), + "FA04": ("Digital Tracker", 4), + "FA06": ("Digital Tracker", 6), + "FA08": ("Digital Tracker", 8), +} + +def getFineTune(val): + return ("0", "1", "2", "3", "4", "5", "6", "7", "8", + "-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1")[val.value] + +def getVolume(val): + return "%.1f dB" % (20.0*log10(val.value/64.0)) + +class SampleInfo(FieldSet): + static_size = 30*8 + def createFields(self): + yield String(self, "name", 22, strip='\0') + yield UInt16(self, "sample_count") + yield textHandler(UInt8(self, "fine_tune"), getFineTune) + yield textHandler(UInt8(self, "volume"), getVolume) + yield UInt16(self, "loop_start", "Loop start offset in samples") + yield UInt16(self, "loop_len", "Loop length in samples") + + def createValue(self): + return self["name"].value + +class Header(FieldSet): + static_size = 1084*8 + + def createFields(self): + yield String(self, "name", 20, strip='\0') + yield GenericVector(self, "samples", 31, SampleInfo, "info") + yield UInt8(self, "length") + yield UInt8(self, "played_patterns_count") + yield GenericVector(self, "patterns", 128, UInt8, "position") + yield String(self, "type", 4) + + def getNumChannels(self): + return MODULE_TYPE[self["type"].value][1] + +class Note(FieldSet): + static_size = 8*4 + def createFields(self): + yield Bits(self, 4, "note_hi_nibble") + yield Bits(self, 12, "period") + yield Bits(self, 4, "note_low_nibble") + yield Bits(self, 4, "effect") + yield UInt8(self, "parameter") + +class Row(FieldSet): + def __init__(self, parent, name, channels, desc=None): + FieldSet.__init__(self, parent, name, description=desc) + self.channels = channels + self._size = 8*self.channels*4 + + def 
createFields(self): + for index in xrange(self.channels): + yield Note(self, "note[]") + +class Pattern(FieldSet): + def __init__(self, parent, name, channels, desc=None): + FieldSet.__init__(self, parent, name, description=desc) + self.channels = channels + self._size = 64*8*self.channels*4 + + def createFields(self): + for index in xrange(64): + yield Row(self, "row[]", self.channels) + +class AmigaModule(Parser): + PARSER_TAGS = { + "id": "mod", + "category": "audio", + "file_ext": ("mod", "nst", "wow", "oct", "sd0" ), + "mime": (u'audio/mod', u'audio/x-mod', u'audio/mod', u'audio/x-mod'), + "min_size": 1084*8, + "description": "Uncompressed amiga module" + } + endian = BIG_ENDIAN + + def validate(self): + t = self.stream.readBytes(1080*8, 4) + if t not in MODULE_TYPE: + return "Invalid module type '%s'" % t + self.createValue = lambda t: "%s module, %u channels" % MODULE_TYPE[t] + return True + + def createFields(self): + header = Header(self, "header") + yield header + channels = header.getNumChannels() + + # Number of patterns + patterns = 0 + for index in xrange(128): + patterns = max(patterns, + header["patterns/position[%u]" % index].value) + patterns += 1 + + # Yield patterns + for index in xrange(patterns): + yield Pattern(self, "pattern[]", channels) + + # Yield samples + for index in xrange(31): + count = header["samples/info[%u]/sample_count" % index].value + if count: + self.info("Yielding sample %u: %u samples" % (index, count)) + yield RawBytes(self, "sample_data[]", 2*count, \ + "Sample %u" % index) + diff --git a/libs/hachoir_parser/audio/modplug.py b/libs/hachoir_parser/audio/modplug.py new file mode 100644 index 0000000..d0ea0ff --- /dev/null +++ b/libs/hachoir_parser/audio/modplug.py @@ -0,0 +1,291 @@ +""" +Modplug metadata inserted into module files. 
+ +Doc: +- http://modplug.svn.sourceforge.net/viewvc/modplug/trunk/modplug/soundlib/ + +Author: Christophe GISQUET +Creation: 10th February 2007 +""" + +from hachoir_core.field import (FieldSet, + UInt32, UInt16, UInt8, Int8, Float32, + RawBytes, String, GenericVector, ParserError) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal + +MAX_ENVPOINTS = 32 + +def parseComments(parser): + size = parser["block_size"].value + if size > 0: + yield String(parser, "comment", size) + +class MidiOut(FieldSet): + static_size = 9*32*8 + def createFields(self): + for name in ("start", "stop", "tick", "noteon", "noteoff", + "volume", "pan", "banksel", "program"): + yield String(self, name, 32, strip='\0') + +class Command(FieldSet): + static_size = 32*8 + def createFields(self): + start = self.absolute_address + size = self.stream.searchBytesLength("\0", False, start) + if size > 0: + self.info("Command: %s" % self.stream.readBytes(start, size)) + yield String(self, "command", size, strip='\0') + yield RawBytes(self, "parameter", (self._size//8)-size) + +class MidiSFXExt(FieldSet): + static_size = 16*32*8 + def createFields(self): + for index in xrange(16): + yield Command(self, "command[]") + +class MidiZXXExt(FieldSet): + static_size = 128*32*8 + def createFields(self): + for index in xrange(128): + yield Command(self, "command[]") + +def parseMidiConfig(parser): + yield MidiOut(parser, "midi_out") + yield MidiSFXExt(parser, "sfx_ext") + yield MidiZXXExt(parser, "zxx_ext") + +def parseChannelSettings(parser): + size = parser["block_size"].value//4 + if size > 0: + yield GenericVector(parser, "settings", size, UInt32, "mix_plugin") + +def parseEQBands(parser): + size = parser["block_size"].value//4 + if size > 0: + yield GenericVector(parser, "gains", size, UInt32, "band") + +class SoundMixPluginInfo(FieldSet): + static_size = 128*8 + def createFields(self): + yield textHandler(UInt32(self, "plugin_id1"), hexadecimal) + 
yield textHandler(UInt32(self, "plugin_id2"), hexadecimal) + yield UInt32(self, "input_routing") + yield UInt32(self, "output_routing") + yield GenericVector(self, "routing_info", 4, UInt32, "reserved") + yield String(self, "name", 32, strip='\0') + yield String(self, "dll_name", 64, desc="Original DLL name", strip='\0') + +class ExtraData(FieldSet): + def __init__(self, parent, name, desc=None): + FieldSet.__init__(self, parent, name, desc) + self._size = (4+self["size"].value)*8 + + def createFields(self): + yield UInt32(self, "size") + size = self["size"].value + if size: + yield RawBytes(self, "data", size) + +class XPlugData(FieldSet): + def __init__(self, parent, name, desc=None): + FieldSet.__init__(self, parent, name, desc) + self._size = (4+self["size"].value)*8 + + def createFields(self): + yield UInt32(self, "size") + while not self.eof: + yield UInt32(self, "marker") + if self["marker"].value == 'DWRT': + yield Float32(self, "dry_ratio") + elif self["marker"].value == 'PORG': + yield UInt32(self, "default_program") + +def parsePlugin(parser): + yield SoundMixPluginInfo(parser, "info") + + # Check if VST setchunk present + size = parser.stream.readBits(parser.absolute_address+parser.current_size, 32, LITTLE_ENDIAN) + if 0 < size < parser.current_size + parser._size: + yield ExtraData(parser, "extra_data") + + # Check if XPlugData is present + size = parser.stream.readBits(parser.absolute_address+parser.current_size, 32, LITTLE_ENDIAN) + if 0 < size < parser.current_size + parser._size: + yield XPlugData(parser, "xplug_data") + +# Format: "XXXX": (type, count, name) +EXTENSIONS = { + # WriteInstrumentHeaderStruct@Sndfile.cpp + "XTPM": { + "..Fd": (UInt32, 1, "Flags"), + "..OF": (UInt32, 1, "Fade out"), + "..VG": (UInt32, 1, "Global Volume"), + "...P": (UInt32, 1, "Panning"), + "..EV": (UInt32, 1, "Volume Envelope"), + "..EP": (UInt32, 1, "Panning Envelope"), + ".EiP": (UInt32, 1, "Pitch Envelope"), + ".SLV": (UInt8, 1, "Volume Loop Start"), + ".ELV": 
(UInt8, 1, "Volume Loop End"), + ".BSV": (UInt8, 1, "Volume Sustain Begin"), + ".ESV": (UInt8, 1, "Volume Sustain End"), + ".SLP": (UInt8, 1, "Panning Loop Start"), + ".ELP": (UInt8, 1, "Panning Loop End"), + ".BSP": (UInt8, 1, "Panning Substain Begin"), + ".ESP": (UInt8, 1, "Padding Substain End"), + "SLiP": (UInt8, 1, "Pitch Loop Start"), + "ELiP": (UInt8, 1, "Pitch Loop End"), + "BSiP": (UInt8, 1, "Pitch Substain Begin"), + "ESiP": (UInt8, 1, "Pitch Substain End"), + ".ANN": (UInt8, 1, "NNA"), + ".TCD": (UInt8, 1, "DCT"), + ".AND": (UInt8, 1, "DNA"), + "..SP": (UInt8, 1, "Panning Swing"), + "..SV": (UInt8, 1, "Volume Swing"), + ".CFI": (UInt8, 1, "IFC"), + ".RFI": (UInt8, 1, "IFR"), + "..BM": (UInt32, 1, "Midi Bank"), + "..PM": (UInt8, 1, "Midi Program"), + "..CM": (UInt8, 1, "Midi Channel"), + ".KDM": (UInt8, 1, "Midi Drum Key"), + ".SPP": (Int8, 1, "PPS"), + ".CPP": (UInt8, 1, "PPC"), + ".[PV": (UInt32, MAX_ENVPOINTS, "Volume Points"), + ".[PP": (UInt32, MAX_ENVPOINTS, "Panning Points"), + "[PiP": (UInt32, MAX_ENVPOINTS, "Pitch Points"), + ".[EV": (UInt8, MAX_ENVPOINTS, "Volume Enveloppe"), + ".[EP": (UInt8, MAX_ENVPOINTS, "Panning Enveloppe"), + "[EiP": (UInt8, MAX_ENVPOINTS, "Pitch Enveloppe"), + ".[MN": (UInt8, 128, "Note Mapping"), + "..[K": (UInt32, 128, "Keyboard"), + "..[n": (String, 32, "Name"), + ".[nf": (String, 12, "Filename"), + ".PiM": (UInt8, 1, "MixPlug"), + "..RV": (UInt16, 1, "Volume Ramping"), + "...R": (UInt16, 1, "Resampling"), + "..SC": (UInt8, 1, "Cut Swing"), + "..SR": (UInt8, 1, "Res Swing"), + "..MF": (UInt8, 1, "Filter Mode"), + }, + + # See after "CODE tag dictionary", same place, elements with [EXT] + "STPM": { + "...C": (UInt32, 1, "Channels"), + ".VWC": (None, 0, "CreatedWith version"), + ".VGD": (None, 0, "Default global volume"), + "..TD": (None, 0, "Default tempo"), + "HIBE": (None, 0, "Embedded instrument header"), + "VWSL": (None, 0, "LastSavedWith version"), + ".MMP": (None, 0, "Plugin Mix mode"), + ".BPR": (None, 0, "Rows 
per beat"), + ".MPR": (None, 0, "Rows per measure"), + "@PES": (None, 0, "Chunk separator"), + ".APS": (None, 0, "Song Pre-amplification"), + "..MT": (None, 0, "Tempo mode"), + "VTSV": (None, 0, "VSTi volume"), + } +} + +class MPField(FieldSet): + def __init__(self, parent, name, ext, desc=None): + FieldSet.__init__(self, parent, name, desc) + self.ext = ext + self.info(self.createDescription()) + self._size = (6+self["data_size"].value)*8 + + def createFields(self): + # Identify tag + code = self.stream.readBytes(self.absolute_address, 4) + if code in self.ext: + cls, count, comment = self.ext[code] + else: + cls, count, comment = RawBytes, 1, "Unknown tag" + + # Header + yield String(self, "code", 4, comment) + yield UInt16(self, "data_size") + + # Data + if not cls: + size = self["data_size"].value + if size > 0: + yield RawBytes(self, "data", size) + elif cls in (String, RawBytes): + yield cls(self, "value", count) + else: + if count > 1: + yield GenericVector(self, "values", count, cls, "item") + else: + yield cls(self, "value") + + def createDescription(self): + return "Element '%s', size %i" % \ + (self["code"]._description, self["data_size"].value) + +def parseFields(parser): + # Determine field names + ext = EXTENSIONS[parser["block_type"].value] + if ext == None: + raise ParserError("Unknown parent '%s'" % parser["block_type"].value) + + # Parse fields + addr = parser.absolute_address + parser.current_size + while not parser.eof and parser.stream.readBytes(addr, 4) in ext: + field = MPField(parser, "field[]", ext) + yield field + addr += field._size + + # Abort on unknown codes + parser.info("End of extension '%s' when finding '%s'" % + (parser["block_type"].value, parser.stream.readBytes(addr, 4))) + +class ModplugBlock(FieldSet): + BLOCK_INFO = { + "TEXT": ("comment", True, "Comment", parseComments), + "MIDI": ("midi_config", True, "Midi configuration", parseMidiConfig), + "XFHC": ("channel_settings", True, "Channel settings", parseChannelSettings), + 
"XTPM": ("instrument_ext", False, "Instrument extensions", parseFields), + "STPM": ("song_ext", False, "Song extensions", parseFields), + } + def __init__(self, parent, name, desc=None): + FieldSet.__init__(self, parent, name, desc) + self.parseBlock = parsePlugin + + t = self["block_type"].value + self.has_size = False + if t in self.BLOCK_INFO: + self._name, self.has_size, desc, parseBlock = self.BLOCK_INFO[t] + if callable(desc): + self.createDescription = lambda: desc(self) + if parseBlock: + self.parseBlock = lambda: parseBlock(self) + + if self.has_size: + self._size = 8*(self["block_size"].value + 8) + + def createFields(self): + yield String(self, "block_type", 4) + if self.has_size: + yield UInt32(self, "block_size") + + if self.parseBlock: + for field in self.parseBlock(): + yield field + + if self.has_size: + size = self["block_size"].value - (self.current_size//8) + if size > 0: + yield RawBytes(self, "data", size, "Unknown data") + +def ParseModplugMetadata(parser): + while not parser.eof: + block = ModplugBlock(parser, "block[]") + yield block + if block["block_type"].value == "STPM": + break + + # More undocumented stuff: date ? + size = (parser._size - parser.absolute_address - parser.current_size)//8 + if size > 0: + yield RawBytes(parser, "info", size) + diff --git a/libs/hachoir_parser/audio/mpeg_audio.py b/libs/hachoir_parser/audio/mpeg_audio.py new file mode 100644 index 0000000..04e7d32 --- /dev/null +++ b/libs/hachoir_parser/audio/mpeg_audio.py @@ -0,0 +1,408 @@ +""" +MPEG audio file parser. 
+ +Creation: 12 decembre 2005 +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + MissingField, ParserError, createOrphanField, + Bit, Bits, Enum, + PaddingBits, PaddingBytes, + RawBytes) +from hachoir_parser.audio.id3 import ID3v1, ID3v2 +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.tools import humanFrequency, humanBitSize +from hachoir_core.bits import long2raw +from hachoir_core.error import HACHOIR_ERRORS +from hachoir_core.stream import InputStreamError + +# Max MP3 filesize: 200 MB +MAX_FILESIZE = 200*1024*1024*8 + +class Frame(FieldSet): + VERSION_NAME = { 0: "2.5", 2: "2", 3: "1" } + MPEG_I = 3 + MPEG_II = 2 + MPEG_II_5 = 0 + + LAYER_NAME = { 1: "III", 2: "II", 3: "I" } + LAYER_I = 3 + LAYER_II = 2 + LAYER_III = 1 + + # Bit rates (bit_rate * 1000 = bits/sec) + # key 15 is always invalid + BIT_RATES = { + 1: ( # MPEG1 + ( 0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448 ), # layer I + ( 0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384 ), # layer II + ( 0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320 ), # layer III + # - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 - + ), + 2: ( # MPEG2 / MPEG2.5 + ( 0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256 ), # layer I + ( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer II + ( 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160 ), # layer III + # - 1 2 3 4 5 6 7 8 9 10 11 12 13 14 - + ) + } + SAMPLING_RATES = { + 3: {0: 44100, 1: 48000, 2: 32000}, # MPEG1 + 2: {0: 22050, 1: 24000, 2: 16000}, # MPEG2 + 0: {0: 11025, 1: 12000, 2: 8000} # MPEG2.5 + } + EMPHASIS_NAME = {0: "none", 1: "50/15 ms", 3: "CCIT J.17"} + CHANNEL_MODE_NAME = { + 0: "Stereo", + 1: "Joint stereo", + 2: "Dual channel", + 3: "Single channel" + } + # Channel mode => number of channels + NB_CHANNEL = { + 0: 2, + 1: 2, + 2: 2, + 3: 1, + } + + def __init__(self, *args, **kw): + 
FieldSet.__init__(self, *args, **kw) + if not self._size: + frame_size = self.getFrameSize() + if not frame_size: + raise ParserError("MPEG audio: Invalid frame %s" % self.path) + self._size = min(frame_size * 8, self.parent.size - self.address) + + def createFields(self): + # Header + yield PaddingBits(self, "sync", 11, "Synchronize bits (set to 1)", pattern=1) + yield Enum(Bits(self, "version", 2, "MPEG audio version"), self.VERSION_NAME) + yield Enum(Bits(self, "layer", 2, "MPEG audio layer"), self.LAYER_NAME) + yield Bit(self, "crc16", "No CRC16 protection?") + + # Rates and padding + yield Bits(self, "bit_rate", 4, "Bit rate") + yield Bits(self, "sampling_rate", 2, "Sampling rate") + yield Bit(self, "use_padding", "Stream field use padding?") + yield Bit(self, "extension", "Extension") + + # Channel mode, mode extension, copyright, ... + yield Enum(Bits(self, "channel_mode", 2, "Channel mode"), self.CHANNEL_MODE_NAME) + yield Bits(self, "mode_ext", 2, "Mode extension") + yield Bit(self, "copyright", "Is copyrighted?") + yield Bit(self, "original", "Is original?") + yield Enum(Bits(self, "emphasis", 2, "Emphasis"), self.EMPHASIS_NAME) + + size = (self.size - self.current_size) / 8 + if size: + yield RawBytes(self, "data", size) + + def isValid(self): + return (self["layer"].value != 0 + and self["sync"].value == 2047 + and self["version"].value != 1 + and self["sampling_rate"].value != 3 + and self["bit_rate"].value not in (0, 15) + and self["emphasis"].value != 2) + + def getSampleRate(self): + """ + Read sampling rate. Returns None on error. + """ + version = self["version"].value + rate = self["sampling_rate"].value + try: + return self.SAMPLING_RATES[version][rate] + except (KeyError, IndexError): + return None + + def getBitRate(self): + """ + Read bit rate in bit/sec. Returns None on error. 
+ """ + layer = 3 - self["layer"].value + bit_rate = self["bit_rate"].value + if bit_rate in (0, 15): + return None + if self["version"].value == 3: + dataset = self.BIT_RATES[1] # MPEG1 + else: + dataset = self.BIT_RATES[2] # MPEG2 / MPEG2.5 + try: + return dataset[layer][bit_rate] * 1000 + except (KeyError, IndexError): + return None + + def getFrameSize(self): + """ + Read frame size in bytes. Returns None on error. + """ + frame_size = self.getBitRate() + if not frame_size: + return None + sample_rate = self.getSampleRate() + if not sample_rate: + return None + padding = int(self["use_padding"].value) + + if self["layer"].value == self.LAYER_III: + if self["version"].value == self.MPEG_I: + return (frame_size * 144) // sample_rate + padding + else: + return (frame_size * 72) // sample_rate + padding + elif self["layer"].value == self.LAYER_II: + return (frame_size * 144) / sample_rate + padding + else: # self.LAYER_I: + frame_size = (frame_size * 12) / sample_rate + return (frame_size + padding) * 4 + + def getNbChannel(self): + return self.NB_CHANNEL[ self["channel_mode"].value ] + + def createDescription(self): + info = ["layer %s" % self["layer"].display] + bit_rate = self.getBitRate() + if bit_rate: + info.append("%s/sec" % humanBitSize(bit_rate)) + sampling_rate = self.getSampleRate() + if sampling_rate: + info.append(humanFrequency(sampling_rate)) + return "MPEG-%s %s" % (self["version"].display, ", ".join(info)) + +def findSynchronizeBits(parser, start, max_size): + """ + Find synchronisation bits (11 bits set to 1) + + Returns None on error, or number of bytes before the synchronization. 
+ """ + address0 = parser.absolute_address + end = start + max_size + size = 0 + while start < end: + # Fast search: search 0xFF (first byte of sync frame field) + length = parser.stream.searchBytesLength("\xff", False, start, end) + if length is None: + return None + size += length + start += length * 8 + + # Strong validation of frame: create the frame + # and call method isValid() + try: + frame = createOrphanField(parser, start-address0, Frame, "frame") + valid = frame.isValid() + except HACHOIR_ERRORS: + valid = False + if valid: + return size + + # Invalid frame: continue + start += 8 + size += 1 + return None + +class Frames(FieldSet): + # Padding bytes allowed before a frame + MAX_PADDING = 256 + + def synchronize(self): + addr = self.absolute_address + start = addr + self.current_size + end = min(start + self.MAX_PADDING*8, addr + self.size) + padding = findSynchronizeBits(self, start, end) + if padding is None: + raise ParserError("MPEG audio: Unable to find synchronization bits") + if padding: + return PaddingBytes(self, "padding[]", padding, "Padding before synchronization") + else: + return None + + def looksConstantBitRate(self, count=10): + """ + Guess if frames are constant bit rate. If it returns False, you can + be sure that frames are variable bit rate. Otherwise, it looks like + constant bit rate (on first count fields). 
+ """ + check_keys = ("version", "layer", "bit_rate") + last_field = None + for index, field in enumerate(self.array("frame")): + if last_field: + for key in check_keys: + if field[key].value != last_field[key].value: + return False + last_field = field + if index == count: + break + return True + + def createFields(self): + # Find synchronisation bytes + padding = self.synchronize() + if padding: + yield padding + + while self.current_size < self.size: + yield Frame(self, "frame[]") +# padding = self.synchronize() +# if padding: +# yield padding + + # Read raw bytes at the end (if any) + size = (self.size - self.current_size) / 8 + if size: + yield RawBytes(self, "raw", size) + + def createDescription(self): + if self.looksConstantBitRate(): + text = "(looks like) Constant bit rate (CBR)" + else: + text = "Variable bit rate (VBR)" + return "Frames: %s" % text + +def createMpegAudioMagic(): + + # ID3v1 magic + magics = [("TAG", 0)] + + # ID3v2 magics + for ver_major in ID3v2.VALID_MAJOR_VERSIONS: + magic = "ID3%c\x00" % ver_major + magics.append( (magic,0) ) + + # MPEG frame magic + # TODO: Use longer magic: 32 bits instead of 16 bits + SYNC_BITS = 2047 + for version in Frame.VERSION_NAME.iterkeys(): + for layer in Frame.LAYER_NAME.iterkeys(): + for crc16 in (0, 1): + magic = (SYNC_BITS << 5) | (version << 3) | (layer << 1) | crc16 + magic = long2raw(magic, BIG_ENDIAN, 2) + magics.append( (magic, 0) ) + return magics + +class MpegAudioFile(Parser): + PARSER_TAGS = { + "id": "mpeg_audio", + "category": "audio", + "file_ext": ("mpa", "mp1", "mp2", "mp3"), + "mime": (u"audio/mpeg",), + "min_size": 4*8, +# "magic": createMpegAudioMagic(), + "description": "MPEG audio version 1, 2, 2.5", + "subfile": "skip", + } + endian = BIG_ENDIAN + + def validate(self): + if self[0].name in ("id3v2", "id3v1"): + return True + + if not self.stream.checked: # TODO: is it possible to handle piped input? 
+ return False + + # Validate first 5 frames + for index in xrange(5): + try: + frame = self["frames/frame[%u]" % index] + except MissingField: + # Require a least one valid frame + if (1 <= index) \ + and self["frames"].done: + return True + return "Unable to get frame #%u" % index + except (InputStreamError, ParserError): + return "Unable to create frame #%u" % index + + # Check first frame values + if not frame.isValid(): + return "Frame #%u is invalid" % index + + # Check that all frames are similar + if not index: + frame0 = frame + else: + if frame0["channel_mode"].value != frame["channel_mode"].value: + return "Frame #%u channel mode is different" % index + return True + + def createFields(self): + # Read ID3v2 (if any) + if self.stream.readBytes(0, 3) == "ID3": + yield ID3v2(self, "id3v2") + + if self._size is None: # TODO: is it possible to handle piped input? + raise NotImplementedError + + # Check if file is ending with ID3v1 or not and compute frames size + frames_size = self.size - self.current_size + addr = self.size - 128*8 + if 0 <= addr: + has_id3 = (self.stream.readBytes(addr, 3) == "TAG") + if has_id3: + frames_size -= 128*8 + else: + has_id3 = False + + # Read frames (if any) + if frames_size: + yield Frames(self, "frames", size=frames_size) + + # Read ID3v1 (if any) + if has_id3: + yield ID3v1(self, "id3v1") + + def createDescription(self): + if "frames" in self: + frame = self["frames/frame[0]"] + return "%s, %s" % (frame.description, frame["channel_mode"].display) + elif "id3v2" in self: + return self["id3v2"].description + elif "id3v1" in self: + return self["id3v1"].description + else: + return "MPEG audio" + + def createContentSize(self): + # Get "frames" field + field = self[0] + if field.name != "frames": + try: + field = self[1] + except MissingField: + # File only contains ID3v1 or ID3v2 + return field.size + + # Error: second field are not the frames"? 
+ if field.name != "frames": + return None + + # Go to last frame + frames = field + frame = frames["frame[0]"] + address0 = field.absolute_address + size = address0 + frame.size + while True: + try: + # Parse one MPEG audio frame + frame = createOrphanField(frames, size - address0, Frame, "frame") + + # Check frame 32 bits header + if not frame.isValid(): + break + except HACHOIR_ERRORS: + break + if MAX_FILESIZE < (size + frame.size): + break + size += frame.size + + # ID3v1 at the end? + try: + if self.stream.readBytes(size, 3) == "TAG": + size += ID3v1.static_size + except InputStreamError: + pass + return size + diff --git a/libs/hachoir_parser/audio/real_audio.py b/libs/hachoir_parser/audio/real_audio.py new file mode 100644 index 0000000..289ed6e --- /dev/null +++ b/libs/hachoir_parser/audio/real_audio.py @@ -0,0 +1,90 @@ +""" +RealAudio (.ra) parser + +Author: Mike Melanson +References: + http://wiki.multimedia.cx/index.php?title=RealMedia +Samples: + http://samples.mplayerhq.hu/real/RA/ +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt8, UInt16, UInt32, + Bytes, RawBytes, String, + PascalString8) +from hachoir_core.tools import humanFrequency +from hachoir_core.text_handler import displayHandler +from hachoir_core.endian import BIG_ENDIAN + +class Metadata(FieldSet): + def createFields(self): + yield PascalString8(self, "title", charset="ISO-8859-1") + yield PascalString8(self, "author", charset="ISO-8859-1") + yield PascalString8(self, "copyright", charset="ISO-8859-1") + yield PascalString8(self, "comment", charset="ISO-8859-1") + +class RealAudioFile(Parser): + MAGIC = ".ra\xFD" + PARSER_TAGS = { + "id": "real_audio", + "category": "audio", + "file_ext": ["ra"], + "mime": (u"audio/x-realaudio", u"audio/x-pn-realaudio"), + "min_size": 6*8, + "magic": ((MAGIC, 0),), + "description": u"Real audio (.ra)", + } + endian = BIG_ENDIAN + + def validate(self): + if self["signature"].value != self.MAGIC: + return "Invalid 
signature" + if self["version"].value not in (3, 4): + return "Unknown version" + return True + + def createFields(self): + yield Bytes(self, "signature", 4, r"RealAudio identifier ('.ra\xFD')") + yield UInt16(self, "version", "Version") + if self["version"].value == 3: + yield UInt16(self, "header_size", "Header size") + yield RawBytes(self, "Unknown1", 10) + yield UInt32(self, "data_size", "Data size") + yield Metadata(self, "metadata") + yield UInt8(self, "Unknown2") + yield PascalString8(self, "FourCC") + audio_size = self["data_size"].value + else: # version = 4 + yield UInt16(self, "reserved1", "Reserved, should be 0") + yield String(self, "ra4sig", 4, "'.ra4' signature") + yield UInt32(self, "filesize", "File size (minus 40 bytes)") + yield UInt16(self, "version2", "Version 2 (always equal to version)") + yield UInt32(self, "headersize", "Header size (minus 16)") + yield UInt16(self, "codec_flavor", "Codec flavor") + yield UInt32(self, "coded_frame_size", "Coded frame size") + yield RawBytes(self, "unknown1", 12) + yield UInt16(self, "subpacketh", "Subpacket h (?)") + yield UInt16(self, "frame_size", "Frame size") + yield UInt16(self, "sub_packet_size", "Subpacket size") + yield UInt16(self, "unknown2", "Unknown") + yield displayHandler(UInt16(self, "sample_rate", "Sample rate"), humanFrequency) + yield UInt16(self, "unknown3", "Unknown") + yield UInt16(self, "sample_size", "Sample size") + yield UInt16(self, "channels", "Channels") + yield PascalString8(self, "Interleaving ID String") + yield PascalString8(self, "FourCC") + yield RawBytes(self, "unknown4", 3) + yield Metadata(self, "metadata") + audio_size = (self["filesize"].value + 40) - (self["headersize"].value + 16) + if 0 < audio_size: + yield RawBytes(self, "audio_data", audio_size) + + def createDescription(self): + if (self["version"].value == 3): + return "RealAudio v3 file, '%s' codec" % self["FourCC"].value + elif (self["version"].value == 4): + return "RealAudio v4 file, '%s' codec, %s, %u 
channels" % ( + self["FourCC"].value, self["sample_rate"].display, self["channels"].value) + else: + return "Real audio" diff --git a/libs/hachoir_parser/audio/s3m.py b/libs/hachoir_parser/audio/s3m.py new file mode 100644 index 0000000..1b2a732 --- /dev/null +++ b/libs/hachoir_parser/audio/s3m.py @@ -0,0 +1,668 @@ +""" +The ScreamTracker 3.0x module format description for .s3m files. + +Documents: +- Search s3m on Wotsit + http://www.wotsit.org/ + +Author: Christophe GISQUET +Creation: 11th February 2007 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (StaticFieldSet, FieldSet, Field, + Bit, Bits, + UInt32, UInt16, UInt8, Enum, + PaddingBytes, RawBytes, NullBytes, + String, GenericVector, ParserError) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.tools import alignValue + +class Chunk: + def __init__(self, cls, name, offset, size, *args): + # Todo: swap and have None=unknown instead of now: 0=unknown + assert size != None and size>=0 + self.cls = cls + self.name = name + self.offset = offset + self.size = size + self.args = args + +class ChunkIndexer: + def __init__(self): + self.chunks = [ ] + + # Check if a chunk fits + def canHouse(self, chunk, index): + if index > 1: + if chunk.offset + chunk.size > self.chunks[index-1].offset: + return False + # We could test now that it fits in the memory + return True + + # Farthest element is last + def addChunk(self, new_chunk): + index = 0 + # Find first chunk whose value is bigger + while index < len(self.chunks): + offset = self.chunks[index].offset + if offset < new_chunk.offset: + if not self.canHouse(new_chunk, index): + raise ParserError("Chunk '%s' doesn't fit!" 
% new_chunk.name) + self.chunks.insert(index, new_chunk) + return + index += 1 + + # Not found or empty + # We could at least check that it fits in the memory + self.chunks.append(new_chunk) + + def yieldChunks(self, obj): + while len(self.chunks) > 0: + chunk = self.chunks.pop() + current_pos = obj.current_size//8 + + # Check if padding needed + size = chunk.offset - current_pos + if size > 0: + obj.info("Padding of %u bytes needed: curr=%u offset=%u" % \ + (size, current_pos, chunk.offset)) + yield PaddingBytes(obj, "padding[]", size) + current_pos = obj.current_size//8 + + # Find resynch point if needed + count = 0 + old_off = chunk.offset + while chunk.offset < current_pos: + count += 1 + chunk = self.chunks.pop() + # Unfortunaly, we also pass the underlying chunks + if chunk == None: + obj.info("Couldn't resynch: %u object skipped to reach %u" % \ + (count, current_pos)) + return + + # Resynch + size = chunk.offset-current_pos + if size > 0: + obj.info("Skipped %u objects to resynch to %u; chunk offset: %u->%u" % \ + (count, current_pos, old_off, chunk.offset)) + yield RawBytes(obj, "resynch[]", size) + + # Yield + obj.info("Yielding element of size %u at offset %u" % \ + (chunk.size, chunk.offset)) + field = chunk.cls(obj, chunk.name, chunk.size, *chunk.args) + # Not tested, probably wrong: + #if chunk.size: field.static_size = 8*chunk.size + yield field + + if hasattr(field, "getSubChunks"): + for sub_chunk in field.getSubChunks(): + obj.info("Adding sub chunk: position=%u size=%u name='%s'" % \ + (sub_chunk.offset, sub_chunk.size, sub_chunk.name)) + self.addChunk(sub_chunk) + + # Let missing padding be done by next chunk + +class S3MFlags(StaticFieldSet): + format = ( + (Bit, "st2_vibrato", "Vibrato (File version 1/ScreamTrack 2)"), + (Bit, "st2_tempo", "Tempo (File version 1/ScreamTrack 2)"), + (Bit, "amiga_slides", "Amiga slides (File version 1/ScreamTrack 2)"), + (Bit, "zero_vol_opt", "Automatically turn off looping notes whose volume is zero for >2 note 
rows"), + (Bit, "amiga_limits", "Disallow notes beyond Amiga hardware specs"), + (Bit, "sb_processing", "Enable filter/SFX with SoundBlaster"), + (Bit, "vol_slide", "Volume slide also performed on first row"), + (Bit, "extended", "Special custom data in file"), + (Bits, "unused[]", 8) + ) + +def parseChannelType(val): + val = val.value + if val<8: + return "Left Sample Channel %u" % val + if val<16: + return "Right Sample Channel %u" % (val-8) + if val<32: + return "Adlib channel %u" % (val-16) + return "Value %u unknown" % val + +class ChannelSettings(FieldSet): + static_size = 8 + def createFields(self): + yield textHandler(Bits(self, "type", 7), parseChannelType) + yield Bit(self, "enabled") + +class ChannelPanning(FieldSet): + static_size = 8 + def createFields(self): + yield Bits(self, "default_position", 4, "Default pan position") + yield Bit(self, "reserved[]") + yield Bit(self, "use_default", "Bits 0:3 specify default position") + yield Bits(self, "reserved[]", 2) + +# Provide an automatic constructor +class SizeFieldSet(FieldSet): + """ + Provide an automatic constructor for a sized field that can be aligned + on byte positions according to ALIGN. + + Size is ignored if static_size is set. Real size is stored + for convenience, but beware, it is not in bits, but in bytes. + + Field can be automatically padded, unless: + - size is 0 (unknown, so padding doesn't make sense) + - it shouldn't be aligned + + If it shouldn't be aligned, two solutions: + - change _size to another value than the one found through aligment. + - derive a class with ALIGN = 0. 
+ """ + ALIGN = 16 + def __init__(self, parent, name, size, desc=None): + FieldSet.__init__(self, parent, name, desc) + if size: + self.real_size = size + if self.static_size == None: + self.setCheckedSizes(size) + + def setCheckedSizes(self, size): + # First set size so that end is aligned, if needed + self.real_size = size + size *= 8 + if self.ALIGN: + size = alignValue(self.absolute_address+size, 8*self.ALIGN) \ + - self.absolute_address + + if self._parent._size: + if self._parent.current_size + size > self._parent._size: + size = self._parent._size - self._parent.current_size + + self._size = size + + def createFields(self): + for field in self.createUnpaddedFields(): + yield field + size = (self._size - self.current_size)//8 + if size > 0: + yield PaddingBytes(self, "padding", size) + +class Header(SizeFieldSet): + def createDescription(self): + return "%s (%u patterns, %u instruments)" % \ + (self["title"].value, self["num_patterns"].value, + self["num_instruments"].value) + + def createValue(self): + return self["title"].value + + # Header fields may have to be padded - specify static_size + # or modify _size in a derived class if never. 
+ def createUnpaddedFields(self): + yield String(self, "title", 28, strip='\0') + yield textHandler(UInt8(self, "marker[]"), hexadecimal) + for field in self.getFileVersionField(): + yield field + + yield UInt16(self, "num_orders") + yield UInt16(self, "num_instruments") + yield UInt16(self, "num_patterns") + + for field in self.getFirstProperties(): + yield field + yield String(self, "marker[]", 4) + for field in self.getLastProperties(): + yield field + + yield GenericVector(self, "channel_settings", 32, + ChannelSettings, "channel") + + # Orders + yield GenericVector(self, "orders", self.getNumOrders(), UInt8, "order") + + for field in self.getHeaderEndFields(): + yield field + +class S3MHeader(Header): + """ + 0 1 2 3 4 5 6 7 8 9 A B C D E F + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0000: | Song name, max 28 chars (end with NUL (0)) | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0010: | |1Ah|Typ| x | x | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0020: |OrdNum |InsNum |PatNum | Flags | Cwt/v | Ffi |'S'|'C'|'R'|'M'| + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0030: |g.v|i.s|i.t|m.v|u.c|d.p| x | x | x | x | x | x | x | x |Special| + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0040: |Channel settings for 32 channels, 255=unused,+128=disabled | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0050: | | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0060: |Orders; length=OrdNum (should be even) | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + xxx1: |Parapointers to instruments; length=InsNum*2 | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + xxx2: |Parapointers to patterns; length=PatNum*2 | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + xxx3: |Channel default pan positions | + 
+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + xxx1=70h+orders + xxx2=70h+orders+instruments*2 + xxx3=70h+orders+instruments*2+patterns*2 + """ + def __init__(self, parent, name, size, desc=None): + Header.__init__(self, parent, name, size, desc) + + # Overwrite real_size + size = 0x60 + self["num_orders"].value + \ + 2*(self["num_instruments"].value + self["num_patterns"].value) + if self["panning_info"].value == 252: + size += 32 + + # Deduce size for SizeFieldSet + self.setCheckedSizes(size) + + def getFileVersionField(self): + yield UInt8(self, "type") + yield RawBytes(self, "reserved[]", 2) + + def getFirstProperties(self): + yield S3MFlags(self, "flags") + yield UInt8(self, "creation_version_minor") + yield Bits(self, "creation_version_major", 4) + yield Bits(self, "creation_version_unknown", 4, "(=1)") + yield UInt16(self, "format_version") + + def getLastProperties(self): + yield UInt8(self, "glob_vol", "Global volume") + yield UInt8(self, "init_speed", "Initial speed (command A)") + yield UInt8(self, "init_tempo", "Initial tempo (command T)") + yield Bits(self, "volume", 7) + yield Bit(self, "stereo") + yield UInt8(self, "click_removal", "Number of GUS channels to run to prevent clicks") + yield UInt8(self, "panning_info") + yield RawBytes(self, "reserved[]", 8) + yield UInt16(self, "custom_data_parapointer", + "Parapointer to special custom data (not used by ST3.01)") + + def getNumOrders(self): return self["num_orders"].value + + def getHeaderEndFields(self): + instr = self["num_instruments"].value + patterns = self["num_patterns"].value + # File pointers + if instr > 0: + yield GenericVector(self, "instr_pptr", instr, UInt16, "offset") + if patterns > 0: + yield GenericVector(self, "pattern_pptr", patterns, UInt16, "offset") + + # S3M 3.20 extension + if self["creation_version_major"].value >= 3 \ + and self["creation_version_minor"].value >= 0x20 \ + and self["panning_info"].value == 252: + yield GenericVector(self, 
"channel_panning", 32, ChannelPanning, "channel") + + # Padding required for 16B alignment + size = self._size - self.current_size + if size > 0: + yield PaddingBytes(self, "padding", size//8) + + def getSubChunks(self): + # Instruments - no warranty that they are concatenated + for index in xrange(self["num_instruments"].value): + yield Chunk(S3MInstrument, "instrument[]", + 16*self["instr_pptr/offset[%u]" % index].value, + S3MInstrument.static_size//8) + + # Patterns - size unknown but listed in their headers + for index in xrange(self["num_patterns"].value): + yield Chunk(S3MPattern, "pattern[]", + 16*self["pattern_pptr/offset[%u]" % index].value, 0) + +class PTMHeader(Header): + # static_size should prime over _size, right? + static_size = 8*608 + + def getTrackerVersion(val): + val = val.value + return "ProTracker x%04X" % val + + def getFileVersionField(self): + yield UInt16(self, "type") + yield RawBytes(self, "reserved[]", 1) + + def getFirstProperties(self): + yield UInt16(self, "channels") + yield UInt16(self, "flags") # 0 => NullBytes + yield UInt16(self, "reserved[]") + + def getLastProperties(self): + yield RawBytes(self, "reserved[]", 16) + + def getNumOrders(self): return 256 + + def getHeaderEndFields(self): + yield GenericVector(self, "pattern_pptr", 128, UInt16, "offset") + + def getSubChunks(self): + # It goes like this in the BS: patterns->instruments->instr. 
samples + + if self._parent._size: + min_off = self.absolute_address+self._parent._size + else: + min_off = 99999999999 + + # Instruments and minimal end position for last pattern + count = self["num_instruments"].value + addr = self.absolute_address + for index in xrange(count): + offset = (self.static_size+index*PTMInstrument.static_size)//8 + yield Chunk(PTMInstrument, "instrument[]", offset, + PTMInstrument.static_size//8) + offset = self.stream.readBits(addr+8*(offset+18), 32, LITTLE_ENDIAN) + min_off = min(min_off, offset) + + # Patterns + count = self["num_patterns"].value + prev_off = 16*self["pattern_pptr/offset[0]"].value + for index in range(1, count): + offset = 16*self["pattern_pptr/offset[%u]" % index].value + yield Chunk(PTMPattern, "pattern[]", prev_off, offset-prev_off) + prev_off = offset + + # Difficult to account for + yield Chunk(PTMPattern, "pattern[]", prev_off, min_off-prev_off) + +class SampleFlags(StaticFieldSet): + format = ( + (Bit, "loop_on"), + (Bit, "stereo", "Sample size will be 2*length"), + (Bit, "16bits", "16b sample, Intel LO-HI byteorder"), + (Bits, "unused", 5) + ) + +class S3MUInt24(Field): + static_size = 24 + def __init__(self, parent, name, desc=None): + Field.__init__(self, parent, name, size=24, description=desc) + addr = self.absolute_address + val = parent.stream.readBits(addr, 8, LITTLE_ENDIAN) << 20 + val += parent.stream.readBits(addr+8, 16, LITTLE_ENDIAN) << 4 + self.createValue = lambda: val + +class SampleData(SizeFieldSet): + def createUnpaddedFields(self): + yield RawBytes(self, "data", self.real_size) +class PTMSampleData(SampleData): + ALIGN = 0 + +class Instrument(SizeFieldSet): + static_size = 8*0x50 + + def createDescription(self): + info = [self["c4_speed"].display] + if "flags/stereo" in self: + if self["flags/stereo"].value: + info.append("stereo") + else: + info.append("mono") + info.append("%u bits" % self.getSampleBits()) + return ", ".join(info) + + # Structure knows its size and doesn't need padding 
anyway, so + # overwrite base member: no need to go through it. + def createFields(self): + yield self.getType() + yield String(self, "filename", 12, strip='\0') + + for field in self.getInstrumentFields(): + yield field + + yield String(self, "name", 28, strip='\0') + yield String(self, "marker", 4, "Either 'SCRS' or '(empty)'", strip='\0') + + def createValue(self): + return self["name"].value + +class S3MInstrument(Instrument): + """ + In fact a sample. Description follows: + + 0 1 2 3 4 5 6 7 8 9 A B C D E F + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0000: |[T]| Dos filename (12345678.ABC) | MemSeg | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0010: |Length |HI:leng|LoopBeg|HI:LBeg|LoopEnd|HI:Lend|Vol| x |[P]|[F]| + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0020: |C2Spd |HI:C2sp| x | x | x | x |Int:Gp |Int:512|Int:lastused | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0030: | Sample name, 28 characters max... (incl. NUL) | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0040: | ...sample name... 
|'S'|'C'|'R'|'S'| + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + xxxx: sampledata + """ + MAGIC = "SCRS" + PACKING = {0: "Unpacked", 1: "DP30ADPCM" } + TYPE = {0: "Unknown", 1: "Sample", 2: "adlib melody", 3: "adlib drum2" } + + def getType(self): + return Enum(UInt8(self, "type"), self.TYPE) + + def getSampleBits(self): + return 8*(1+self["flags/16bits"].value) + + def getInstrumentFields(self): + yield S3MUInt24(self, "sample_offset") + yield UInt32(self, "sample_size") + yield UInt32(self, "loop_begin") + yield UInt32(self, "loop_end") + yield UInt8(self, "volume") + yield UInt8(self, "reserved[]") + yield Enum(UInt8(self, "packing"), self.PACKING) + yield SampleFlags(self, "flags") + yield UInt32(self, "c4_speed", "Frequency for middle C note") + yield UInt32(self, "reserved[]", 4) + yield UInt16(self, "internal[]", "Sample address in GUS memory") + yield UInt16(self, "internal[]", "Flags for SoundBlaster loop expansion") + yield UInt32(self, "internal[]", "Last used position (SB)") + + def getSubChunks(self): + size = self["sample_size"].value + if self["flags/stereo"].value: size *= 2 + if self["flags/16bits"].value: size *= 2 + yield Chunk(SampleData, "sample_data[]", + self["sample_offset"].value, size) + + +class PTMType(FieldSet): + TYPES = {0: "No sample", 1: "Regular", 2: "OPL2/OPL2 instrument", 3: "MIDI instrument" } + static_size = 8 + def createFields(self): + yield Bits(self, "unused", 2) + yield Bit(self, "is_tonable") + yield Bit(self, "16bits") + yield Bit(self, "loop_bidir") + yield Bit(self, "loop") + yield Enum(Bits(self, "origin", 2), self.TYPES) + +##class PTMType(StaticFieldSet): +## format = ( +## (Bits, "unused", 2), +## (Bit, "is_tonable"), +## (Bit, "16bits"), +## (Bit, "loop_bidir"), +## (Bit, "loop"), +## (Bits, "origin", 2), +## ) + +class PTMInstrument(Instrument): + MAGIC = "PTMI" + ALIGN = 0 + + def getType(self): + return PTMType(self, "flags") # Hack to have more common code + + # PTM doesn't pretend to 
manage 16bits + def getSampleBits(self): + return 8 + + def getInstrumentFields(self): + yield UInt8(self, "volume") + yield UInt16(self, "c4_speed") + yield UInt16(self, "sample_segment") + yield UInt32(self, "sample_offset") + yield UInt32(self, "sample_size") + yield UInt32(self, "loop_begin") + yield UInt32(self, "loop_end") + yield UInt32(self, "gus_begin") + yield UInt32(self, "gus_loop_start") + yield UInt32(self, "gus_loop_end") + yield textHandler(UInt8(self, "gus_loop_flags"), hexadecimal) + yield UInt8(self, "reserved[]") # Should be 0 + + def getSubChunks(self): + # Samples are NOT padded, and the size is already the correct one + size = self["sample_size"].value + if size: + yield Chunk(PTMSampleData, "sample_data[]", self["sample_offset"].value, size) + + +class S3MNoteInfo(StaticFieldSet): + """ +0=end of row +&31=channel +&32=follows; BYTE:note, BYTE:instrument +&64=follows; BYTE:volume +&128=follows; BYTE:command, BYTE:info + """ + format = ( + (Bits, "channel", 5), + (Bit, "has_note"), + (Bit, "has_volume"), + (Bit, "has_effect") + ) + +class PTMNoteInfo(StaticFieldSet): + format = ( + (Bits, "channel", 5), + (Bit, "has_note"), + (Bit, "has_effect"), + (Bit, "has_volume") + ) + +class Note(FieldSet): + def createFields(self): + # Used by Row to check if end of Row + info = self.NOTE_INFO(self, "info") + yield info + if info["has_note"].value: + yield UInt8(self, "note") + yield UInt8(self, "instrument") + if info["has_volume"].value: + yield UInt8(self, "volume") + if info["has_effect"].value: + yield UInt8(self, "effect") + yield UInt8(self, "param") + +class S3MNote(Note): + NOTE_INFO = S3MNoteInfo +class PTMNote(Note): + NOTE_INFO = PTMNoteInfo + +class Row(FieldSet): + def createFields(self): + addr = self.absolute_address + while True: + # Check empty note + byte = self.stream.readBits(addr, 8, self.endian) + if not byte: + yield NullBytes(self, "terminator", 1) + return + + note = self.NOTE(self, "note[]") + yield note + addr += note.size + 
+class S3MRow(Row): + NOTE = S3MNote +class PTMRow(Row): + NOTE = PTMNote + +class Pattern(SizeFieldSet): + def createUnpaddedFields(self): + count = 0 + while count < 64 and not self.eof: + yield self.ROW(self, "row[]") + count += 1 + +class S3MPattern(Pattern): + ROW = S3MRow + def __init__(self, parent, name, size, desc=None): + Pattern.__init__(self, parent, name, size, desc) + + # Get real_size from header + addr = self.absolute_address + size = self.stream.readBits(addr, 16, LITTLE_ENDIAN) + self.setCheckedSizes(size) + +class PTMPattern(Pattern): + ROW = PTMRow + +class Module(Parser): + # MARKER / HEADER are defined in derived classes + endian = LITTLE_ENDIAN + + def validate(self): + marker = self.stream.readBits(0x1C*8, 8, LITTLE_ENDIAN) + if marker != 0x1A: + return "Invalid start marker %u" % marker + marker = self.stream.readBytes(0x2C*8, 4) + if marker != self.MARKER: + return "Invalid marker %s!=%s" % (marker, self.MARKER) + return True + + def createFields(self): + # Index chunks + indexer = ChunkIndexer() + # Add header - at least 0x50 bytes + indexer.addChunk(Chunk(self.HEADER, "header", 0, 0x50)) + for field in indexer.yieldChunks(self): + yield field + + +class S3MModule(Module): + PARSER_TAGS = { + "id": "s3m", + "category": "audio", + "file_ext": ("s3m",), + "mime": (u'audio/s3m', u'audio/x-s3m'), + "min_size": 64*8, + "description": "ScreamTracker3 module" + } + MARKER = "SCRM" + HEADER = S3MHeader + +## def createContentSize(self): +## hdr = Header(self, "header") +## max_offset = hdr._size//8 + +## instr_size = Instrument._size//8 +## for index in xrange(self["header/num_instruments"].value): +## offset = 16*hdr["instr_pptr/offset[%u]" % index].value +## max_offset = max(offset+instr_size, max_offset) +## addr = self.absolute_address + 8*offset + +class PTMModule(Module): + PARSER_TAGS = { + "id": "ptm", + "category": "audio", + "file_ext": ("ptm",), + "min_size": 64*8, + "description": "PolyTracker module (v1.17)" + } + MARKER = "PTMF" + 
HEADER = PTMHeader diff --git a/libs/hachoir_parser/audio/xm.py b/libs/hachoir_parser/audio/xm.py new file mode 100644 index 0000000..0b13b41 --- /dev/null +++ b/libs/hachoir_parser/audio/xm.py @@ -0,0 +1,390 @@ +""" +Parser of FastTrackerII Extended Module (XM) version 1.4 + +Documents: +- Modplug source code (file modplug/soundlib/Load_xm.cpp) + http://sourceforge.net/projects/modplug +- Dumb source code (files include/dumb.h and src/it/readxm.c + http://dumb.sf.net/ +- Documents of "XM" format on Wotsit + http://www.wotsit.org + +Author: Christophe GISQUET +Creation: 8th February 2007 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (StaticFieldSet, FieldSet, + Bit, RawBits, Bits, + UInt32, UInt16, UInt8, Int8, Enum, + RawBytes, String, GenericVector) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal +from hachoir_parser.audio.modplug import ParseModplugMetadata +from hachoir_parser.common.tracker import NOTE_NAME + +def parseSigned(val): + return "%i" % (val.value-128) + +# From dumb +SEMITONE_BASE = 1.059463094359295309843105314939748495817 +PITCH_BASE = 1.000225659305069791926712241547647863626 + +SAMPLE_LOOP_MODE = ("No loop", "Forward loop", "Ping-pong loop", "Undef") + +class SampleType(FieldSet): + static_size = 8 + def createFields(self): + yield Bits(self, "unused[]", 4) + yield Bit(self, "16bits") + yield Bits(self, "unused[]", 1) + yield Enum(Bits(self, "loop_mode", 2), SAMPLE_LOOP_MODE) + +class SampleHeader(FieldSet): + static_size = 40*8 + def createFields(self): + yield UInt32(self, "length") + yield UInt32(self, "loop_start") + yield UInt32(self, "loop_end") + yield UInt8(self, "volume") + yield Int8(self, "fine_tune") + yield SampleType(self, "type") + yield UInt8(self, "panning") + yield Int8(self, "relative_note") + yield UInt8(self, "reserved") + yield String(self, "name", 22, charset="ASCII", strip=' \0') + + def 
createValue(self): + bytes = 1+self["type/16bits"].value + C5_speed = int(16726.0*pow(SEMITONE_BASE, self["relative_note"].value) + *pow(PITCH_BASE, self["fine_tune"].value*2)) + return "%s, %ubits, %u samples, %uHz" % \ + (self["name"].display, 8*bytes, self["length"].value/bytes, C5_speed) + +class StuffType(StaticFieldSet): + format = ( + (Bits, "unused", 5), + (Bit, "loop"), + (Bit, "sustain"), + (Bit, "on") + ) + +class InstrumentSecondHeader(FieldSet): + static_size = 234*8 + def createFields(self): + yield UInt32(self, "sample_header_size") + yield GenericVector(self, "notes", 96, UInt8, "sample") + yield GenericVector(self, "volume_envelope", 24, UInt16, "point") + yield GenericVector(self, "panning_envelope", 24, UInt16, "point") + yield UInt8(self, "volume_points", r"Number of volume points") + yield UInt8(self, "panning_points", r"Number of panning points") + yield UInt8(self, "volume_sustain_point") + yield UInt8(self, "volume_loop_start_point") + yield UInt8(self, "volume_loop_end_point") + yield UInt8(self, "panning_sustain_point") + yield UInt8(self, "panning_loop_start_point") + yield UInt8(self, "panning_loop_end_point") + yield StuffType(self, "volume_type") + yield StuffType(self, "panning_type") + yield UInt8(self, "vibrato_type") + yield UInt8(self, "vibrato_sweep") + yield UInt8(self, "vibrato_depth") + yield UInt8(self, "vibrato_rate") + yield UInt16(self, "volume_fadeout") + yield GenericVector(self, "reserved", 11, UInt16, "word") + +def createInstrumentContentSize(s, addr): + start = addr + samples = s.stream.readBits(addr+27*8, 16, LITTLE_ENDIAN) + # Seek to end of header (1st + 2nd part) + addr += 8*s.stream.readBits(addr, 32, LITTLE_ENDIAN) + + sample_size = 0 + if samples: + for index in xrange(samples): + # Read the sample size from the header + sample_size += s.stream.readBits(addr, 32, LITTLE_ENDIAN) + # Seek to next sample header + addr += SampleHeader.static_size + + return addr - start + 8*sample_size + +class 
Instrument(FieldSet): + def __init__(self, parent, name): + FieldSet.__init__(self, parent, name) + self._size = createInstrumentContentSize(self, self.absolute_address) + self.info(self.createDescription()) + + # Seems to fix things... + def fixInstrumentHeader(self): + size = self["size"].value - self.current_size//8 + if size: + yield RawBytes(self, "unknown_data", size) + + def createFields(self): + yield UInt32(self, "size") + yield String(self, "name", 22, charset="ASCII", strip=" \0") + # Doc says type is always 0, but I've found values of 24 and 96 for + # the _same_ song here, just different download sources for the file + yield UInt8(self, "type") + yield UInt16(self, "samples") + num = self["samples"].value + self.info(self.createDescription()) + + if num: + yield InstrumentSecondHeader(self, "second_header") + + for field in self.fixInstrumentHeader(): + yield field + + # This part probably wrong + sample_size = [ ] + for index in xrange(num): + sample = SampleHeader(self, "sample_header[]") + yield sample + sample_size.append(sample["length"].value) + + for size in sample_size: + if size: + yield RawBytes(self, "sample_data[]", size, "Deltas") + else: + for field in self.fixInstrumentHeader(): + yield field + + def createDescription(self): + return "Instrument '%s': %i samples, header %i bytes" % \ + (self["name"].value, self["samples"].value, self["size"].value) + +VOLUME_NAME = ( + "Volume slide down", "Volume slide up", "Fine volume slide down", + "Fine volume slide up", "Set vibrato speed", "Vibrato", + "Set panning", "Panning slide left", "Panning slide right", + "Tone porta", "Unhandled") + +def parseVolume(val): + val = val.value + if 0x10<=val<=0x50: + return "Volume %i" % val-16 + else: + return VOLUME_NAME[val/16 - 6] + +class RealBit(RawBits): + static_size = 1 + + def __init__(self, parent, name, description=None): + RawBits.__init__(self, parent, name, 1, description=description) + + def createValue(self): + return 
self._parent.stream.readBits(self.absolute_address, 1, BIG_ENDIAN) + +class NoteInfo(StaticFieldSet): + format = ( + (RawBits, "unused", 2), + (RealBit, "has_parameter"), + (RealBit, "has_type"), + (RealBit, "has_volume"), + (RealBit, "has_instrument"), + (RealBit, "has_note") + ) + +EFFECT_NAME = ( + "Arppegio", "Porta up", "Porta down", "Tone porta", "Vibrato", + "Tone porta+Volume slide", "Vibrato+Volume slide", "Tremolo", + "Set panning", "Sample offset", "Volume slide", "Position jump", + "Set volume", "Pattern break", None, "Set tempo/BPM", + "Set global volume", "Global volume slide", "Unused", "Unused", + "Unused", "Set envelope position", "Unused", "Unused", + "Panning slide", "Unused", "Multi retrig note", "Unused", + "Tremor", "Unused", "Unused", "Unused", None) + +EFFECT_E_NAME = ( + "Unknown", "Fine porta up", "Fine porta down", + "Set gliss control", "Set vibrato control", "Set finetune", + "Set loop begin/loop", "Set tremolo control", "Retrig note", + "Fine volume slide up", "Fine volume slide down", "Note cut", + "Note delay", "Pattern delay") + +class Effect(RawBits): + def __init__(self, parent, name): + RawBits.__init__(self, parent, name, 8) + + def createValue(self): + t = self.parent.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN) + param = self.parent.stream.readBits(self.absolute_address+8, 8, LITTLE_ENDIAN) + if t == 0x0E: + return EFFECT_E_NAME[param>>4] + " %i" % (param&0x07) + elif t == 0x21: + return ("Extra fine porta up", "Extra fine porta down")[param>>4] + else: + return EFFECT_NAME[t] + +class Note(FieldSet): + def __init__(self, parent, name, desc=None): + FieldSet.__init__(self, parent, name, desc) + self.flags = self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN) + if self.flags&0x80: + # TODO: optimize bitcounting with a table: + # http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetTable + self._size = 8 + if self.flags&0x01: self._size += 8 + if self.flags&0x02: self._size += 8 + if 
self.flags&0x04: self._size += 8 + if self.flags&0x08: self._size += 8 + if self.flags&0x10: self._size += 8 + else: + self._size = 5*8 + + def createFields(self): + # This stupid shit gets the LSB, not the MSB... + self.info("Note info: 0x%02X" % + self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN)) + yield RealBit(self, "is_extended") + if self["is_extended"].value: + info = NoteInfo(self, "info") + yield info + if info["has_note"].value: + yield Enum(UInt8(self, "note"), NOTE_NAME) + if info["has_instrument"].value: + yield UInt8(self, "instrument") + if info["has_volume"].value: + yield textHandler(UInt8(self, "volume"), parseVolume) + if info["has_type"].value: + yield Effect(self, "effect_type") + if info["has_parameter"].value: + yield textHandler(UInt8(self, "effect_parameter"), hexadecimal) + else: + yield Enum(Bits(self, "note", 7), NOTE_NAME) + yield UInt8(self, "instrument") + yield textHandler(UInt8(self, "volume"), parseVolume) + yield Effect(self, "effect_type") + yield textHandler(UInt8(self, "effect_parameter"), hexadecimal) + + def createDescription(self): + if "info" in self: + info = self["info"] + desc = [] + if info["has_note"].value: + desc.append(self["note"].display) + if info["has_instrument"].value: + desc.append("instrument %i" % self["instrument"].value) + if info["has_volume"].value: + desc.append(self["has_volume"].display) + if info["has_type"].value: + desc.append("effect %s" % self["effect_type"].value) + if info["has_parameter"].value: + desc.append("parameter %i" % self["effect_parameter"].value) + else: + desc = (self["note"].display, "instrument %i" % self["instrument"].value, + self["has_volume"].display, "effect %s" % self["effect_type"].value, + "parameter %i" % self["effect_parameter"].value) + if desc: + return "Note %s" % ", ".join(desc) + else: + return "Note" + +class Row(FieldSet): + def createFields(self): + for index in xrange(self["/header/channels"].value): + yield Note(self, "note[]") + +def 
createPatternContentSize(s, addr): + return 8*(s.stream.readBits(addr, 32, LITTLE_ENDIAN) + + s.stream.readBits(addr+7*8, 16, LITTLE_ENDIAN)) + +class Pattern(FieldSet): + def __init__(self, parent, name, desc=None): + FieldSet.__init__(self, parent, name, desc) + self._size = createPatternContentSize(self, self.absolute_address) + + def createFields(self): + yield UInt32(self, "header_size", r"Header length (9)") + yield UInt8(self, "packing_type", r"Packing type (always 0)") + yield UInt16(self, "rows", r"Number of rows in pattern (1..256)") + yield UInt16(self, "data_size", r"Packed patterndata size") + rows = self["rows"].value + self.info("Pattern: %i rows" % rows) + for index in xrange(rows): + yield Row(self, "row[]") + + def createDescription(self): + return "Pattern with %i rows" % self["rows"].value + +class Header(FieldSet): + MAGIC = "Extended Module: " + static_size = 336*8 + + def createFields(self): + yield String(self, "signature", 17, "XM signature", charset="ASCII") + yield String(self, "title", 20, "XM title", charset="ASCII", strip=' ') + yield UInt8(self, "marker", "Marker (0x1A)") + yield String(self, "tracker_name", 20, "XM tracker name", charset="ASCII", strip=' ') + yield UInt8(self, "format_minor") + yield UInt8(self, "format_major") + yield filesizeHandler(UInt32(self, "header_size", "Header size (276)")) + yield UInt16(self, "song_length", "Length in patten order table") + yield UInt16(self, "restart", "Restart position") + yield UInt16(self, "channels", "Number of channels (2,4,6,8,10,...,32)") + yield UInt16(self, "patterns", "Number of patterns (max 256)") + yield UInt16(self, "instruments", "Number of instruments (max 128)") + yield Bit(self, "amiga_ftable", "Amiga frequency table") + yield Bit(self, "linear_ftable", "Linear frequency table") + yield Bits(self, "unused", 14) + yield UInt16(self, "tempo", "Default tempo") + yield UInt16(self, "bpm", "Default BPM") + yield GenericVector(self, "pattern_order", 256, UInt8, "order") + + 
def createDescription(self): + return "'%s' by '%s'" % ( + self["title"].value, self["tracker_name"].value) + +class XMModule(Parser): + PARSER_TAGS = { + "id": "fasttracker2", + "category": "audio", + "file_ext": ("xm",), + "mime": ( + u'audio/xm', u'audio/x-xm', + u'audio/module-xm', u'audio/mod', u'audio/x-mod'), + "magic": ((Header.MAGIC, 0),), + "min_size": Header.static_size +29*8, # Header + 1 empty instrument + "description": "FastTracker2 module" + } + endian = LITTLE_ENDIAN + + def validate(self): + header = self.stream.readBytes(0, 17) + if header != Header.MAGIC: + return "Invalid signature '%s'" % header + if self["/header/header_size"].value != 276: + return "Unknown header size (%u)" % self["/header/header_size"].value + return True + + def createFields(self): + yield Header(self, "header") + for index in xrange(self["/header/patterns"].value): + yield Pattern(self, "pattern[]") + for index in xrange(self["/header/instruments"].value): + yield Instrument(self, "instrument[]") + + # Metadata added by ModPlug - can be discarded + for field in ParseModplugMetadata(self): + yield field + + def createContentSize(self): + # Header size + size = Header.static_size + + # Add patterns size + for index in xrange(self["/header/patterns"].value): + size += createPatternContentSize(self, size) + + # Add instruments size + for index in xrange(self["/header/instruments"].value): + size += createInstrumentContentSize(self, size) + + # Not reporting Modplug metadata + return size + + def createDescription(self): + return self["header"].description + diff --git a/libs/hachoir_parser/common/__init__.py b/libs/hachoir_parser/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/libs/hachoir_parser/common/deflate.py b/libs/hachoir_parser/common/deflate.py new file mode 100644 index 0000000..8aa8e51 --- /dev/null +++ b/libs/hachoir_parser/common/deflate.py @@ -0,0 +1,33 @@ +from hachoir_core.field import CompressedField + +try: + from zlib import 
decompressobj, MAX_WBITS + + class DeflateStream: + def __init__(self, stream, wbits=None): + if wbits: + self.gzip = decompressobj(-MAX_WBITS) + else: + self.gzip = decompressobj() + + def __call__(self, size, data=None): + if data is None: + data = '' + return self.gzip.decompress(self.gzip.unconsumed_tail+data, size) + + class DeflateStreamWbits(DeflateStream): + def __init__(self, stream): + DeflateStream.__init__(self, stream, True) + + def Deflate(field, wbits=True): + if wbits: + CompressedField(field, DeflateStreamWbits) + else: + CompressedField(field, DeflateStream) + return field + has_deflate = True +except ImportError: + def Deflate(field, wbits=True): + return field + has_deflate = False + diff --git a/libs/hachoir_parser/common/msdos.py b/libs/hachoir_parser/common/msdos.py new file mode 100644 index 0000000..addd149 --- /dev/null +++ b/libs/hachoir_parser/common/msdos.py @@ -0,0 +1,62 @@ +""" +MS-DOS structures. + +Documentation: +- File attributes: + http://www.cs.colorado.edu/~main/cs1300/include/ddk/winddk.h +""" + +from hachoir_core.field import StaticFieldSet +from hachoir_core.field import Bit, NullBits + +_FIELDS = ( + (Bit, "read_only"), + (Bit, "hidden"), + (Bit, "system"), + (NullBits, "reserved[]", 1), + (Bit, "directory"), + (Bit, "archive"), + (Bit, "device"), + (Bit, "normal"), + (Bit, "temporary"), + (Bit, "sparse_file"), + (Bit, "reparse_file"), + (Bit, "compressed"), + (Bit, "offline"), + (Bit, "dont_index_content"), + (Bit, "encrypted"), +) + +class MSDOSFileAttr16(StaticFieldSet): + """ + MSDOS 16-bit file attributes + """ + format = _FIELDS + ((NullBits, "reserved[]", 1),) + + _text_keys = ( + # Sort attributes by importance + "directory", "read_only", "compressed", + "hidden", "system", + "normal", "device", + "temporary", "archive") + + def createValue(self): + mode = [] + for name in self._text_keys: + if self[name].value: + if 4 <= len(mode): + mode.append("...") + break + else: + mode.append(name) + if mode: + return ", 
".join(mode) + else: + return "(none)" + +class MSDOSFileAttr32(MSDOSFileAttr16): + """ + MSDOS 32-bit file attributes + """ + format = _FIELDS + ((NullBits, "reserved[]", 17),) + diff --git a/libs/hachoir_parser/common/tracker.py b/libs/hachoir_parser/common/tracker.py new file mode 100644 index 0000000..27a38e8 --- /dev/null +++ b/libs/hachoir_parser/common/tracker.py @@ -0,0 +1,10 @@ +""" +Shared code for tracker parser. +""" + +NOTE_NAME = {} +NOTES = ("C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "G#", "A", "A#", "B") +for octave in xrange(10): + for index, note in enumerate(NOTES): + NOTE_NAME[octave*12+index] = "%s (octave %s)" % (note, octave) + diff --git a/libs/hachoir_parser/common/win32.py b/libs/hachoir_parser/common/win32.py new file mode 100644 index 0000000..177190e --- /dev/null +++ b/libs/hachoir_parser/common/win32.py @@ -0,0 +1,154 @@ +from hachoir_core.field import (FieldSet, + UInt16, UInt32, Enum, String, Bytes, Bits, TimestampUUID60) +from hachoir_parser.video.fourcc import video_fourcc_name +from hachoir_core.bits import str2hex +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_parser.network.common import MAC48_Address + +# Dictionary: Windows codepage => Python charset name +CODEPAGE_CHARSET = { + 874: "CP874", +# 932: Japanese Shift-JIS +# 936: Simplified Chinese GBK +# 949: Korean +# 950: Traditional Chinese Big5 + 1250: "WINDOWS-1250", + 1251: "WINDOWS-1251", + 1252: "WINDOWS-1252", + 1253: "WINDOWS-1253", + 1254: "WINDOWS-1254", + 1255: "WINDOWS-1255", + 1256: "WINDOWS-1256", + 1257: "WINDOWS-1257", + 1258: "WINDOWS-1258", + 65001: "UTF-8", +} + +class PascalStringWin32(FieldSet): + def __init__(self, parent, name, description=None, strip=None, charset="UTF-16-LE"): + FieldSet.__init__(self, parent, name, description) + length = self["length"].value + self._size = 32 + length * 16 + self.strip = strip + self.charset = charset + + def createFields(self): + yield UInt32(self, "length", "Length in widechar 
characters") + size = self["length"].value + if size: + yield String(self, "text", size*2, charset=self.charset, strip=self.strip) + + def createValue(self): + if "text" in self: + return self["text"].value + else: + return None + +class GUID(FieldSet): + """ + Windows 128 bits Globally Unique Identifier (GUID) + + See RFC 4122 + """ + static_size = 128 + NULL = "00000000-0000-0000-0000-000000000000" + FIELD_NAMES = { + 3: ("sha1_high", "sha1_low"), + 4: ("random_high", "random_low"), + 5: ("md5_high", "md5_low"), + } + VERSION_NAME = { + 1: "Timestamp & MAC-48", + 2: "DCE Security version", + 3: "Name SHA-1 hash", + 4: "Randomly generated", + 5: "Name MD5 hash", + } + VARIANT_NAME = { + 0: "NCS", + 2: "Leach-Salz", + # 5: Microsoft Corporation? + 6: "Microsoft Corporation", + 7: "Reserved Future", + } + def __init__(self, *args): + FieldSet.__init__(self, *args) + self.version = self.stream.readBits(self.absolute_address + 32 + 16 + 12, 4, self.endian) + + def createFields(self): + if self.version == 1: + yield TimestampUUID60(self, "time") + yield Enum(Bits(self, "version", 4), self.VERSION_NAME) + yield Enum(Bits(self, "variant", 3), self.VARIANT_NAME) + yield textHandler(Bits(self, "clock", 13), hexadecimal) +# yield textHandler(Bits(self, "clock", 16), hexadecimal) + if self.version == 1: + yield MAC48_Address(self, "mac", "IEEE 802 MAC address") + else: + yield Bytes(self, "node", 6) + else: + namea, nameb = self.FIELD_NAMES.get( + self.version, ("data_a", "data_b")) + yield textHandler(Bits(self, namea, 60), hexadecimal) + yield Enum(Bits(self, "version", 4), self.VERSION_NAME) + yield Enum(Bits(self, "variant", 3), self.VARIANT_NAME) + yield textHandler(Bits(self, nameb, 61), hexadecimal) + + def createValue(self): + addr = self.absolute_address + a = self.stream.readBits (addr, 32, self.endian) + b = self.stream.readBits (addr + 32, 16, self.endian) + c = self.stream.readBits (addr + 48, 16, self.endian) + d = self.stream.readBytes(addr + 64, 2) + e = 
self.stream.readBytes(addr + 80, 6) + return "%08X-%04X-%04X-%s-%s" % (a, b, c, str2hex(d), str2hex(e)) + + def createDisplay(self): + value = self.value + if value == self.NULL: + name = "Null GUID: " + else: + name = "GUID v%u (%s): " % (self.version, self["version"].display) + return name + value + + def createRawDisplay(self): + value = self.stream.readBytes(self.absolute_address, 16) + return str2hex(value, format=r"\x%02x") + +class BitmapInfoHeader(FieldSet): + """ Win32 BITMAPINFOHEADER structure from GDI """ + static_size = 40*8 + + COMPRESSION_NAME = { + 0: u"Uncompressed (RGB)", + 1: u"RLE (8 bits)", + 2: u"RLE (4 bits)", + 3: u"Bitfields", + 4: u"JPEG", + 5: u"PNG" + } + + def __init__(self, parent, name, use_fourcc=False): + FieldSet.__init__(self, parent, name) + self._use_fourcc = use_fourcc + + def createFields(self): + yield UInt32(self, "hdr_size", "Header size (in bytes) (=40)") + yield UInt32(self, "width", "Width") + yield UInt32(self, "height", "Height") + yield UInt16(self, "nb_planes", "Color planes") + yield UInt16(self, "bpp", "Bits/pixel") + if self._use_fourcc: + yield Enum(String(self, "codec", 4, charset="ASCII"), video_fourcc_name) + else: + yield Enum(UInt32(self, "codec", "Compression"), self.COMPRESSION_NAME) + yield UInt32(self, "size", "Image size (in bytes)") + yield UInt32(self, "xres", "X pixels per meter") + yield UInt32(self, "yres", "Y pixels per meter") + yield UInt32(self, "color_used", "Number of used colors") + yield UInt32(self, "color_important", "Number of important colors") + + def createDescription(self): + return "Bitmap info header: %ux%u pixels, %u bits/pixel" % \ + (self["width"].value, self["height"].value, self["bpp"].value) + diff --git a/libs/hachoir_parser/common/win32_lang_id.py b/libs/hachoir_parser/common/win32_lang_id.py new file mode 100644 index 0000000..a5da66f --- /dev/null +++ b/libs/hachoir_parser/common/win32_lang_id.py @@ -0,0 +1,136 @@ +""" +Windows 2000 - List of Locale IDs and Language 
Groups + +Original data table: +http://www.microsoft.com/globaldev/reference/win2k/setup/lcid.mspx +""" + +LANGUAGE_ID = { + 0x0436: u"Afrikaans", + 0x041c: u"Albanian", + 0x0401: u"Arabic Saudi Arabia", + 0x0801: u"Arabic Iraq", + 0x0c01: u"Arabic Egypt", + 0x1001: u"Arabic Libya", + 0x1401: u"Arabic Algeria", + 0x1801: u"Arabic Morocco", + 0x1c01: u"Arabic Tunisia", + 0x2001: u"Arabic Oman", + 0x2401: u"Arabic Yemen", + 0x2801: u"Arabic Syria", + 0x2c01: u"Arabic Jordan", + 0x3001: u"Arabic Lebanon", + 0x3401: u"Arabic Kuwait", + 0x3801: u"Arabic UAE", + 0x3c01: u"Arabic Bahrain", + 0x4001: u"Arabic Qatar", + 0x042b: u"Armenian", + 0x042c: u"Azeri Latin", + 0x082c: u"Azeri Cyrillic", + 0x042d: u"Basque", + 0x0423: u"Belarusian", + 0x0402: u"Bulgarian", + 0x0403: u"Catalan", + 0x0404: u"Chinese Taiwan", + 0x0804: u"Chinese PRC", + 0x0c04: u"Chinese Hong Kong", + 0x1004: u"Chinese Singapore", + 0x1404: u"Chinese Macau", + 0x041a: u"Croatian", + 0x0405: u"Czech", + 0x0406: u"Danish", + 0x0413: u"Dutch Standard", + 0x0813: u"Dutch Belgian", + 0x0409: u"English United States", + 0x0809: u"English United Kingdom", + 0x0c09: u"English Australian", + 0x1009: u"English Canadian", + 0x1409: u"English New Zealand", + 0x1809: u"English Irish", + 0x1c09: u"English South Africa", + 0x2009: u"English Jamaica", + 0x2409: u"English Caribbean", + 0x2809: u"English Belize", + 0x2c09: u"English Trinidad", + 0x3009: u"English Zimbabwe", + 0x3409: u"English Philippines", + 0x0425: u"Estonian", + 0x0438: u"Faeroese", + 0x0429: u"Farsi", + 0x040b: u"Finnish", + 0x040c: u"French Standard", + 0x080c: u"French Belgian", + 0x0c0c: u"French Canadian", + 0x100c: u"French Swiss", + 0x140c: u"French Luxembourg", + 0x180c: u"French Monaco", + 0x0437: u"Georgian", + 0x0407: u"German Standard", + 0x0807: u"German Swiss", + 0x0c07: u"German Austrian", + 0x1007: u"German Luxembourg", + 0x1407: u"German Liechtenstein", + 0x0408: u"Greek", + 0x040d: u"Hebrew", + 0x0439: u"Hindi", + 0x040e: 
u"Hungarian", + 0x040f: u"Icelandic", + 0x0421: u"Indonesian", + 0x0410: u"Italian Standard", + 0x0810: u"Italian Swiss", + 0x0411: u"Japanese", + 0x043f: u"Kazakh", + 0x0457: u"Konkani", + 0x0412: u"Korean", + 0x0426: u"Latvian", + 0x0427: u"Lithuanian", + 0x042f: u"Macedonian", + 0x043e: u"Malay Malaysia", + 0x083e: u"Malay Brunei Darussalam", + 0x044e: u"Marathi", + 0x0414: u"Norwegian Bokmal", + 0x0814: u"Norwegian Nynorsk", + 0x0415: u"Polish", + 0x0416: u"Portuguese Brazilian", + 0x0816: u"Portuguese Standard", + 0x0418: u"Romanian", + 0x0419: u"Russian", + 0x044f: u"Sanskrit", + 0x081a: u"Serbian Latin", + 0x0c1a: u"Serbian Cyrillic", + 0x041b: u"Slovak", + 0x0424: u"Slovenian", + 0x040a: u"Spanish Traditional Sort", + 0x080a: u"Spanish Mexican", + 0x0c0a: u"Spanish Modern Sort", + 0x100a: u"Spanish Guatemala", + 0x140a: u"Spanish Costa Rica", + 0x180a: u"Spanish Panama", + 0x1c0a: u"Spanish Dominican Republic", + 0x200a: u"Spanish Venezuela", + 0x240a: u"Spanish Colombia", + 0x280a: u"Spanish Peru", + 0x2c0a: u"Spanish Argentina", + 0x300a: u"Spanish Ecuador", + 0x340a: u"Spanish Chile", + 0x380a: u"Spanish Uruguay", + 0x3c0a: u"Spanish Paraguay", + 0x400a: u"Spanish Bolivia", + 0x440a: u"Spanish El Salvador", + 0x480a: u"Spanish Honduras", + 0x4c0a: u"Spanish Nicaragua", + 0x500a: u"Spanish Puerto Rico", + 0x0441: u"Swahili", + 0x041d: u"Swedish", + 0x081d: u"Swedish Finland", + 0x0449: u"Tamil", + 0x0444: u"Tatar", + 0x041e: u"Thai", + 0x041f: u"Turkish", + 0x0422: u"Ukrainian", + 0x0420: u"Urdu", + 0x0443: u"Uzbek Latin", + 0x0843: u"Uzbek Cyrillic", + 0x042a: u"Vietnamese", +} + diff --git a/libs/hachoir_parser/container/__init__.py b/libs/hachoir_parser/container/__init__.py new file mode 100644 index 0000000..6fd7d3e --- /dev/null +++ b/libs/hachoir_parser/container/__init__.py @@ -0,0 +1,7 @@ +from hachoir_parser.container.asn1 import ASN1File +from hachoir_parser.container.mkv import MkvFile +from hachoir_parser.container.ogg import OggFile, 
OggStream +from hachoir_parser.container.riff import RiffFile +from hachoir_parser.container.swf import SwfFile +from hachoir_parser.container.realmedia import RealMediaFile + diff --git a/libs/hachoir_parser/container/action_script.py b/libs/hachoir_parser/container/action_script.py new file mode 100644 index 0000000..4e22cef --- /dev/null +++ b/libs/hachoir_parser/container/action_script.py @@ -0,0 +1,661 @@ +""" +SWF (Macromedia/Adobe Flash) file parser. + +Documentation: + + - Alexis' SWF Reference: + http://www.m2osw.com/swf_alexref.html + - Tamarin ABC format: + http://www.m2osw.com/abc_format.html + +Authors: Sebastien Ponce, Robert Xiao +Creation date: 26 April 2008 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + Bit, Bits, UInt8, UInt32, Int16, UInt16, Float32, Float64, CString, Enum, + Bytes, RawBytes, NullBits, String, SubFile, Field) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_core.field.float import FloatExponent +from struct import unpack + +class FlashPackedInteger(Bits): + def __init__(self, parent, name, signed=False, nbits=30, description=None): + Bits.__init__(self, parent, name, 8, description) + stream = self._parent.stream + addr = self.absolute_address + size = 0 + value = 0 + mult = 1 + while True: + byte = stream.readBits(addr+size, 8, LITTLE_ENDIAN) + value += mult * (byte & 0x7f) + size += 8 + mult <<= 7 + if byte < 128: + break + self._size = size + if signed and (1 << (nbits-1)) <= value: + value -= (1 << nbits) + self.createValue = lambda: value + +class FlashU30(FlashPackedInteger): + def __init__(self, parent, name, description=None): + FlashPackedInteger.__init__(self, parent, name, signed=False, nbits=30, description=description) + +class FlashS32(FlashPackedInteger): + def __init__(self, parent, name, description=None): + FlashPackedInteger.__init__(self, parent, name, signed=True, nbits=32, description=description) + +class 
FlashU32(FlashPackedInteger): + def __init__(self, parent, name, description=None): + FlashPackedInteger.__init__(self, parent, name, signed=False, nbits=32, description=description) + +class FlashFloat64(FieldSet): + def createFields(self): + yield Bits(self, "mantissa_high", 20) + yield FloatExponent(self, "exponent", 11) + yield Bit(self, "negative") + yield Bits(self, "mantissa_low", 32) + + def createValue(self): + # Manual computation: + # mantissa = mantissa_high * 2^32 + mantissa_low + # float = 2^exponent + (1 + mantissa / 2^52) + # (and float is negative if negative=True) + bytes = self.parent.stream.readBytes( + self.absolute_address, self.size//8) + # Mix bytes: xxxxyyyy <=> yyyyxxxx + bytes = bytes[4:8] + bytes[0:4] + return unpack('" + + def createValue(self): + if "data" in self: + return self["data"].value + else: + return "" + +class ABCConstantNamespace(FieldSet): + NAMESPACE_KIND = {8: "Namespace", + 5: "PrivateNamespace", + 22: "PackageNamespace", + 23: "PacakgeInternalNamespace", + 24: "ProtectedNamespace", + 25: "ExplicitNamespace", + 26: "MultinameL"} + def createFields(self): + yield Enum(UInt8(self, "kind"), self.NAMESPACE_KIND) + yield ABCStringIndex(self, "name_index") + + def createDisplay(self): + return "%s %s"%(self["kind"].display, self["name_index"].display) + + def createValue(self): + return self["name_index"].value + +class ABCConstantNamespaceSet(FieldSet): + def createFields(self): + ctr = FlashU30(self, "namespace_count") + yield ctr + for i in xrange(ctr.value): + yield ABCNSIndex(self, "namespace_index[]") + + def createDescription(self): + ret = [fld.display for fld in self.array("namespace_index")] + return ', '.join(ret) + +class ABCConstantMultiname(FieldSet): + MULTINAME_KIND = {7: "Qname", + 13: "QnameA", + 9: "Multiname", + 14: "MultinameA", + 15: "RTQname", + 16: "RTQnameA", + 27: "MultinameL", + 17: "RTQnameL", + 18: "RTQnameLA"} + def createFields(self): + yield Enum(UInt8(self, "kind"), self.MULTINAME_KIND) + kind 
= self["kind"].value + if kind in (7,13): # Qname + yield FlashU30(self, "namespace_index") + yield ABCStringIndex(self, "name_index") + elif kind in (9,14): # Multiname + yield ABCStringIndex(self, "name_index") + yield FlashU30(self, "namespace_set_index") + elif kind in (15,16): # RTQname + yield ABCStringIndex(self, "name_index") + elif kind == 27: # MultinameL + yield FlashU30(self, "namespace_set_index") + elif kind in (17,18): # RTQnameL + pass + + def createDisplay(self): + kind = self["kind"].display + if "name_index" in self: + return kind + " " + self["name_index"].display + return kind + + def createValue(self): + return self["kind"].value + +class ABCTrait(FieldSet): + TRAIT_KIND = {0: "slot", + 1: "method", + 2: "getter", + 3: "setter", + 4: "class", + 5: "function", + 6: "const",} + def createFields(self): + yield ABCMultinameIndex(self, "name_index") + yield Enum(Bits(self, "kind", 4), self.TRAIT_KIND) + yield Enum(Bit(self, "is_final"), {True:'final',False:'virtual'}) + yield Enum(Bit(self, "is_override"), {True:'override',False:'new'}) + yield Bit(self, "has_metadata") + yield Bits(self, "unused", 1) + kind = self["kind"].value + if kind in (0,6): # slot, const + yield FlashU30(self, "slot_id") + yield ABCMultinameIndex(self, "type_index") + ### TODO reference appropriate constant pool using value_kind + yield FlashU30(self, "value_index") + if self['value_index'].value != 0: + yield UInt8(self, "value_kind") + elif kind in (1,2,3): # method, getter, setter + yield FlashU30(self, "disp_id") + yield ABCMethodIndex(self, "method_info") + elif kind == 4: # class + yield FlashU30(self, "disp_id") + yield FlashU30(self, "class_info") + elif kind == 5: # function + yield FlashU30(self, "disp_id") + yield ABCMethodIndex(self, "method_info") + if self['has_metadata'].value: + yield ABCObjectArray(self, "metadata", FlashU30) + +class ABCValueKind(FieldSet): + def createFields(self): + yield FlashU30(self, "value_index") + yield UInt8(self, "value_kind") + 
+class ABCMethodInfo(FieldSet): + def createFields(self): + yield FlashU30(self, "param_count") + yield ABCMultinameIndex(self, "ret_type") + for i in xrange(self["param_count"].value): + yield ABCMultinameIndex(self, "param_type[]") + yield ABCStringIndex(self, "name_index") + yield Bit(self, "need_arguments") + yield Bit(self, "need_activation") + yield Bit(self, "need_rest") + yield Bit(self, "has_optional") + yield Bit(self, "ignore_rest") + yield Bit(self, "explicit") + yield Bit(self, "setsdxns") + yield Bit(self, "has_paramnames") + if self["has_optional"].value: + yield ABCObjectArray(self, "optional", ABCValueKind) + if self["has_paramnames"].value: + for i in xrange(self["param_count"].value): + yield FlashU30(self, "param_name[]") + + def createDescription(self): + ret = GetMultiname(self, self["ret_type"].value) + ret += " " + self["name_index"].display + ret += "(" + ", ".join(GetMultiname(self, fld.value) for fld in self.array("param_type")) + ")" + return ret + +class ABCMetadataInfo(FieldSet): + def createFields(self): + yield ABCStringIndex(self, "name_index") + yield FlashU30(self, "values_count") + count = self["values_count"].value + for i in xrange(count): + yield FlashU30(self, "key[]") + for i in xrange(count): + yield FlashU30(self, "value[]") + +class ABCInstanceInfo(FieldSet): + def createFields(self): + yield ABCMultinameIndex(self, "name_index") + yield ABCMultinameIndex(self, "super_index") + yield Bit(self, "is_sealed") + yield Bit(self, "is_final") + yield Bit(self, "is_interface") + yield Bit(self, "is_protected") + yield Bits(self, "unused", 4) + if self['is_protected'].value: + yield ABCNSIndex(self, "protectedNS") + yield FlashU30(self, "interfaces_count") + for i in xrange(self["interfaces_count"].value): + yield ABCMultinameIndex(self, "interface[]") + yield ABCMethodIndex(self, "iinit_index") + yield ABCObjectArray(self, "trait", ABCTrait) + +class ABCClassInfo(FieldSet): + def createFields(self): + yield ABCMethodIndex(self, 
"cinit_index") + yield ABCObjectArray(self, "trait", ABCTrait) + +class ABCScriptInfo(FieldSet): + def createFields(self): + yield ABCMethodIndex(self, "init_index") + yield ABCObjectArray(self, "trait", ABCTrait) + +class ABCException(FieldSet): + def createFields(self): + yield FlashU30(self, "start") + yield FlashU30(self, "end") + yield FlashU30(self, "target") + yield FlashU30(self, "type_index") + yield FlashU30(self, "name_index") + +class ABCMethodBody(FieldSet): + def createFields(self): + yield ABCMethodIndex(self, "method_info") + yield FlashU30(self, "max_stack") + yield FlashU30(self, "max_regs") + yield FlashU30(self, "scope_depth") + yield FlashU30(self, "max_scope") + yield FlashU30(self, "code_length") + yield RawBytes(self, "code", self['code_length'].value) + yield ABCObjectArray(self, "exception", ABCException) + yield ABCObjectArray(self, "trait", ABCTrait) + +def parseABC(parent, size): + code = parent["code"].value + if code == parent.TAG_DO_ABC_DEFINE: + yield UInt32(parent, "action_flags") + yield CString(parent, "action_name") + yield UInt16(parent, "minor_version") + yield UInt16(parent, "major_version") + parent.isABC = True + + yield ABCConstantPool(parent, "int", FlashS32) + yield ABCConstantPool(parent, "uint", FlashU32) + yield ABCConstantPool(parent, "double", Float64) + yield ABCConstantPool(parent, "string", ABCConstantString) + yield ABCConstantPool(parent, "namespace", ABCConstantNamespace) + yield ABCConstantPool(parent, "namespace_set", ABCConstantNamespaceSet) + yield ABCConstantPool(parent, "multiname", ABCConstantMultiname) + + yield ABCObjectArray(parent, "method", ABCMethodInfo) + yield ABCObjectArray(parent, "metadata", ABCMetadataInfo) + yield ABCClassArray(parent, "class") + yield ABCObjectArray(parent, "script", ABCScriptInfo) + yield ABCObjectArray(parent, "body", ABCMethodBody) + diff --git a/libs/hachoir_parser/container/asn1.py b/libs/hachoir_parser/container/asn1.py new file mode 100644 index 0000000..dfac847 --- 
/dev/null +++ b/libs/hachoir_parser/container/asn1.py @@ -0,0 +1,282 @@ +""" +Abstract Syntax Notation One (ASN.1) parser. + +Technical informations: +* PER standard + http://www.tu.int/ITU-T/studygroups/com17/languages/X.691-0207.pdf +* Python library + http://pyasn1.sourceforge.net/ +* Specification of Abstract Syntax Notation One (ASN.1) + ISO/IEC 8824:1990 Information Technology +* Specification of Basic Encoding Rules (BER) for ASN.1 + ISO/IEC 8825:1990 Information Technology +* OpenSSL asn1parser, use command: + openssl asn1parse -i -inform DER -in file.der +* ITU-U recommendations: + http://www.itu.int/rec/T-REC-X/en + (X.680, X.681, X.682, X.683, X.690, X.691, X.692, X.693, X.694) +* dumpasn1 + http://www.cs.auckland.ac.nz/~pgut001/dumpasn1.c + +General information: +* Wikipedia (english) article + http://en.wikipedia.org/wiki/Abstract_Syntax_Notation_One +* ASN.1 information site + http://asn1.elibel.tm.fr/en/ +* ASN.1 consortium + http://www.asn1.org/ + +Encodings: +* Basic Encoding Rules (BER) +* Canonical Encoding Rules (CER) -- DER derivative that is not widely used +* Distinguished Encoding Rules (DER) -- used for encrypted applications +* XML Encoding Rules (XER) +* Packed Encoding Rules (PER) -- result in the fewest number of bytes +* Generic String Encoding Rules (GSER) +=> Are encodings compatibles? Which encodings are supported?? 

Author: Victor Stinner
Creation date: 24 september 2006
"""

from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
    FieldError, ParserError,
    Bit, Bits, Bytes, UInt8, GenericInteger, String,
    Field, Enum, RawBytes)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.tools import createDict, humanDatetime
from hachoir_core.stream import InputStreamError
from hachoir_core.text_handler import textHandler

# --- Field parser ---

class ASNInteger(Field):
    """
    Integer: two cases:
    - first byte in 0..127: it's the value
    - first byte in 128..255: byte & 127 is the number of bytes,
      next bytes are the value
    """
    def __init__(self, parent, name, description=None):
        Field.__init__(self, parent, name, 8, description)
        stream = self._parent.stream
        addr = self.absolute_address
        # Read the first byte: either the value itself (short form) or,
        # with the high bit set, the number of value bytes (long form).
        value = stream.readBits(addr, 8, BIG_ENDIAN)
        if 128 <= value:
            nbits = (value & 127) * 8
            if not nbits:
                raise ParserError("ASN.1: invalid ASN integer size (zero)")
            if 64 < nbits:
                # Arbitrary limit to catch errors
                raise ParserError("ASN.1: ASN integer is limited to 64 bits")
            # Field covers the length byte plus the value bytes.
            self._size = 8 + nbits
            value = stream.readBits(addr+8, nbits, BIG_ENDIAN)
        # Value is fully decoded at construction time; freeze it in a lambda.
        self.createValue = lambda: value

class OID_Integer(Bits):
    # One sub-identifier of an OBJECT IDENTIFIER: base-128 encoding,
    # 7 value bits per byte, high bit set on every byte but the last.
    def __init__(self, parent, name, description=None):
        Bits.__init__(self, parent, name, 8, description)
        stream = self._parent.stream
        addr = self.absolute_address
        size = 8
        value = 0
        byte = stream.readBits(addr, 8, BIG_ENDIAN)
        value = byte & 127
        while 128 <= byte:
            addr += 8
            size += 8
            if 64 < size:
                # Arbitrary limit to catch errors
                # NOTE(review): message is missing "to" — should read
                # "limited to 64 bits".
                raise ParserError("ASN.1: Object identifier is limited 64 bits")
            byte = stream.readBits(addr, 8, BIG_ENDIAN)
            value = (value << 7) + (byte & 127)
        self._size = size
        self.createValue = lambda: value

def readSequence(self, content_size):
    # SEQUENCE: parse nested objects until the declared size is consumed.
    while self.current_size < self.size:
        yield Object(self, "item[]")

def readSet(self, content_size):
    # SET: the whole content is wrapped in a single nested object.
    yield Object(self, "value", size=content_size*8)

def readASCIIString(self, content_size):
    yield String(self, "value", content_size, charset="ASCII")

def readUTF8String(self, content_size):
    yield String(self, "value", content_size, charset="UTF-8")

def readBMPString(self, content_size):
    yield String(self, "value", content_size, charset="UTF-16")

def readBitString(self, content_size):
    # First byte counts the unused (padding) bits of the last byte.
    yield UInt8(self, "padding_size", description="Number of unused bits")
    if content_size > 1:
        yield Bytes(self, "value", content_size-1)

def readOctetString(self, content_size):
    yield Bytes(self, "value", content_size)

def formatObjectID(fieldset):
    # Join "first" (the packed X.Y pair) with every "item[...]" value
    # using the usual dotted OID notation.
    text = [ fieldset["first"].display ]
    items = [ field for field in fieldset if field.name.startswith("item[") ]
    text.extend( str(field.value) for field in items )
    return ".".join(text)

def readObjectID(self, content_size):
    # First byte packs the first two OID components (see formatFirstObjectID),
    # the rest are base-128 sub-identifiers.
    yield textHandler(UInt8(self, "first"), formatFirstObjectID)
    while self.current_size < self.size:
        yield OID_Integer(self, "item[]")

def readBoolean(self, content_size):
    if content_size != 1:
        raise ParserError("Overlong boolean: got %s bytes, expected 1 byte"%content_size)
    yield textHandler(UInt8(self, "value"), lambda field:str(bool(field.value)))

def readInteger(self, content_size):
    # Always signed?
    yield GenericInteger(self, "value", True, content_size*8)

# --- Format ---

def formatFirstObjectID(field):
    # The first OID byte encodes first*40 + second.
    value = field.value
    return "%u.%u" % (value // 40, value % 40)

def formatValue(fieldset):
    return fieldset["value"].display

def formatUTCTime(fieldset):
    # UTCTime: "YYMMDDhhmm[ss]" with optional trailing "Z".
    # Two-digit year pivots at 50: <50 => 20xx, otherwise 19xx.
    import datetime
    value = fieldset["value"].value
    year = int(value[0:2])
    if year < 50:
        year += 2000
    else:
        year += 1900
    month = int(value[2:4])
    day = int(value[4:6])
    hour = int(value[6:8])
    minute = int(value[8:10])
    if value[-1] == "Z":
        second = int(value[10:12])
        dt = datetime.datetime(year, month, day, hour, minute, second)
    else:
        # Skip timezone...
        dt = datetime.datetime(year, month, day, hour, minute)
    return humanDatetime(dt)

# --- Object parser ---

class Object(FieldSet):
    # Universal-class tag table:
    # tag -> (field name, payload parser, description, description formatter)
    TYPE_INFO = {
        0: ("end[]", None, "End (reserved for BER, None)", None), # TODO: Write parser
        1: ("boolean[]", readBoolean, "Boolean", None),
        2: ("integer[]", readInteger, "Integer", None),
        3: ("bit_str[]", readBitString, "Bit string", None),
        4: ("octet_str[]", readOctetString, "Octet string", None),
        5: ("null[]", None, "NULL (empty, None)", None),
        6: ("obj_id[]", readObjectID, "Object identifier", formatObjectID),
        7: ("obj_desc[]", None, "Object descriptor", None), # TODO: Write parser
        8: ("external[]", None, "External, instance of", None), # TODO: Write parser # External?
        9: ("real[]", readASCIIString, "Real number", None), # TODO: Write parser
        10: ("enum[]", readInteger, "Enumerated", None),
        # NOTE(review): tag 12 is the UTF8String type, but the description
        # below says "Printable string" (duplicate of tag 19).
        12: ("utf8_str[]", readUTF8String, "Printable string", None),
        11: ("embedded[]", None, "Embedded PDV", None), # TODO: Write parser
        13: ("rel_obj_id[]", None, "Relative object identifier", None), # TODO: Write parser
        14: ("time[]", None, "Time", None), # TODO: Write parser
        # 15: invalid??? sequence of???
        16: ("seq[]", readSequence, "Sequence", None),
        17: ("set[]", readSet, "Set", None),
        18: ("num_str[]", readASCIIString, "Numeric string", None),
        19: ("print_str[]", readASCIIString, "Printable string", formatValue),
        20: ("teletex_str[]", readASCIIString, "Teletex (T61, None) string", None),
        21: ("videotex_str[]", readASCIIString, "Videotex string", None),
        22: ("ia5_str[]", readASCIIString, "IA5 string", formatValue),
        23: ("utc_time[]", readASCIIString, "UTC time", formatUTCTime),
        24: ("general_time[]", readASCIIString, "Generalized time", None),
        25: ("graphic_str[]", readASCIIString, "Graphic string", None),
        26: ("visible_str[]", readASCIIString, "Visible (ISO64, None) string", None),
        27: ("general_str[]", readASCIIString, "General string", None),
        28: ("universal_str[]", readASCIIString, "Universal string", None),
        29: ("unrestricted_str[]", readASCIIString, "Unrestricted string", None),
        30: ("bmp_str[]", readBMPString, "BMP string", None),
        # 31: multiple octet tag number, TODO: not supported

        # Extended tag values:
        # 31: Date
        # 32: Time of day
        # 33: Date-time
        # 34: Duration
    }
    TYPE_DESC = createDict(TYPE_INFO, 2)

    CLASS_DESC = {0: "universal", 1: "application", 2: "context", 3: "private"}
    FORM_DESC = {False: "primitive", True: "constructed"}

    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # Low 5 bits of the identifier octet are the tag number.
        key = self["type"].value & 31
        if self['class'].value == 0:
            # universal object
            if key in self.TYPE_INFO:
                self._name, self._handler, self._description, create_desc = self.TYPE_INFO[key]
                if create_desc:
                    self.createDescription = lambda: "%s: %s" % (self.TYPE_INFO[key][2], create_desc(self))
                    self._description = None
            elif key == 31:
                raise ParserError("ASN.1 Object: tag bigger than 30 are not supported")
            else:
                self._handler = None
        elif self['form'].value:
            # constructed: treat as sequence
            self._name = 'seq[]'
            self._handler = readSequence
            self._description = 'constructed object type %i' % key
        else:
            # primitive, context/private
            self._name = 'raw[]'
            self._handler = readASCIIString
            self._description = '%s object type %i' % (self['class'].display, key)
        # Total size = header up to and including the length field,
        # plus the declared content length in bits.
        field = self["size"]
        self._size = field.address + field.size + field.value*8

    def createFields(self):
        # Identifier octet: class (2 bits), form (1 bit), tag (5 bits).
        yield Enum(Bits(self, "class", 2), self.CLASS_DESC)
        yield Enum(Bit(self, "form"), self.FORM_DESC)
        if self['class'].value == 0:
            yield Enum(Bits(self, "type", 5), self.TYPE_DESC)
        else:
            yield Bits(self, "type", 5)
        yield ASNInteger(self, "size", "Size in bytes")
        size = self["size"].value
        if size:
            if self._handler:
                # Delegate content parsing to the tag-specific reader.
                for field in self._handler(self, size):
                    yield field
            else:
                yield RawBytes(self, "raw", size)

class ASN1File(Parser):
    PARSER_TAGS = {
        "id": "asn1",
        "category": "container",
        "file_ext": ("der",),
        "min_size": 16,
        "description": "Abstract Syntax Notation One (ASN.1)"
    }
    endian = BIG_ENDIAN

    def validate(self):
        # A valid DER file is exactly one root object spanning the whole file.
        try:
            root = self[0]
        except (InputStreamError, FieldError):
            return "Unable to create root object"
        if root.size != self.size:
            return "Invalid root object size"
        return True

    def createFields(self):
        yield Object(self, "root")

diff --git a/libs/hachoir_parser/container/mkv.py b/libs/hachoir_parser/container/mkv.py
new file mode 100644
index 0000000..4e90f46
--- /dev/null
+++ b/libs/hachoir_parser/container/mkv.py
@@ -0,0 +1,598 @@
#
# Matroska parser
# Author Julien Muchembled
# Created: 8 june 2006
#

from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, Link,
    MissingField, ParserError,
    Enum as _Enum, String as _String,
    Float32, Float64,
    NullBits, Bits, Bit, RawBytes, Bytes,
    Int16, GenericInteger)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.iso639 import ISO639_2
from hachoir_core.tools import humanDatetime
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_parser.container.ogg import XiphInt
from datetime import
datetime, timedelta + +class RawInt(GenericInteger): + """ + Raw integer: have to be used in BIG_ENDIAN! + """ + def __init__(self, parent, name, description=None): + GenericInteger.__init__(self, parent, name, False, 8, description) + i = GenericInteger.createValue(self) + if i == 0: + raise ParserError('Invalid integer length!') + while i < 0x80: + self._size += 8 + i <<= 1 + +class Unsigned(RawInt): + def __init__(self, parent, name, description=None): + RawInt.__init__(self, parent, name, description) + + def hasValue(self): + return True + def createValue(self): + header = 1 << self._size / 8 * 7 + value = RawInt.createValue(self) - header + if value + 1 == header: + return None + return value + +class Signed(Unsigned): + def createValue(self): + header = 1 << self._size / 8 * 7 - 1 + value = RawInt.createValue(self) - 3 * header + 1 + if value == header: + return None + return value + +def Enum(parent, enum): + return _Enum(GenericInteger(parent, 'enum', False, parent['size'].value*8), enum) + +def Bool(parent): + return textHandler(GenericInteger(parent, 'bool', False, parent['size'].value*8), + lambda chunk: str(chunk.value != 0)) + +def UInt(parent): + return GenericInteger(parent, 'unsigned', False, parent['size'].value*8) + +def SInt(parent): + return GenericInteger(parent, 'signed', True, parent['size'].value*8) + +def String(parent): + return _String(parent, 'string', parent['size'].value, charset="ASCII") + +def EnumString(parent, enum): + return _Enum(String(parent), enum) + +def Binary(parent): + return RawBytes(parent, 'binary', parent['size'].value) + +class AttachedFile(Bytes): + def __init__(self, parent): + Bytes.__init__(self, parent, 'file', parent['size'].value, None) + def _getFilename(self): + if not hasattr(self, "_filename"): + try: + self._filename = self["../../FileName/unicode"].value + except MissingField: + self._filename = None + return self._filename + def createDescription(self): + filename = self._getFilename() + if filename: + 
return 'File "%s"' % filename + return "('Filename' entry not found)" + def _createInputStream(self, **args): + tags = args.setdefault("tags",[]) + try: + tags.append(("mime", self["../../FileMimeType/string"].value)) + except MissingField: + pass + filename = self._getFilename() + if filename: + tags.append(("filename", filename)) + return Bytes._createInputStream(self, **args) + +def UTF8(parent): + return _String(parent,'unicode', parent['size'].value, charset='UTF-8') + +def Float(parent): + size = parent['size'].value + if size == 4: + return Float32(parent, 'float') + elif size == 8: + return Float64(parent, 'double') + else: + return RawBytes(parent, 'INVALID_FLOAT', size) + +TIMESTAMP_T0 = datetime(2001, 1, 1) + +def dateToDatetime(value): + return TIMESTAMP_T0 + timedelta(microseconds=value//1000) + +def dateToString(field): + return humanDatetime(dateToDatetime(field.value)) + +def Date(parent): + return textHandler(GenericInteger(parent, 'date', True, parent['size'].value*8), + dateToString) + +def SeekID(parent): + return textHandler(GenericInteger(parent, 'binary', False, parent['size'].value*8), + lambda chunk: segment.get(chunk.value, (hexadecimal(chunk),))[0]) + +def CueClusterPosition(parent): + class Cluster(Link): + def createValue(self): + parent = self.parent + segment = parent['.....'] + pos = parent['unsigned'].value * 8 + segment[2].address + return segment.getFieldByAddress(pos, feed=False) + return Cluster(parent, 'cluster') + +def CueTrackPositions(parent): + class Block(Link): + def createValue(self): + parent = self.parent + time = parent['../CueTime/unsigned'].value + track = parent['CueTrack/unsigned'].value + cluster = parent['CueClusterPosition/cluster'].value + time -= cluster['Timecode/unsigned'].value + for field in cluster: + if field.name.startswith('BlockGroup['): + for path in 'Block/block', 'SimpleBlock': + try: + block = field[path] + if block['track'].value == track and \ + block['timecode'].value == time: + return field + 
except MissingField: + pass + parent.error('Cue point not found') + return self + return Block(parent, 'block') + +class Lace(FieldSet): + def __init__(self, parent, lacing, size): + self.n_frames = parent['n_frames'].value + self.createFields = ( self.parseXiph, self.parseFixed, self.parseEBML )[lacing] + FieldSet.__init__(self, parent, 'Lace', size=size * 8) + + def parseXiph(self): + for i in xrange(self.n_frames): + yield XiphInt(self, 'size[]') + for i in xrange(self.n_frames): + yield RawBytes(self, 'frame[]', self['size['+str(i)+']'].value) + yield RawBytes(self,'frame[]', (self._size - self.current_size) / 8) + + def parseEBML(self): + yield Unsigned(self, 'size') + for i in xrange(1, self.n_frames): + yield Signed(self, 'dsize[]') + size = self['size'].value + yield RawBytes(self, 'frame[]', size) + for i in xrange(self.n_frames-1): + size += self['dsize['+str(i)+']'].value + yield RawBytes(self, 'frame[]', size) + yield RawBytes(self,'frame[]', (self._size - self.current_size) / 8) + + def parseFixed(self): + n = self.n_frames + 1 + size = self._size / 8 / n + for i in xrange(n): + yield RawBytes(self, 'frame[]', size) + +class Block(FieldSet): + def __init__(self, parent): + FieldSet.__init__(self, parent, 'block') + self._size = 8 * parent['size'].value + + def lacing(self): + return _Enum(Bits(self, 'lacing', 2), [ 'none', 'Xiph', 'fixed', 'EBML' ]) + + def createFields(self): + yield Unsigned(self, 'track') + yield Int16(self, 'timecode') + + if self.parent._name == 'Block': + yield NullBits(self, 'reserved[]', 4) + yield Bit(self, 'invisible') + yield self.lacing() + yield NullBits(self, 'reserved[]', 1) + elif self.parent._name == 'SimpleBlock[]': + yield Bit(self, 'keyframe') + yield NullBits(self, 'reserved', 3) + yield Bit(self, 'invisible') + yield self.lacing() + yield Bit(self, 'discardable') + else: + yield NullBits(self, 'reserved', 8) + return + + size = (self._size - self.current_size) / 8 + lacing = self['lacing'].value + if lacing: + 
yield textHandler(GenericInteger(self, 'n_frames', False, 8), + lambda chunk: str(chunk.value+1)) + yield Lace(self, lacing - 1, size - 1) + else: + yield RawBytes(self,'frame', size) + +ebml = { + 0x1A45DFA3: ('EBML[]', { + 0x4286: ('EBMLVersion',UInt), + 0x42F7: ('EBMLReadVersion',UInt), + 0x42F2: ('EBMLMaxIDLength',UInt), + 0x42F3: ('EBMLMaxSizeLength',UInt), + 0x4282: ('DocType',String), + 0x4287: ('DocTypeVersion',UInt), + 0x4285: ('DocTypeReadVersion',UInt) + }) +} + +signature = { + 0x7E8A: ('SignatureAlgo', UInt), + 0x7E9A: ('SignatureHash', UInt), + 0x7EA5: ('SignaturePublicKey', Binary), + 0x7EB5: ('Signature', Binary), + 0x7E5B: ('SignatureElements', { + 0x7E7B: ('SignatureElementList[]', { + 0x6532: ('SignedElement[]', Binary) + }) + }) +} + +chapter_atom = { + 0x73C4: ('ChapterUID', UInt), + 0x91: ('ChapterTimeStart', UInt), + 0x92: ('ChapterTimeEnd', UInt), + 0x98: ('ChapterFlagHidden', Bool), + 0x4598: ('ChapterFlagEnabled', Bool), + 0x6E67: ('ChapterSegmentUID', Binary), + 0x6EBC: ('ChapterSegmentEditionUID', Binary), + 0x63C3: ('ChapterPhysicalEquiv', UInt), + 0x8F: ('ChapterTrack', { + 0x89: ('ChapterTrackNumber[]', UInt) + }), + 0x80: ('ChapterDisplay[]', { + 0x85: ('ChapString', UTF8), + 0x437C: ('ChapLanguage[]', String), + 0x437E: ('ChapCountry[]', String) + }), + 0x6944: ('ChapProcess[]', { + 0x6955: ('ChapProcessCodecID', UInt), + 0x450D: ('ChapProcessPrivate', Binary), + 0x6911: ('ChapProcessCommand[]', { + 0x6922: ('ChapProcessTime', UInt), + 0x6933: ('ChapProcessData', Binary) + }) + }) +} + +simple_tag = { + 0x45A3: ('TagName', UTF8), + 0x447A: ('TagLanguage', String), + 0x44B4: ('TagDefault', Bool), # 0x4484 + 0x4487: ('TagString', UTF8), + 0x4485: ('TagBinary', Binary) +} + +segment_seek = { + 0x4DBB: ('Seek[]', { + 0x53AB: ('SeekID', SeekID), + 0x53AC: ('SeekPosition', UInt) + }) +} + +segment_info = { + 0x73A4: ('SegmentUID', Binary), + 0x7384: ('SegmentFilename', UTF8), + 0x3CB923: ('PrevUID', Binary), + 0x3C83AB: ('PrevFilename', 
UTF8), + 0x3EB923: ('NextUID', Binary), + 0x3E83BB: ('NextFilename', UTF8), + 0x4444: ('SegmentFamily[]', Binary), + 0x6924: ('ChapterTranslate[]', { + 0x69FC: ('ChapterTranslateEditionUID[]', UInt), + 0x69BF: ('ChapterTranslateCodec', UInt), + 0x69A5: ('ChapterTranslateID', Binary) + }), + 0x2AD7B1: ('TimecodeScale', UInt), + 0x4489: ('Duration', Float), + 0x4461: ('DateUTC', Date), + 0x7BA9: ('Title', UTF8), + 0x4D80: ('MuxingApp', UTF8), + 0x5741: ('WritingApp', UTF8) +} + +segment_clusters = { + 0xE7: ('Timecode', UInt), + 0x5854: ('SilentTracks', { + 0x58D7: ('SilentTrackNumber[]', UInt) + }), + 0xA7: ('Position', UInt), + 0xAB: ('PrevSize', UInt), + 0xA0: ('BlockGroup[]', { + 0xA1: ('Block', Block), + 0xA2: ('BlockVirtual[]', Block), + 0x75A1: ('BlockAdditions', { + 0xA6: ('BlockMore[]', { + 0xEE: ('BlockAddID', UInt), + 0xA5: ('BlockAdditional', Binary) + }) + }), + 0x9B: ('BlockDuration', UInt), + 0xFA: ('ReferencePriority', UInt), + 0xFB: ('ReferenceBlock[]', SInt), + 0xFD: ('ReferenceVirtual', SInt), + 0xA4: ('CodecState', Binary), + 0x8E: ('Slices[]', { + 0xE8: ('TimeSlice[]', { + 0xCC: ('LaceNumber', UInt), + 0xCD: ('FrameNumber', UInt), + 0xCB: ('BlockAdditionID', UInt), + 0xCE: ('Delay', UInt), + 0xCF: ('Duration', UInt) + }) + }) + }), + 0xA3: ('SimpleBlock[]', Block) +} + +tracks_video = { + 0x9A: ('FlagInterlaced', Bool), + 0x53B8: ('StereoMode', lambda parent: Enum(parent, \ + [ 'mono', 'right eye', 'left eye', 'both eyes' ])), + 0xB0: ('PixelWidth', UInt), + 0xBA: ('PixelHeight', UInt), + 0x54AA: ('PixelCropBottom', UInt), + 0x54BB: ('PixelCropTop', UInt), + 0x54CC: ('PixelCropLeft', UInt), + 0x54DD: ('PixelCropRight', UInt), + 0x54B0: ('DisplayWidth', UInt), + 0x54BA: ('DisplayHeight', UInt), + 0x54B2: ('DisplayUnit', lambda parent: Enum(parent, \ + [ 'pixels', 'centimeters', 'inches' ])), + 0x54B3: ('AspectRatioType', lambda parent: Enum(parent, \ + [ 'free resizing', 'keep aspect ratio', 'fixed' ])), + 0x2EB524: ('ColourSpace', Binary), + 
0x2FB523: ('GammaValue', Float) +} + +tracks_audio = { + 0xB5: ('SamplingFrequency', Float), + 0x78B5: ('OutputSamplingFrequency', Float), + 0x9F: ('Channels', UInt), + 0x7D7B: ('ChannelPositions', Binary), + 0x6264: ('BitDepth', UInt) +} + +tracks_content_encodings = { + 0x6240: ('ContentEncoding[]', { + 0x5031: ('ContentEncodingOrder', UInt), + 0x5032: ('ContentEncodingScope', UInt), + 0x5033: ('ContentEncodingType', UInt), + 0x5034: ('ContentCompression', { + 0x4254: ('ContentCompAlgo', UInt), + 0x4255: ('ContentCompSettings', Binary) + }), + 0x5035: ('ContentEncryption', { + 0x47e1: ('ContentEncAlgo', UInt), + 0x47e2: ('ContentEncKeyID', Binary), + 0x47e3: ('ContentSignature', Binary), + 0x47e4: ('ContentSigKeyID', Binary), + 0x47e5: ('ContentSigAlgo', UInt), + 0x47e6: ('ContentSigHashAlgo', UInt), + }) + }) +} + +segment_tracks = { + 0xAE: ('TrackEntry[]', { + 0xD7: ('TrackNumber', UInt), + 0x73C5: ('TrackUID', UInt), + 0x83: ('TrackType', lambda parent: Enum(parent, { + 0x01: 'video', + 0x02: 'audio', + 0x03: 'complex', + 0x10: 'logo', + 0x11: 'subtitle', + 0x12: 'buttons', + 0x20: 'control' + })), + 0xB9: ('FlagEnabled', Bool), + 0x88: ('FlagDefault', Bool), + 0x55AA: ('FlagForced[]', Bool), + 0x9C: ('FlagLacing', Bool), + 0x6DE7: ('MinCache', UInt), + 0x6DF8: ('MaxCache', UInt), + 0x23E383: ('DefaultDuration', UInt), + 0x23314F: ('TrackTimecodeScale', Float), + 0x537F: ('TrackOffset', SInt), + 0x55EE: ('MaxBlockAdditionID', UInt), + 0x536E: ('Name', UTF8), + 0x22B59C: ('Language', lambda parent: EnumString(parent, ISO639_2)), + 0x86: ('CodecID', String), + 0x63A2: ('CodecPrivate', Binary), + 0x258688: ('CodecName', UTF8), + 0x7446: ('AttachmentLink', UInt), + 0x3A9697: ('CodecSettings', UTF8), + 0x3B4040: ('CodecInfoURL[]', String), + 0x26B240: ('CodecDownloadURL[]', String), + 0xAA: ('CodecDecodeAll', Bool), + 0x6FAB: ('TrackOverlay[]', UInt), + 0x6624: ('TrackTranslate[]', { + 0x66FC: ('TrackTranslateEditionUID[]', UInt), + 0x66BF: ('TrackTranslateCodec', 
UInt), + 0x66A5: ('TrackTranslateTrackID', Binary) + }), + 0xE0: ('Video', tracks_video), + 0xE1: ('Audio', tracks_audio), + 0x6d80: ('ContentEncodings', tracks_content_encodings) + }) +} + +segment_cues = { + 0xBB: ('CuePoint[]', { + 0xB3: ('CueTime', UInt), + 0xB7: ('CueTrackPositions[]', CueTrackPositions, { + 0xF7: ('CueTrack', UInt), + 0xF1: ('CueClusterPosition', CueClusterPosition, UInt), + 0x5378: ('CueBlockNumber', UInt), + 0xEA: ('CueCodecState', UInt), + 0xDB: ('CueReference[]', { + 0x96: ('CueRefTime', UInt), + 0x97: ('CueRefCluster', UInt), + 0x535F: ('CueRefNumber', UInt), + 0xEB: ('CueRefCodecState', UInt) + }) + }) + }) +} + +segment_attachments = { + 0x61A7: ('AttachedFile[]', { + 0x467E: ('FileDescription', UTF8), + 0x466E: ('FileName', UTF8), + 0x4660: ('FileMimeType', String), + 0x465C: ('FileData', AttachedFile), + 0x46AE: ('FileUID', UInt), + 0x4675: ('FileReferral', Binary) + }) +} + +segment_chapters = { + 0x45B9: ('EditionEntry[]', { + 0x45BC: ('EditionUID', UInt), + 0x45BD: ('EditionFlagHidden', Bool), + 0x45DB: ('EditionFlagDefault', Bool), + 0x45DD: ('EditionFlagOrdered', Bool), + 0xB6: ('ChapterAtom[]', chapter_atom) + }) +} + +segment_tags = { + 0x7373: ('Tag[]', { + 0x63C0: ('Targets', { + 0x68CA: ('TargetTypeValue', UInt), + 0x63CA: ('TargetType', String), + 0x63C5: ('TrackUID[]', UInt), + 0x63C9: ('EditionUID[]', UInt), + 0x63C4: ('ChapterUID[]', UInt), + 0x63C6: ('AttachmentUID[]', UInt) + }), + 0x67C8: ('SimpleTag[]', simple_tag) + }) +} + +segment = { + 0x114D9B74: ('SeekHead[]', segment_seek), + 0x1549A966: ('Info[]', segment_info), + 0x1F43B675: ('Cluster[]', segment_clusters), + 0x1654AE6B: ('Tracks[]', segment_tracks), + 0x1C53BB6B: ('Cues', segment_cues), + 0x1941A469: ('Attachments', segment_attachments), + 0x1043A770: ('Chapters', segment_chapters), + 0x1254C367: ('Tags[]', segment_tags) +} + +class EBML(FieldSet): + def __init__(self, parent, ids): + FieldSet.__init__(self, parent, "?[]") + + # Set name + id = 
self['id'].value + self.val = ids.get(id) + if not self.val: + if id == 0xBF: + self.val = 'CRC-32[]', Binary + elif id == 0xEC: + self.val = 'Void[]', Binary + elif id == 0x1B538667: + self.val = 'SignatureSlot[]', signature + else: + self.val = 'Unknown[]', Binary + self._name = self.val[0] + + # Compute size + size = self['size'] + if size.value is not None: + self._size = size.address + size.size + size.value * 8 + elif self._parent._parent: + raise ParserError("Unknown length (only allowed for the last Level 0 element)") + elif self._parent._size is not None: + self._size = self._parent._size - self.address + + def createFields(self): + yield RawInt(self, 'id') + yield Unsigned(self, 'size') + for val in self.val[1:]: + if callable(val): + yield val(self) + else: + while not self.eof: + yield EBML(self, val) + +class MkvFile(Parser): + EBML_SIGNATURE = 0x1A45DFA3 + PARSER_TAGS = { + "id": "matroska", + "category": "container", + "file_ext": ("mka", "mkv", "webm"), + "mime": ( + u"video/x-matroska", + u"audio/x-matroska", + u"video/webm", + u"audio/webm"), + "min_size": 5*8, + "magic": (("\x1A\x45\xDF\xA3", 0),), + "description": "Matroska multimedia container" + } + endian = BIG_ENDIAN + + def _getDoctype(self): + return self[0]['DocType/string'].value + + def validate(self): + if self.stream.readBits(0, 32, self.endian) != self.EBML_SIGNATURE: + return False + try: + first = self[0] + except ParserError: + return False + if None < self._size < first._size: + return "First chunk size is invalid" + if self._getDoctype() not in ('matroska', 'webm'): + return "Stream isn't a matroska document." 
+ return True + + def createFields(self): + hdr = EBML(self, ebml) + yield hdr + + while not self.eof: + yield EBML(self, { 0x18538067: ('Segment[]', segment) }) + + def createContentSize(self): + field = self["Segment[0]/size"] + return field.absolute_address + field.value * 8 + field.size + + def createDescription(self): + if self._getDoctype() == 'webm': + return 'WebM video' + else: + return 'Matroska video' + + def createMimeType(self): + if self._getDoctype() == 'webm': + return u"video/webm" + else: + return u"video/x-matroska" + diff --git a/libs/hachoir_parser/container/ogg.py b/libs/hachoir_parser/container/ogg.py new file mode 100644 index 0000000..fa2d26c --- /dev/null +++ b/libs/hachoir_parser/container/ogg.py @@ -0,0 +1,349 @@ +# +# Ogg parser +# Author Julien Muchembled +# Created: 10 june 2006 +# + +from hachoir_parser import Parser +from hachoir_core.field import (Field, FieldSet, createOrphanField, + NullBits, Bit, Bits, Enum, Fragment, MissingField, ParserError, + UInt8, UInt16, UInt24, UInt32, UInt64, + RawBytes, String, PascalString32, NullBytes) +from hachoir_core.stream import FragmentedStream, InputStreamError +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_core.tools import humanDurationNanosec +from hachoir_core.text_handler import textHandler, hexadecimal + +MAX_FILESIZE = 1000 * 1024 * 1024 + +class XiphInt(Field): + """ + Positive integer with variable size. Values bigger than 254 are stored as + (255, 255, ..., rest): value is the sum of all bytes. 
+ + Example: 1000 is stored as (255, 255, 255, 235), total = 255*3+235 = 1000 + """ + def __init__(self, parent, name, max_size=None, description=None): + Field.__init__(self, parent, name, size=0, description=description) + value = 0 + addr = self.absolute_address + while max_size is None or self._size < max_size: + byte = parent.stream.readBits(addr, 8, LITTLE_ENDIAN) + value += byte + self._size += 8 + if byte != 0xff: + break + addr += 8 + self.createValue = lambda: value + +class Lacing(FieldSet): + def createFields(self): + size = self.size + while size: + field = XiphInt(self, 'size[]', size) + yield field + size -= field.size + +def parseVorbisComment(parent): + yield PascalString32(parent, 'vendor', charset="UTF-8") + yield UInt32(parent, 'count') + for index in xrange(parent["count"].value): + yield PascalString32(parent, 'metadata[]', charset="UTF-8") + if parent.current_size != parent.size: + yield UInt8(parent, "framing_flag") + +PIXEL_FORMATS = { + 0: "4:2:0", + 2: "4:2:2", + 3: "4:4:4", +} + +def formatTimeUnit(field): + return humanDurationNanosec(field.value * 100) + +def parseVideoHeader(parent): + yield NullBytes(parent, "padding[]", 2) + yield String(parent, "fourcc", 4) + yield UInt32(parent, "size") + yield textHandler(UInt64(parent, "time_unit", "Frame duration"), formatTimeUnit) + yield UInt64(parent, "sample_per_unit") + yield UInt32(parent, "default_len") + yield UInt32(parent, "buffer_size") + yield UInt16(parent, "bits_per_sample") + yield NullBytes(parent, "padding[]", 2) + yield UInt32(parent, "width") + yield UInt32(parent, "height") + yield NullBytes(parent, "padding[]", 4) + +def parseTheoraHeader(parent): + yield UInt8(parent, "version_major") + yield UInt8(parent, "version_minor") + yield UInt8(parent, "version_revision") + yield UInt16(parent, "width", "Width*16 in pixel") + yield UInt16(parent, "height", "Height*16 in pixel") + + yield UInt24(parent, "frame_width") + yield UInt24(parent, "frame_height") + yield UInt8(parent, 
"offset_x") + yield UInt8(parent, "offset_y") + + yield UInt32(parent, "fps_num", "Frame per second numerator") + yield UInt32(parent, "fps_den", "Frame per second denominator") + yield UInt24(parent, "aspect_ratio_num", "Aspect ratio numerator") + yield UInt24(parent, "aspect_ratio_den", "Aspect ratio denominator") + + yield UInt8(parent, "color_space") + yield UInt24(parent, "target_bitrate") + yield Bits(parent, "quality", 6) + yield Bits(parent, "gp_shift", 5) + yield Enum(Bits(parent, "pixel_format", 2), PIXEL_FORMATS) + yield Bits(parent, "spare_config", 3) + +def parseVorbisHeader(parent): + yield UInt32(parent, "vorbis_version") + yield UInt8(parent, "audio_channels") + yield UInt32(parent, "audio_sample_rate") + yield UInt32(parent, "bitrate_maximum") + yield UInt32(parent, "bitrate_nominal") + yield UInt32(parent, "bitrate_minimum") + yield Bits(parent, "blocksize_0", 4) + yield Bits(parent, "blocksize_1", 4) + yield UInt8(parent, "framing_flag") + +class Chunk(FieldSet): + tag_info = { + "vorbis": { + 3: ("comment", parseVorbisComment), + 1: ("vorbis_hdr", parseVorbisHeader), + }, "theora": { + 128: ("theora_hdr", parseTheoraHeader), + 129: ("comment", parseVorbisComment), + }, "video\0": { + 1: ("video_hdr", parseVideoHeader), + }, + } + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + if 7*8 <= self.size: + try: + self._name, self.parser = self.tag_info[self["codec"].value][self["type"].value] + if self._name == "theora_hdr": + self.endian = BIG_ENDIAN + except KeyError: + self.parser = None + else: + self.parser = None + + def createFields(self): + if 7*8 <= self.size: + yield UInt8(self, 'type') + yield String(self, 'codec', 6) + if self.parser: + for field in self.parser(self): + yield field + else: + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "raw", size) + +class Packets: + def __init__(self, first): + self.first = first + + def __iter__(self): + fragment = self.first + size = None + 
while fragment is not None: + page = fragment.parent + continued_packet = page["continued_packet"].value + for segment_size in page.segment_size: + if continued_packet: + size += segment_size + continued_packet = False + else: + if size: + yield size * 8 + size = segment_size + fragment = fragment.next + if size: + yield size * 8 + +class Segments(Fragment): + def __init__(self, parent, *args, **kw): + Fragment.__init__(self, parent, *args, **kw) + if parent['last_page'].value: + next = None + else: + next = self.createNext + self.setLinks(parent.parent.streams.setdefault(parent['serial'].value, self), next) + + def _createInputStream(self, **args): + if self.first is self: + return FragmentedStream(self, packets=Packets(self), tags=[("id","ogg_stream")], **args) + return Fragment._createInputStream(self, **args) + + def _getData(self): + return self + + def createNext(self): + parent = self.parent + index = parent.index + parent = parent.parent + first = self.first + try: + while True: + index += 1 + next = parent[index][self.name] + if next.first is first: + return next + except MissingField: + pass + + def createFields(self): + for segment_size in self.parent.segment_size: + if segment_size: + yield Chunk(self, "chunk[]", size=segment_size*8) + +class OggPage(FieldSet): + MAGIC = "OggS" + + def __init__(self, *args): + FieldSet.__init__(self, *args) + size = 27 + self.lacing_size = self['lacing_size'].value + if self.lacing_size: + size += self.lacing_size + lacing = self['lacing'] + self.segment_size = [ field.value for field in lacing ] + size += sum(self.segment_size) + self._size = size * 8 + + def createFields(self): + yield String(self, 'capture_pattern', 4, charset="ASCII") + if self['capture_pattern'].value != self.MAGIC: + self.warning('Invalid signature. An Ogg page must start with "%s".' 
% self.MAGIC) + yield UInt8(self, 'stream_structure_version') + yield Bit(self, 'continued_packet') + yield Bit(self, 'first_page') + yield Bit(self, 'last_page') + yield NullBits(self, 'unused', 5) + yield UInt64(self, 'abs_granule_pos') + yield textHandler(UInt32(self, 'serial'), hexadecimal) + yield UInt32(self, 'page') + yield textHandler(UInt32(self, 'checksum'), hexadecimal) + yield UInt8(self, 'lacing_size') + if self.lacing_size: + yield Lacing(self, "lacing", size=self.lacing_size*8) + yield Segments(self, "segments", size=self._size-self._current_size) + + def validate(self): + if self['capture_pattern'].value != self.MAGIC: + return "Wrong signature" + if self['stream_structure_version'].value != 0: + return "Unknown structure version (%s)" % self['stream_structure_version'].value + return "" + +class OggFile(Parser): + PARSER_TAGS = { + "id": "ogg", + "category": "container", + "file_ext": ("ogg", "ogm"), + "mime": ( + u"application/ogg", u"application/x-ogg", + u"audio/ogg", u"audio/x-ogg", + u"video/ogg", u"video/x-ogg", + u"video/theora", u"video/x-theora", + ), + "magic": ((OggPage.MAGIC, 0),), + "subfile": "skip", + "min_size": 28*8, + "description": "Ogg multimedia container" + } + endian = LITTLE_ENDIAN + + def validate(self): + magic = OggPage.MAGIC + if self.stream.readBytes(0, len(magic)) != magic: + return "Invalid magic string" + # Validate first 3 pages + for index in xrange(3): + try: + page = self[index] + except MissingField: + if self.done: + return True + return "Unable to get page #%u" % index + except (InputStreamError, ParserError): + return "Unable to create page #%u" % index + err = page.validate() + if err: + return "Invalid page #%s: %s" % (index, err) + return True + + def createMimeType(self): + if "theora_hdr" in self["page[0]/segments"]: + return u"video/theora" + elif "vorbis_hdr" in self["page[0]/segments"]: + return u"audio/vorbis" + else: + return u"application/ogg" + + def createDescription(self): + if "theora_hdr" in 
self["page[0]"]: + return u"Ogg/Theora video" + elif "vorbis_hdr" in self["page[0]"]: + return u"Ogg/Vorbis audio" + else: + return u"Ogg multimedia container" + + def createFields(self): + self.streams = {} + while not self.eof: + yield OggPage(self, "page[]") + + def createLastPage(self): + start = self[0].size + end = MAX_FILESIZE * 8 + if True: + # FIXME: This doesn't work on all files (eg. some Ogg/Theora) + offset = self.stream.searchBytes("OggS\0\5", start, end) + if offset is None: + offset = self.stream.searchBytes("OggS\0\4", start, end) + if offset is None: + return None + return createOrphanField(self, offset, OggPage, "page") + else: + # Very slow version + page = None + while True: + offset = self.stream.searchBytes("OggS\0", start, end) + if offset is None: + break + page = createOrphanField(self, offset, OggPage, "page") + start += page.size + return page + + def createContentSize(self): + page = self.createLastPage() + if page: + return page.absolute_address + page.size + else: + return None + + +class OggStream(Parser): + PARSER_TAGS = { + "id": "ogg_stream", + "category": "container", + "subfile": "skip", + "min_size": 7*8, + "description": "Ogg logical stream" + } + endian = LITTLE_ENDIAN + + def validate(self): + return False + + def createFields(self): + for size in self.stream.packets: + yield RawBytes(self, "packet[]", size//8) diff --git a/libs/hachoir_parser/container/realmedia.py b/libs/hachoir_parser/container/realmedia.py new file mode 100644 index 0000000..45c8173 --- /dev/null +++ b/libs/hachoir_parser/container/realmedia.py @@ -0,0 +1,172 @@ +""" +RealMedia (.rm) parser + +Author: Mike Melanson +Creation date: 15 december 2006 + +References: +- http://wiki.multimedia.cx/index.php?title=RealMedia +- Appendix E: RealMedia File Format (RMFF) Reference + https://common.helixcommunity.org/nonav/2003/HCS_SDK_r5/htmfiles/rmff.htm + +Samples: +- http://samples.mplayerhq.hu/real/ +""" + +from hachoir_parser import Parser +from 
hachoir_core.field import (FieldSet, + UInt16, UInt32, Bit, RawBits, + RawBytes, String, PascalString8, PascalString16) +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.endian import BIG_ENDIAN + +def parseHeader(self): + yield UInt32(self, "filever", "File version") + yield UInt32(self, "numheaders", "number of headers") + +def parseFileProperties(self): + yield UInt32(self, "max_bit_rate", "Maximum bit rate") + yield UInt32(self, "avg_bit_rate", "Average bit rate") + yield UInt32(self, "max_pkt_size", "Size of largest data packet") + yield UInt32(self, "avg_pkt_size", "Size of average data packet") + yield UInt32(self, "num_pkts", "Number of data packets") + yield UInt32(self, "duration", "File duration in milliseconds") + yield UInt32(self, "preroll", "Suggested preroll in milliseconds") + yield textHandler(UInt32(self, "index_offset", "Absolute offset of first index chunk"), hexadecimal) + yield textHandler(UInt32(self, "data_offset", "Absolute offset of first data chunk"), hexadecimal) + yield UInt16(self, "stream_count", "Number of streams in the file") + yield RawBits(self, "reserved", 13) + yield Bit(self, "is_live", "Whether file is a live broadcast") + yield Bit(self, "is_perfect_play", "Whether PerfectPlay can be used") + yield Bit(self, "is_saveable", "Whether file can be saved") + +def parseContentDescription(self): + yield PascalString16(self, "title", charset="ISO-8859-1", strip=" \0") + yield PascalString16(self, "author", charset="ISO-8859-1", strip=" \0") + yield PascalString16(self, "copyright", charset="ISO-8859-1", strip=" \0") + yield PascalString16(self, "comment", charset="ISO-8859-1", strip=" \0") + + +class NameValueProperty(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["size"].value * 8 + + def createFields(self): + yield UInt32(self, "size") + yield UInt16(self, "obj_version") + yield PascalString8(self, "name", charset="ASCII") + yield UInt32(self, "type") 
+ yield PascalString16(self, "value", charset="ISO-8859-1", strip=" \0") + +class LogicalFileInfo(FieldSet): + def createFields(self): + yield UInt32(self, "size") + yield UInt16(self, "obj_version") + yield UInt16(self, "nb_physical_stream") + for index in xrange(self["nb_physical_stream"].value): + yield UInt16(self, "physical_stream[]") + for index in xrange(self["nb_physical_stream"].value): + yield UInt16(self, "data_offset[]") + yield UInt16(self, "nb_rule") + for index in xrange(self["nb_rule"].value): + yield UInt16(self, "rule[]") + yield UInt16(self, "nb_prop") + for index in xrange(self["nb_prop"].value): + yield NameValueProperty(self, "prop[]") + +def parseMediaPropertiesHeader(self): + yield UInt16(self, "stream_number", "Stream number") + yield UInt32(self, "max_bit_rate", "Maximum bit rate") + yield UInt32(self, "avg_bit_rate", "Average bit rate") + yield UInt32(self, "max_pkt_size", "Size of largest data packet") + yield UInt32(self, "avg_pkt_size", "Size of average data packet") + yield UInt32(self, "stream_start", "Stream start offset in milliseconds") + yield UInt32(self, "preroll", "Preroll in milliseconds") + yield UInt32(self, "duration", "Stream duration in milliseconds") + yield PascalString8(self, "desc", "Stream description", charset="ISO-8859-1") + yield PascalString8(self, "mime_type", "MIME type string", charset="ASCII") + yield UInt32(self, "specific_size", "Size of type-specific data") + size = self['specific_size'].value + if size: + if self["mime_type"].value == "logical-fileinfo": + yield LogicalFileInfo(self, "file_info", size=size*8) + else: + yield RawBytes(self, "specific", size, "Type-specific data") + +class Chunk(FieldSet): + tag_info = { + ".RMF": ("header", parseHeader), + "PROP": ("file_prop", parseFileProperties), + "CONT": ("content_desc", parseContentDescription), + "MDPR": ("stream_prop[]", parseMediaPropertiesHeader), + "DATA": ("data[]", None), + "INDX": ("file_index[]", None) + } + + def createValueFunc(self): + 
return self.value_func(self) + + def __init__(self, parent, name, description=None): + FieldSet.__init__(self, parent, name, description) + self._size = (self["size"].value) * 8 + tag = self["tag"].value + if tag in self.tag_info: + self._name, self.parse_func = self.tag_info[tag] + else: + self._description = "" + self.parse_func = None + + def createFields(self): + yield String(self, "tag", 4, "Chunk FourCC", charset="ASCII") + yield UInt32(self, "size", "Chunk Size") + yield UInt16(self, "version", "Chunk Version") + + if self.parse_func: + for field in self.parse_func(self): + yield field + else: + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "raw", size) + + def createDescription(self): + return "Chunk: %s" % self["tag"].display + +class RealMediaFile(Parser): + MAGIC = '.RMF\0\0\0\x12\0\1' # (magic, size=18, version=1) + PARSER_TAGS = { + "id": "real_media", + "category": "container", + "file_ext": ("rm",), + "mime": ( + u"video/x-pn-realvideo", + u"audio/x-pn-realaudio", + u"audio/x-pn-realaudio-plugin", + u"audio/x-real-audio", + u"application/vnd.rn-realmedia"), + "min_size": len(MAGIC)*8, # just the identifier + "magic": ((MAGIC, 0),), + "description": u"RealMedia (rm) Container File", + } + endian = BIG_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 4) != '.RMF': + return "Invalid magic" + if self["header/size"].value != 18: + return "Invalid header size" + if self["header/version"].value not in (0, 1): + return "Unknown file format version (%s)" % self["header/version"].value + return True + + def createFields(self): + while not self.eof: + yield Chunk(self, "chunk") + + def createMimeType(self): + for prop in self.array("stream_prop"): + if prop["mime_type"].value == "video/x-pn-realvideo": + return u"video/x-pn-realvideo" + return u"audio/x-pn-realaudio" + diff --git a/libs/hachoir_parser/container/riff.py b/libs/hachoir_parser/container/riff.py new file mode 100644 index 0000000..a5e4fc0 --- /dev/null 
+++ b/libs/hachoir_parser/container/riff.py @@ -0,0 +1,439 @@ +# -*- coding: UTF-8 -*- + +""" +RIFF parser, able to parse: + * AVI video container + * WAV audio container + * CDA file + +Documents: +- libavformat source code from ffmpeg library + http://ffmpeg.mplayerhq.hu/ +- Video for Windows Programmer's Guide + http://www.opennet.ru/docs/formats/avi.txt +- What is an animated cursor? + http://www.gdgsoft.com/anituner/help/aniformat.htm + +Authors: + * Aurélien Jacobs + * Mickaël KENIKSSI + * Victor Stinner +Changelog: + * 2007-03-30: support ACON (animated icons) + * 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser + * 2006-08-03: creation of CDA parser by Mickaël KENIKSSI + * 2005-06-21: creation of WAV parser by Victor Stinner + * 2005-06-08: creation of AVI parser by Victor Stinner and Aurélien Jacobs +Thanks to: + * Wojtek Kaniewski (wojtekka AT logonet.com.pl) for its CDA file + format information +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, UInt32, Enum, + Bit, NullBits, NullBytes, + RawBytes, String, PaddingBytes, + SubFile) +from hachoir_core.tools import alignValue, humanDuration +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import filesizeHandler, textHandler +from hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name +from hachoir_parser.image.ico import IcoFile +from datetime import timedelta + +def parseText(self): + yield String(self, "text", self["size"].value, + strip=" \0", truncate="\0", + charset="ISO-8859-1") + +def parseRawFormat(self, size): + yield RawBytes(self, "raw_format", size) + +def parseVideoFormat(self, size): + yield UInt32(self, "video_size", "Video format: Size") + yield UInt32(self, "width", "Video format: Width") + yield UInt32(self, "height", "Video format: Height") + yield UInt16(self, "panes", "Video format: Panes") + yield UInt16(self, "depth", "Video format: Depth") + yield UInt32(self, 
"tag1", "Video format: Tag1") + yield UInt32(self, "img_size", "Video format: Image size") + yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter") + yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter") + yield UInt32(self, "clr_used", "Video format: ClrUsed") + yield UInt32(self, "clr_important", "Video format: ClrImportant") + +def parseAudioFormat(self, size): + yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name) + yield UInt16(self, "channel", "Audio format: Channels") + yield UInt32(self, "sample_rate", "Audio format: Sample rate") + yield UInt32(self, "bit_rate", "Audio format: Bit rate") + yield UInt16(self, "block_align", "Audio format: Block align") + if size >= 16: + yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample") + if size >= 18: + yield UInt16(self, "ext_size", "Audio format: Size of extra information") + if size >= 28: # and self["a_channel"].value > 2 + yield UInt16(self, "reserved", "Audio format: ") + yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask") + yield UInt32(self, "subformat", "Audio format: Subformat id") + +def parseAVIStreamFormat(self): + size = self["size"].value + strtype = self["../stream_hdr/stream_type"].value + TYPE_HANDLER = { + "vids": (parseVideoFormat, 40), + "auds": (parseAudioFormat, 16) + } + handler = parseRawFormat + if strtype in TYPE_HANDLER: + info = TYPE_HANDLER[strtype] + if info[1] <= size: + handler = info[0] + for field in handler(self, size): + yield field + +def parseAVIStreamHeader(self): + if self["size"].value != 56: + raise ParserError("Invalid stream header size") + yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII") + field = String(self, "fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII") + if self["stream_type"].value == "vids": + yield Enum(field, video_fourcc_name, lambda text: text.upper()) + else: + yield field + yield UInt32(self, "flags", 
"Stream flags") + yield UInt16(self, "priority", "Stream priority") + yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0") + yield UInt32(self, "init_frames", "InitialFrames") + yield UInt32(self, "scale", "Time scale") + yield UInt32(self, "rate", "Divide by scale to give frame rate") + yield UInt32(self, "start", "Stream start time (unit: rate/scale)") + yield UInt32(self, "length", "Stream length (unit: rate/scale)") + yield UInt32(self, "buf_size", "Suggested buffer size") + yield UInt32(self, "quality", "Stream quality") + yield UInt32(self, "sample_size", "Size of samples") + yield UInt16(self, "left", "Destination rectangle (left)") + yield UInt16(self, "top", "Destination rectangle (top)") + yield UInt16(self, "right", "Destination rectangle (right)") + yield UInt16(self, "bottom", "Destination rectangle (bottom)") + +class RedBook(FieldSet): + """ + RedBook offset parser, used in CD audio (.cda) file + """ + def createFields(self): + yield UInt8(self, "frame") + yield UInt8(self, "second") + yield UInt8(self, "minute") + yield PaddingBytes(self, "notused", 1) + +def formatSerialNumber(field): + """ + Format an disc serial number. + Eg. 
0x00085C48 => "0008-5C48" + """ + sn = field.value + return "%04X-%04X" % (sn >> 16, sn & 0xFFFF) + +def parseCDDA(self): + """ + HSG address format: number of 1/75 second + + HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset) + HSG length = (minute*60 + second)*75 + frame (from RB length) + """ + yield UInt16(self, "cda_version", "CD file version (currently 1)") + yield UInt16(self, "track_no", "Number of track") + yield textHandler(UInt32(self, "disc_serial", "Disc serial number"), + formatSerialNumber) + yield UInt32(self, "hsg_offset", "Track offset (HSG format)") + yield UInt32(self, "hsg_length", "Track length (HSG format)") + yield RedBook(self, "rb_offset", "Track offset (Red-book format)") + yield RedBook(self, "rb_length", "Track length (Red-book format)") + +def parseWAVFormat(self): + size = self["size"].value + if size not in (16, 18): + self.warning("Format with size of %s bytes is not supported!" % size) + yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name) + yield UInt16(self, "nb_channel", "Number of audio channel") + yield UInt32(self, "sample_per_sec", "Sample per second") + yield UInt32(self, "byte_per_sec", "Average byte per second") + yield UInt16(self, "block_align", "Block align") + yield UInt16(self, "bit_per_sample", "Bits per sample") + +def parseWAVFact(self): + yield UInt32(self, "nb_sample", "Number of samples in audio stream") + +def parseAviHeader(self): + yield UInt32(self, "microsec_per_frame", "Microsecond per frame") + yield UInt32(self, "max_byte_per_sec", "Maximum byte per second") + yield NullBytes(self, "reserved", 4) + + # Flags + yield NullBits(self, "reserved[]", 4) + yield Bit(self, "has_index") + yield Bit(self, "must_use_index") + yield NullBits(self, "reserved[]", 2) + yield Bit(self, "is_interleaved") + yield NullBits(self, "reserved[]", 2) + yield Bit(self, "trust_cktype") + yield NullBits(self, "reserved[]", 4) + yield Bit(self, "was_capture_file") + yield Bit(self, "is_copyrighted") 
+ yield NullBits(self, "reserved[]", 14) + + yield UInt32(self, "total_frame", "Total number of frames in the video") + yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)") + yield UInt32(self, "nb_stream", "Number of streams") + yield UInt32(self, "sug_buf_size", "Suggested buffer size") + yield UInt32(self, "width", "Width in pixel") + yield UInt32(self, "height", "Height in pixel") + yield UInt32(self, "scale") + yield UInt32(self, "rate") + yield UInt32(self, "start") + yield UInt32(self, "length") + +def parseODML(self): + yield UInt32(self, "total_frame", "Real number of frame of OpenDML video") + padding = self["size"].value - 4 + if 0 < padding: + yield NullBytes(self, "padding[]", padding) + +class AVIIndexEntry(FieldSet): + size = 16*8 + def createFields(self): + yield String(self, "tag", 4, "Tag", charset="ASCII") + yield UInt32(self, "flags") + yield UInt32(self, "start", "Offset from start of movie data") + yield UInt32(self, "length") + +def parseIndex(self): + while not self.eof: + yield AVIIndexEntry(self, "index[]") + +class Chunk(FieldSet): + TAG_INFO = { + # This dictionnary is edited by RiffFile.validate() + + "LIST": ("list[]", None, "Sub-field list"), + "JUNK": ("junk[]", None, "Junk (padding)"), + + # Metadata + "INAM": ("title", parseText, "Document title"), + "IART": ("artist", parseText, "Artist"), + "ICMT": ("comment", parseText, "Comment"), + "ICOP": ("copyright", parseText, "Copyright"), + "IENG": ("author", parseText, "Author"), + "ICRD": ("creation_date", parseText, "Creation date"), + "ISFT": ("producer", parseText, "Producer"), + "IDIT": ("datetime", parseText, "Date time"), + + # TODO: Todo: see below + # "strn": Stream description + # TWOCC code, movie/field[]/tag.value[2:4]: + # "db": "Uncompressed video frame", + # "dc": "Compressed video frame", + # "wb": "Audio data", + # "pc": "Palette change" + } + + subtag_info = { + "INFO": ("info", "File informations"), + "hdrl": ("headers", "Headers"), + "strl": 
("stream[]", "Stream header list"), + "movi": ("movie", "Movie stream"), + "odml": ("odml", "ODML"), + } + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = (8 + alignValue(self["size"].value, 2)) * 8 + tag = self["tag"].value + if tag in self.TAG_INFO: + self.tag_info = self.TAG_INFO[tag] + if tag == "LIST": + subtag = self["subtag"].value + if subtag in self.subtag_info: + info = self.subtag_info[subtag] + self.tag_info = (info[0], None, info[1]) + self._name = self.tag_info[0] + self._description = self.tag_info[2] + else: + self.tag_info = ("field[]", None, None) + + def createFields(self): + yield String(self, "tag", 4, "Tag", charset="ASCII") + yield filesizeHandler(UInt32(self, "size", "Size")) + if not self["size"].value: + return + if self["tag"].value == "LIST": + yield String(self, "subtag", 4, "Sub-tag", charset="ASCII") + handler = self.tag_info[1] + while 8 < (self.size - self.current_size)/8: + field = self.__class__(self, "field[]") + yield field + if (field.size/8) % 2 != 0: + yield UInt8(self, "padding[]", "Padding") + else: + handler = self.tag_info[1] + if handler: + for field in handler(self): + yield field + else: + yield RawBytes(self, "raw_content", self["size"].value) + padding = self.seekBit(self._size) + if padding: + yield padding + + def createDescription(self): + tag = self["tag"].display + return u"Chunk (tag %s)" % tag + +class ChunkAVI(Chunk): + TAG_INFO = Chunk.TAG_INFO.copy() + TAG_INFO.update({ + "strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"), + "strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"), + "avih": ("avi_hdr", parseAviHeader, "AVI header"), + "idx1": ("index", parseIndex, "Stream index"), + "dmlh": ("odml_hdr", parseODML, "ODML header"), + }) + +class ChunkCDDA(Chunk): + TAG_INFO = Chunk.TAG_INFO.copy() + TAG_INFO.update({ + 'fmt ': ("cdda", parseCDDA, "CD audio informations"), + }) + +class ChunkWAVE(Chunk): + TAG_INFO = Chunk.TAG_INFO.copy() + 
TAG_INFO.update({ + 'fmt ': ("format", parseWAVFormat, "Audio format"), + 'fact': ("nb_sample", parseWAVFact, "Number of samples"), + 'data': ("audio_data", None, "Audio stream data"), + }) + +def parseAnimationHeader(self): + yield UInt32(self, "hdr_size", "Size of header (36 bytes)") + if self["hdr_size"].value != 36: + self.warning("Animation header with unknown size (%s)" % self["size"].value) + yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor") + yield UInt32(self, "nb_step", "Number of Blits before the animation cycles") + yield UInt32(self, "cx") + yield UInt32(self, "cy") + yield UInt32(self, "bit_count") + yield UInt32(self, "planes") + yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present") + yield Bit(self, "is_icon") + yield NullBits(self, "padding", 31) + +def parseAnimationSequence(self): + while not self.eof: + yield UInt32(self, "icon[]") + +def formatJiffie(field): + sec = float(field.value) / 60 + return humanDuration(timedelta(seconds=sec)) + +def parseAnimationRate(self): + while not self.eof: + yield textHandler(UInt32(self, "rate[]"), formatJiffie) + +def parseIcon(self): + yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile) + +class ChunkACON(Chunk): + TAG_INFO = Chunk.TAG_INFO.copy() + TAG_INFO.update({ + 'anih': ("anim_hdr", parseAnimationHeader, "Animation header"), + 'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"), + 'rate': ("anim_rate", parseAnimationRate, "Animation sequence"), + 'icon': ("icon[]", parseIcon, "Icon"), + }) + +class RiffFile(Parser): + PARSER_TAGS = { + "id": "riff", + "category": "container", + "file_ext": ("avi", "cda", "wav", "ani"), + "min_size": 16*8, + "mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"), + # FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )" + "magic": ( + ("AVI LIST", 8*8), + ("WAVEfmt ", 8*8), + ("CDDAfmt ", 8*8), + ("ACONanih", 8*8), + ), + "description": "Microsoft RIFF container" + } + 
VALID_TYPES = { + "WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"), + "CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"), + "AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"), + "ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"), + } + endian = LITTLE_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 4) != "RIFF": + return "Wrong signature" + if self["type"].value not in self.VALID_TYPES: + return "Unknown RIFF content type" + return True + + def createFields(self): + yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII") + yield filesizeHandler(UInt32(self, "filesize", "File size")) + yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII") + + # Choose chunk type depending on file type + try: + chunk_cls = self.VALID_TYPES[self["type"].value][0] + except KeyError: + chunk_cls = Chunk + + # Parse all chunks up to filesize + while self.current_size < self["filesize"].value*8+8: + yield chunk_cls(self, "chunk[]") + if not self.eof: + yield RawBytes(self, "padding[]", (self.size-self.current_size)/8) + + def createMimeType(self): + try: + return self.VALID_TYPES[self["type"].value][1] + except KeyError: + return None + + def createDescription(self): + tag = self["type"].value + if tag == "AVI ": + desc = u"Microsoft AVI video" + if "headers/avi_hdr" in self: + header = self["headers/avi_hdr"] + desc += ": %ux%u pixels" % (header["width"].value, header["height"].value) + microsec = header["microsec_per_frame"].value + if microsec: + desc += ", %.1f fps" % (1000000.0 / microsec) + if "total_frame" in header and header["total_frame"].value: + delta = timedelta(seconds=float(header["total_frame"].value) * microsec) + desc += ", " + humanDuration(delta) + return desc + else: + try: + return self.VALID_TYPES[tag][2] + except KeyError: + return u"Microsoft RIFF container" + + def createContentSize(self): + 
size = (self["filesize"].value + 8) * 8 + return min(size, self.stream.size) + + def createFilenameSuffix(self): + try: + return self.VALID_TYPES[self["type"].value][3] + except KeyError: + return ".riff" + diff --git a/libs/hachoir_parser/container/swf.py b/libs/hachoir_parser/container/swf.py new file mode 100644 index 0000000..942e3d9 --- /dev/null +++ b/libs/hachoir_parser/container/swf.py @@ -0,0 +1,433 @@ +""" +SWF (Macromedia/Adobe Flash) file parser. + +Documentation: + + - Alexis' SWF Reference: + http://www.m2osw.com/swf_alexref.html + - http://www.half-serious.com/swf/format/ + - http://www.anotherbigidea.com/javaswf/ + - http://www.gnu.org/software/gnash/ + +Author: Victor Stinner +Creation date: 29 october 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + Bit, Bits, UInt8, UInt16, Int32, UInt32, Int64, CString, Enum, + Bytes, RawBytes, NullBits, String, SubFile) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_core.text_handler import textHandler, filesizeHandler +from hachoir_core.tools import paddingSize, humanFrequency +from hachoir_parser.image.common import RGB +from hachoir_parser.image.jpeg import JpegChunk, JpegFile +from hachoir_core.stream import StringInputStream, ConcatStream +from hachoir_parser.common.deflate import Deflate, has_deflate +from hachoir_parser.container.action_script import parseActionScript, parseABC +import math + +# Maximum file size (50 MB) +MAX_FILE_SIZE = 50 * 1024 * 1024 + +TWIPS = 20 + +class RECT(FieldSet): + endian = BIG_ENDIAN + def createFields(self): + yield Bits(self, "nbits", 5) + nbits = self["nbits"].value + if not nbits: + raise ParserError("SWF parser: Invalid RECT field size (0)") + yield Bits(self, "xmin", nbits, "X minimum in twips") + yield Bits(self, "xmax", nbits, "X maximum in twips") + yield Bits(self, "ymin", nbits, "Y minimum in twips") + yield Bits(self, "ymax", nbits, "Y maximum in twips") + size = 
paddingSize(self.current_size, 8) + if size: + yield NullBits(self, "padding", size) + + def getWidth(self): + return math.ceil(float(self["xmax"].value) / TWIPS) + def getHeight(self): + return math.ceil(float(self["ymax"].value) / TWIPS) + + def createDescription(self): + return "Rectangle: %ux%u" % (self.getWidth(), self.getHeight()) + +class FixedFloat16(FieldSet): + def createFields(self): + yield UInt8(self, "float_part") + yield UInt8(self, "int_part") + + def createValue(self): + return self["int_part"].value + float(self["float_part"].value) / 256 + +def parseBackgroundColor(parent, size): + yield RGB(parent, "color") + +def bit2hertz(field): + return humanFrequency(5512.5 * (2 ** field.value)) + +SOUND_CODEC_MP3 = 2 +SOUND_CODEC = { + 0: "RAW", + 1: "ADPCM", + SOUND_CODEC_MP3: "MP3", + 3: "Uncompressed", + 6: "Nellymoser", +} + +class SoundEnvelope(FieldSet): + def createFields(self): + yield UInt8(self, "count") + for index in xrange(self["count"].value): + yield UInt32(self, "mark44[]") + yield UInt16(self, "level0[]") + yield UInt16(self, "level1[]") + +def parseSoundBlock(parent, size): + # TODO: Be able to get codec... 
Need to know last sound "def_sound[]" field +# if not (...)sound_header: +# raise ParserError("Sound block without header") + if True: #sound_header == SOUND_CODEC_MP3: + yield UInt16(parent, "samples") + yield UInt16(parent, "left") + size = (parent.size - parent.current_size) // 8 + if size: + yield RawBytes(parent, "music_data", size) + +def parseStartSound(parent, size): + yield UInt16(parent, "sound_id") + yield Bit(parent, "has_in_point") + yield Bit(parent, "has_out_point") + yield Bit(parent, "has_loops") + yield Bit(parent, "has_envelope") + yield Bit(parent, "no_multiple") + yield Bit(parent, "stop_playback") + yield NullBits(parent, "reserved", 2) + + if parent["has_in_point"].value: + yield UInt32(parent, "in_point") + if parent["has_out_point"].value: + yield UInt32(parent, "out_point") + if parent["has_loops"].value: + yield UInt16(parent, "loop_count") + if parent["has_envelope"].value: + yield SoundEnvelope(parent, "envelope") + +def parseDefineSound(parent, size): + yield UInt16(parent, "sound_id") + + yield Bit(parent, "is_stereo") + yield Bit(parent, "is_16bit") + yield textHandler(Bits(parent, "rate", 2), bit2hertz) + yield Enum(Bits(parent, "codec", 4), SOUND_CODEC) + + yield UInt32(parent, "sample_count") + + if parent["codec"].value == SOUND_CODEC_MP3: + yield UInt16(parent, "len") + + size = (parent.size - parent.current_size) // 8 + if size: + yield RawBytes(parent, "music_data", size) + +def parseSoundHeader(parent, size): + yield Bit(parent, "playback_is_stereo") + yield Bit(parent, "playback_is_16bit") + yield textHandler(Bits(parent, "playback_rate", 2), bit2hertz) + yield NullBits(parent, "reserved", 4) + + yield Bit(parent, "sound_is_stereo") + yield Bit(parent, "sound_is_16bit") + yield textHandler(Bits(parent, "sound_rate", 2), bit2hertz) + yield Enum(Bits(parent, "codec", 4), SOUND_CODEC) + + yield UInt16(parent, "sample_count") + + if parent["codec"].value == 2: + yield UInt16(parent, "latency_seek") + +class JpegHeader(FieldSet): 
+ endian = BIG_ENDIAN + def createFields(self): + count = 1 + while True: + chunk = JpegChunk(self, "jpeg_chunk[]") + yield chunk + if 1 < count and chunk["type"].value in (JpegChunk.TAG_SOI, JpegChunk.TAG_EOI): + break + count += 1 + +def parseJpeg(parent, size): + yield UInt16(parent, "char_id", "Character identifier") + size -= 2 + + code = parent["code"].value + if code != Tag.TAG_BITS: + if code == Tag.TAG_BITS_JPEG3: + yield UInt32(parent, "alpha_offset", "Character identifier") + size -= 4 + + addr = parent.absolute_address + parent.current_size + 16 + if parent.stream.readBytes(addr, 2) in ("\xff\xdb", "\xff\xd8"): + header = JpegHeader(parent, "jpeg_header") + yield header + hdr_size = header.size // 8 + size -= hdr_size + else: + hdr_size = 0 + + if code == Tag.TAG_BITS_JPEG3: + img_size = parent["alpha_offset"].value - hdr_size + else: + img_size = size + else: + img_size = size + yield SubFile(parent, "image", img_size, "JPEG picture", parser=JpegFile) + if code == Tag.TAG_BITS_JPEG3: + size = (parent.size - parent.current_size) // 8 + yield RawBytes(parent, "alpha", size, "Image data") + +def parseVideoFrame(parent, size): + yield UInt16(parent, "stream_id") + yield UInt16(parent, "frame_num") + if 4 < size: + yield RawBytes(parent, "video_data", size-4) + +class Export(FieldSet): + def createFields(self): + yield UInt16(self, "object_id") + yield CString(self, "name") + +def parseExport(parent, size): + yield UInt16(parent, "count") + for index in xrange(parent["count"].value): + yield Export(parent, "export[]") + +def parseProductInfo(parent, size): + yield Int32(parent, "product_id") + yield Int32(parent, "edition") + yield UInt8(parent, "major_version") + yield UInt8(parent, "minor_version") + yield Int64(parent, "build_number") + yield Int64(parent, "compilation_date") + +def parseScriptLimits(parent, size): + yield UInt16(parent, "max_recursion_limit") + yield UInt16(parent, "timeout_seconds", "Seconds of processing until the SWF is considered 
'stuck'") + +def parseSymbolClass(parent, size): + yield UInt16(parent, "count") + for index in xrange(parent["count"].value): + yield UInt16(parent, "symbol_id[]") + yield CString(parent, "symbol_name[]") + +def parseBinaryData(parent, size): + yield UInt16(parent, "data_id") + yield UInt32(parent, "reserved") + if size > 6: + yield RawBytes(parent, "data", size-6) + +class Tag(FieldSet): + TAG_BITS = 6 + TAG_BITS_JPEG2 = 32 + TAG_BITS_JPEG3 = 35 + TAG_DO_ABC_DEFINE = 82 + TAG_INFO = { + # SWF version 1.0 + 0: ("end[]", "End", None), + 1: ("show_frame[]", "Show frame", None), + 2: ("def_shape[]", "Define shape", None), + 3: ("free_char[]", "Free character", None), + 4: ("place_obj[]", "Place object", None), + 5: ("remove_obj[]", "Remove object", None), + 6: ("def_bits[]", "Define bits", parseJpeg), + 7: ("def_but[]", "Define button", None), + 8: ("jpg_table", "JPEG tables", None), + 9: ("bkgd_color[]", "Set background color", parseBackgroundColor), + 10: ("def_font[]", "Define font", None), + 11: ("def_text[]", "Define text", None), + 12: ("action[]", "Action script", parseActionScript), + 13: ("def_font_info[]", "Define font info", None), + + # SWF version 2.0 + 14: ("def_sound[]", "Define sound", parseDefineSound), + 15: ("start_sound[]", "Start sound", parseStartSound), + 16: ("stop_sound[]", "Stop sound", None), + 17: ("def_but_sound[]", "Define button sound", None), + 18: ("sound_hdr", "Sound stream header", parseSoundHeader), + 19: ("sound_blk[]", "Sound stream block", parseSoundBlock), + 20: ("def_bits_lossless[]", "Define bits lossless", None), + 21: ("def_bits_jpeg2[]", "Define bits JPEG 2", parseJpeg), + 22: ("def_shape2[]", "Define shape 2", None), + 23: ("def_but_cxform[]", "Define button CXFORM", None), + 24: ("protect", "File is protected", None), + + # SWF version 3.0 + 25: ("path_are_ps[]", "Paths are Postscript", None), + 26: ("place_obj2[]", "Place object 2", None), + 28: ("remove_obj2[]", "Remove object 2", None), + 29: ("sync_frame[]", 
"Synchronize frame", None), + 31: ("free_all[]", "Free all", None), + 32: ("def_shape3[]", "Define shape 3", None), + 33: ("def_text2[]", "Define text 2", None), + 34: ("def_but2[]", "Define button2", None), + 35: ("def_bits_jpeg3[]", "Define bits JPEG 3", parseJpeg), + 36: ("def_bits_lossless2[]", "Define bits lossless 2", None), + 39: ("def_sprite[]", "Define sprite", None), + 40: ("name_character[]", "Name character", None), + 41: ("product_info", "Generator product info", parseProductInfo), + 42: ("generator_text[]", "Generator text", None), + 43: ("frame_label[]", "Frame label", None), + 45: ("sound_hdr2[]", "Sound stream header2", parseSoundHeader), + 46: ("def_morph_shape[]", "Define morph shape", None), + 47: ("gen_frame[]", "Generate frame", None), + 48: ("def_font2[]", "Define font 2", None), + 49: ("tpl_command[]", "Template command", None), + + # SWF version 4.0 + 37: ("def_text_field[]", "Define text field", None), + 38: ("def_quicktime_movie[]", "Define QuickTime movie", None), + + # SWF version 5.0 + 50: ("def_cmd_obj[]", "Define command object", None), + 51: ("flash_generator", "Flash generator", None), + 52: ("gen_ext_font[]", "Gen external font", None), + 56: ("export[]", "Export", parseExport), + 57: ("import[]", "Import", None), + 58: ("ebnable_debug", "Enable debug", None), + + # SWF version 6.0 + 59: ("do_init_action[]", "Do init action", None), + 60: ("video_str[]", "Video stream", None), + 61: ("video_frame[]", "Video frame", parseVideoFrame), + 62: ("def_font_info2[]", "Define font info 2", None), + 63: ("mx4[]", "MX4", None), + 64: ("enable_debug2", "Enable debugger 2", None), + + # SWF version 7.0 + 65: ("script_limits[]", "Script limits", parseScriptLimits), + 66: ("tab_index[]", "Set tab index", None), + + # SWF version 8.0 + 69: ("file_attr[]", "File attributes", None), + 70: ("place_obj3[]", "Place object 3", None), + 71: ("import2[]", "Import a definition list from another movie", None), + 73: ("def_font_align[]", "Define font 
alignment zones", None), + 74: ("csm_txt_set[]", "CSM text settings", None), + 75: ("def_font3[]", "Define font text 3", None), + 77: ("metadata[]", "XML code describing the movie", None), + 78: ("def_scale_grid[]", "Define scaling factors", None), + 83: ("def_shape4[]", "Define shape 4", None), + 84: ("def_morph2[]", "Define a morphing shape 2", None), + + # SWF version 9.0 + 72: ("do_abc[]", "SWF 9 ActionScript container; actions only", parseABC), + 76: ("symbol_class[]", "Instantiate objects from a set of classes", parseSymbolClass), + 82: ("do_abc_define[]", "SWF 9 ActionScript container; identifier, name, actions", parseABC), + 86: ("def_scene_frame[]", "Define raw data for scenes and frames", None), + 87: ("def_binary_data[]", "Defines a buffer of any size with any binary user data", parseBinaryData), + 88: ("def_font_name[]", "Define the legal font name and copyright", None), + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + size = self["length"].value + if self[0].name == "length_ext": + self._size = (6+size) * 8 + else: + self._size = (2+size) * 8 + code = self["code"].value + if code in self.TAG_INFO: + self._name, self._description, self.parser = self.TAG_INFO[code] + else: + self.parser = None + + def createFields(self): + if self.stream.readBits(self.absolute_address, 6, self.endian) == 63: + yield Bits(self, "length_ext", 6) + yield Bits(self, "code", 10) + yield filesizeHandler(UInt32(self, "length")) + else: + yield filesizeHandler(Bits(self, "length", 6)) + yield Bits(self, "code", 10) + size = self["length"].value + if 0 < size: + if self.parser: + for field in self.parser(self, size): + yield field + else: + yield RawBytes(self, "data", size) + + def createDescription(self): + return "Tag: %s (%s)" % (self["code"].display, self["length"].display) + +class SwfFile(Parser): + VALID_VERSIONS = set(xrange(1, 10+1)) + PARSER_TAGS = { + "id": "swf", + "category": "container", + "file_ext": ["swf"], + "mime": 
(u"application/x-shockwave-flash",), + "min_size": 64, + "description": u"Macromedia Flash data" + } + PARSER_TAGS["magic"] = [] + for version in VALID_VERSIONS: + PARSER_TAGS["magic"].append(("FWS%c" % version, 0)) + PARSER_TAGS["magic"].append(("CWS%c" % version, 0)) + endian = LITTLE_ENDIAN + SWF_SCALE_FACTOR = 1.0 / 20 + + def validate(self): + if self.stream.readBytes(0, 3) not in ("FWS", "CWS"): + return "Wrong file signature" + if self["version"].value not in self.VALID_VERSIONS: + return "Unknown version" + if MAX_FILE_SIZE < self["filesize"].value: + return "File too big (%u)" % self["filesize"].value + if self["signature"].value == "FWS": + if self["rect/padding"].value != 0: + return "Unknown rectangle padding value" + return True + + def createFields(self): + yield String(self, "signature", 3, "SWF format signature", charset="ASCII") + yield UInt8(self, "version") + yield filesizeHandler(UInt32(self, "filesize")) + if self["signature"].value != "CWS": + yield RECT(self, "rect") + yield FixedFloat16(self, "frame_rate") + yield UInt16(self, "frame_count") + + while not self.eof: + yield Tag(self, "tag[]") + else: + size = (self.size - self.current_size) // 8 + if has_deflate: + data = Deflate(Bytes(self, "compressed_data", size), False) + def createInputStream(cis, source=None, **args): + stream = cis(source=source) + header = StringInputStream("FWS" + self.stream.readBytes(3*8, 5)) + args.setdefault("tags",[]).append(("class", SwfFile)) + return ConcatStream((header, stream), source=stream.source, **args) + data.setSubIStream(createInputStream) + yield data + else: + yield Bytes(self, "compressed_data", size) + + def createDescription(self): + desc = ["version %u" % self["version"].value] + if self["signature"].value == "CWS": + desc.append("compressed") + return u"Macromedia Flash data: %s" % (", ".join(desc)) + + def createContentSize(self): + if self["signature"].value == "FWS": + return self["filesize"].value * 8 + else: + # TODO: Size of compressed 
Flash? + return None + diff --git a/libs/hachoir_parser/file_system/__init__.py b/libs/hachoir_parser/file_system/__init__.py new file mode 100644 index 0000000..863aae3 --- /dev/null +++ b/libs/hachoir_parser/file_system/__init__.py @@ -0,0 +1,8 @@ +from hachoir_parser.file_system.ext2 import EXT2_FS +from hachoir_parser.file_system.fat import FAT12, FAT16, FAT32 +from hachoir_parser.file_system.mbr import MSDos_HardDrive +from hachoir_parser.file_system.ntfs import NTFS +from hachoir_parser.file_system.iso9660 import ISO9660 +from hachoir_parser.file_system.reiser_fs import REISER_FS +from hachoir_parser.file_system.linux_swap import LinuxSwapFile + diff --git a/libs/hachoir_parser/file_system/ext2.py b/libs/hachoir_parser/file_system/ext2.py new file mode 100644 index 0000000..634fe06 --- /dev/null +++ b/libs/hachoir_parser/file_system/ext2.py @@ -0,0 +1,464 @@ +""" +EXT2 (Linux) file system parser. + +Author: Victor Stinner + +Sources: +- EXT2FS source code + http://ext2fsd.sourceforge.net/ +- Analysis of the Ext2fs structure + http://www.nondot.org/sabre/os/files/FileSystems/ext2fs/ +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + Bit, Bits, UInt8, UInt16, UInt32, + Enum, String, TimestampUnix32, RawBytes, NullBytes) +from hachoir_core.tools import (alignValue, + humanDuration, humanFilesize) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler +from itertools import izip + +class DirectoryEntry(FieldSet): + file_type = { + 1: "Regular", + 2: "Directory", + 3: "Char. 
dev.", + 4: "Block dev.", + 5: "Fifo", + 6: "Socket", + 7: "Symlink", + 8: "Max" + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["rec_len"].value * 8 + + def createFields(self): + yield UInt32(self, "inode", "Inode") + yield UInt16(self, "rec_len", "Record length") + yield UInt8(self, "name_len", "Name length") + yield Enum(UInt8(self, "file_type", "File type"), self.file_type) + yield String(self, "name", self["name_len"].value, "File name") + size = (self._size - self.current_size)//8 + if size: + yield NullBytes(self, "padding", size) + + def createDescription(self): + name = self["name"].value.strip("\0") + if name: + return "Directory entry: %s" % name + else: + return "Directory entry (empty)" + +class Inode(FieldSet): + inode_type_name = { + 1: "list of bad blocks", + 2: "Root directory", + 3: "ACL inode", + 4: "ACL inode", + 5: "Boot loader", + 6: "Undelete directory", + 8: "EXT3 journal" + } + file_type = { + 1: "Fifo", + 2: "Character device", + 4: "Directory", + 6: "Block device", + 8: "Regular", + 10: "Symbolic link", + 12: "Socket", + } + file_type_letter = { + 1: "p", + 4: "d", + 2: "c", + 6: "b", + 10: "l", + 12: "s", + } + static_size = (68 + 15*4)*8 + + def __init__(self, parent, name, index): + FieldSet.__init__(self, parent, name, None) + self.uniq_id = 1+index + + def createDescription(self): + desc = "Inode %s: " % self.uniq_id + size = self["size"].value + if self["blocks"].value == 0: + desc += "(unused)" + elif 11 <= self.uniq_id: + size = humanFilesize(size) + desc += "file, size=%s, mode=%s" % (size, self.getMode()) + else: + if self.uniq_id in self.inode_type_name: + desc += self.inode_type_name[self.uniq_id] + if self.uniq_id == 2: + desc += " (%s)" % self.getMode() + else: + desc += "special" + return desc + + def getMode(self): + names = ( + ("owner_read", "owner_write", "owner_exec"), + ("group_read", "group_write", "group_exec"), + ("other_read", "other_write", "other_exec")) + letters = "rwx" 
+ mode = [ "-" for index in xrange(10) ] + index = 1 + for loop in xrange(3): + for name, letter in izip(names[loop], letters): + if self[name].value: + mode[index] = letter + index += 1 + file_type = self["file_type"].value + if file_type in self.file_type_letter: + mode[0] = self.file_type_letter[file_type] + return "".join(mode) + + def createFields(self): + # File mode + yield Bit(self, "other_exec") + yield Bit(self, "other_write") + yield Bit(self, "other_read") + yield Bit(self, "group_exec") + yield Bit(self, "group_write") + yield Bit(self, "group_read") + yield Bit(self, "owner_exec") + yield Bit(self, "owner_write") + yield Bit(self, "owner_read") + yield Bit(self, "sticky") + yield Bit(self, "setgid") + yield Bit(self, "setuid") + yield Enum(Bits(self, "file_type", 4), self.file_type) + + yield UInt16(self, "uid", "User ID") + yield UInt32(self, "size", "File size (in bytes)") + yield TimestampUnix32(self, "atime", "Last access time") + yield TimestampUnix32(self, "ctime", "Creation time") + yield TimestampUnix32(self, "mtime", "Last modification time") + yield TimestampUnix32(self, "dtime", "Delete time") + yield UInt16(self, "gid", "Group ID") + yield UInt16(self, "links_count", "Links count") + yield UInt32(self, "blocks", "Number of blocks") + yield UInt32(self, "flags", "Flags") + yield NullBytes(self, "reserved[]", 4, "Reserved") + for index in xrange(15): + yield UInt32(self, "block[]") + yield UInt32(self, "version", "Version") + yield UInt32(self, "file_acl", "File ACL") + yield UInt32(self, "dir_acl", "Directory ACL") + yield UInt32(self, "faddr", "Block where the fragment of the file resides") + + os = self["/superblock/creator_os"].value + if os == SuperBlock.OS_LINUX: + yield UInt8(self, "frag", "Number of fragments in the block") + yield UInt8(self, "fsize", "Fragment size") + yield UInt16(self, "padding", "Padding") + yield UInt16(self, "uid_high", "High 16 bits of user ID") + yield UInt16(self, "gid_high", "High 16 bits of group ID") + 
yield NullBytes(self, "reserved[]", 4, "Reserved") + elif os == SuperBlock.OS_HURD: + yield UInt8(self, "frag", "Number of fragments in the block") + yield UInt8(self, "fsize", "Fragment size") + yield UInt16(self, "mode_high", "High 16 bits of mode") + yield UInt16(self, "uid_high", "High 16 bits of user ID") + yield UInt16(self, "gid_high", "High 16 bits of group ID") + yield UInt32(self, "author", "Author ID (?)") + else: + yield RawBytes(self, "raw", 12, "Reserved") + +class Bitmap(FieldSet): + def __init__(self, parent, name, start, size, description, **kw): + description = "%s: %s items" % (description, size) + FieldSet.__init__(self, parent, name, description, size=size, **kw) + self.start = 1+start + + def createFields(self): + for index in xrange(self._size): + yield Bit(self, "item[]", "Item %s" % (self.start+index)) + +BlockBitmap = Bitmap +InodeBitmap = Bitmap + +class GroupDescriptor(FieldSet): + static_size = 32*8 + + def __init__(self, parent, name, index): + FieldSet.__init__(self, parent, name) + self.uniq_id = index + + def createDescription(self): + blocks_per_group = self["/superblock/blocks_per_group"].value + start = self.uniq_id * blocks_per_group + end = start + blocks_per_group + return "Group descriptor: blocks %s-%s" % (start, end) + + def createFields(self): + yield UInt32(self, "block_bitmap", "Points to the blocks bitmap block") + yield UInt32(self, "inode_bitmap", "Points to the inodes bitmap block") + yield UInt32(self, "inode_table", "Points to the inodes table first block") + yield UInt16(self, "free_blocks_count", "Number of free blocks") + yield UInt16(self, "free_inodes_count", "Number of free inodes") + yield UInt16(self, "used_dirs_count", "Number of inodes allocated to directories") + yield UInt16(self, "padding", "Padding") + yield NullBytes(self, "reserved", 12, "Reserved") + +class SuperBlock(FieldSet): + static_size = 433*8 + + OS_LINUX = 0 + OS_HURD = 1 + os_name = { + 0: "Linux", + 1: "Hurd", + 2: "Masix", + 3: 
"FreeBSD", + 4: "Lites", + 5: "WinNT" + } + state_desc = { + 1: "Valid (Unmounted cleanly)", + 2: "Error (Errors detected)", + 4: "Orphan FS (Orphans being recovered)", + } + error_handling_desc = { 1: "Continue" } + + def __init__(self, parent, name): + FieldSet.__init__(self, parent, name) + self._group_count = None + + def createDescription(self): + if self["feature_compat"].value & 4: + fstype = "ext3" + else: + fstype = "ext2" + return "Superblock: %s file system" % fstype + + def createFields(self): + yield UInt32(self, "inodes_count", "Inodes count") + yield UInt32(self, "blocks_count", "Blocks count") + yield UInt32(self, "r_blocks_count", "Reserved blocks count") + yield UInt32(self, "free_blocks_count", "Free blocks count") + yield UInt32(self, "free_inodes_count", "Free inodes count") + yield UInt32(self, "first_data_block", "First data block") + yield UInt32(self, "log_block_size", "Block size") + yield UInt32(self, "log_frag_size", "Fragment size") + yield UInt32(self, "blocks_per_group", "Blocks per group") + yield UInt32(self, "frags_per_group", "Fragments per group") + yield UInt32(self, "inodes_per_group", "Inodes per group") + yield TimestampUnix32(self, "mtime", "Mount time") + yield TimestampUnix32(self, "wtime", "Write time") + yield UInt16(self, "mnt_count", "Mount count") + yield UInt16(self, "max_mnt_count", "Max mount count") + yield String(self, "magic", 2, "Magic number (0x53EF)") + yield Enum(UInt16(self, "state", "File system state"), self.state_desc) + yield Enum(UInt16(self, "errors", "Behaviour when detecting errors"), self.error_handling_desc) + yield UInt16(self, "minor_rev_level", "Minor revision level") + yield TimestampUnix32(self, "last_check", "Time of last check") + yield textHandler(UInt32(self, "check_interval", "Maximum time between checks"), self.postMaxTime) + yield Enum(UInt32(self, "creator_os", "Creator OS"), self.os_name) + yield UInt32(self, "rev_level", "Revision level") + yield UInt16(self, "def_resuid", "Default 
uid for reserved blocks") + yield UInt16(self, "def_resgid", "Default gid for reserved blocks") + yield UInt32(self, "first_ino", "First non-reserved inode") + yield UInt16(self, "inode_size", "Size of inode structure") + yield UInt16(self, "block_group_nr", "Block group # of this superblock") + yield UInt32(self, "feature_compat", "Compatible feature set") + yield UInt32(self, "feature_incompat", "Incompatible feature set") + yield UInt32(self, "feature_ro_compat", "Read-only compatible feature set") + yield RawBytes(self, "uuid", 16, "128-bit uuid for volume") + yield String(self, "volume_name", 16, "Volume name", strip="\0") + yield String(self, "last_mounted", 64, "Directory where last mounted", strip="\0") + yield UInt32(self, "compression", "For compression (algorithm usage bitmap)") + yield UInt8(self, "prealloc_blocks", "Number of blocks to try to preallocate") + yield UInt8(self, "prealloc_dir_blocks", "Number to preallocate for directories") + yield UInt16(self, "padding", "Padding") + yield String(self, "journal_uuid", 16, "uuid of journal superblock") + yield UInt32(self, "journal_inum", "inode number of journal file") + yield UInt32(self, "journal_dev", "device number of journal file") + yield UInt32(self, "last_orphan", "start of list of inodes to delete") + yield RawBytes(self, "reserved", 197, "Reserved") + + def _getGroupCount(self): + if self._group_count is None: + # Calculate number of groups + blocks_per_group = self["blocks_per_group"].value + self._group_count = (self["blocks_count"].value - self["first_data_block"].value + (blocks_per_group - 1)) / blocks_per_group + return self._group_count + group_count = property(_getGroupCount) + + def postMaxTime(self, chunk): + return humanDuration(chunk.value * 1000) + +class GroupDescriptors(FieldSet): + def __init__(self, parent, name, count): + FieldSet.__init__(self, parent, name) + self.count = count + + def createDescription(self): + return "Group descriptors: %s items" % self.count + + def 
createFields(self): + for index in range(0, self.count): + yield GroupDescriptor(self, "group[]", index) + +class InodeTable(FieldSet): + def __init__(self, parent, name, start, count): + FieldSet.__init__(self, parent, name) + self.start = start + self.count = count + self._size = self.count * self["/superblock/inode_size"].value * 8 + + def createDescription(self): + return "Group descriptors: %s items" % self.count + + def createFields(self): + for index in range(self.start, self.start+self.count): + yield Inode(self, "inode[]", index) + +class Group(FieldSet): + def __init__(self, parent, name, index): + FieldSet.__init__(self, parent, name) + self.uniq_id = index + + def createDescription(self): + desc = "Group %s: %s" % (self.uniq_id, humanFilesize(self.size/8)) + if "superblock_copy" in self: + desc += " (with superblock copy)" + return desc + + def createFields(self): + group = self["../group_desc/group[%u]" % self.uniq_id] + superblock = self["/superblock"] + block_size = self["/"].block_size + + # Read block bitmap + addr = self.absolute_address + 56*8 + self.superblock_copy = (self.stream.readBytes(addr, 2) == "\x53\xEF") + if self.superblock_copy: + yield SuperBlock(self, "superblock_copy") + + # Compute number of block and inodes + block_count = superblock["blocks_per_group"].value + inode_count = superblock["inodes_per_group"].value + block_index = self.uniq_id * block_count + inode_index = self.uniq_id * inode_count + if (block_count % 8) != 0: + raise ParserError("Invalid block count") + if (inode_count % 8) != 0: + raise ParserError("Invalid inode count") + block_count = min(block_count, superblock["blocks_count"].value - block_index) + inode_count = min(inode_count, superblock["inodes_count"].value - inode_index) + + # Read block bitmap + field = self.seekByte(group["block_bitmap"].value * block_size, relative=False, null=True) + if field: + yield field + yield BlockBitmap(self, "block_bitmap", block_index, block_count, "Block bitmap") + + # Read 
inode bitmap + field = self.seekByte(group["inode_bitmap"].value * block_size, relative=False) + if field: + yield field + yield InodeBitmap(self, "inode_bitmap", inode_index, inode_count, "Inode bitmap") + + # Read inode table + field = self.seekByte(alignValue(self.current_size//8, block_size)) + if field: + yield field + yield InodeTable(self, "inode_table", inode_index, inode_count) + + # Add padding if needed + addr = min(self.parent.size / 8, + (self.uniq_id+1) * superblock["blocks_per_group"].value * block_size) + yield self.seekByte(addr, "data", relative=False) + +class EXT2_FS(Parser): + """ + Parse an EXT2 or EXT3 partition. + + Attributes: + * block_size: Size of a block (in bytes) + + Fields: + * superblock: Most important block, store most important informations + * ... + """ + PARSER_TAGS = { + "id": "ext2", + "category": "file_system", + "description": "EXT2/EXT3 file system", + "min_size": (1024*2)*8, + "magic": ( + # (magic, state=valid) + ("\x53\xEF\1\0", 1080*8), + # (magic, state=error) + ("\x53\xEF\2\0", 1080*8), + # (magic, state=error) + ("\x53\xEF\4\0", 1080*8), + ), + } + endian = LITTLE_ENDIAN + + def validate(self): + if self.stream.readBytes((1024+56)*8, 2) != "\x53\xEF": + return "Invalid magic number" + if not(0 <= self["superblock/log_block_size"].value <= 2): + return "Invalid (log) block size" + if self["superblock/inode_size"].value != (68 + 15*4): + return "Unsupported inode size" + return True + + def createFields(self): + # Skip something (what is stored here? MBR?) 
+ yield NullBytes(self, "padding[]", 1024) + + # Read superblock + superblock = SuperBlock(self, "superblock") + yield superblock + if not(0 <= self["superblock/log_block_size"].value <= 2): + raise ParserError("EXT2: Invalid (log) block size") + self.block_size = 1024 << superblock["log_block_size"].value # in bytes + + # Read groups' descriptor + field = self.seekByte(((1023 + superblock.size/8) / self.block_size + 1) * self.block_size, null=True) + if field: + yield field + groups = GroupDescriptors(self, "group_desc", superblock.group_count) + yield groups + + # Read groups + address = groups["group[0]/block_bitmap"].value * self.block_size + field = self.seekByte(address, null=True) + if field: + yield field + for index in range(0, superblock.group_count): + yield Group(self, "group[]", index) + + def getSuperblock(self): + # FIXME: Use superblock copy if main superblock is invalid + return self["superblock"] + + def createDescription(self): + superblock = self.getSuperblock() + block_size = 1024 << superblock["log_block_size"].value + nb_block = superblock["blocks_count"].value + total = nb_block * block_size + used = (superblock["free_blocks_count"].value) * block_size + desc = "EXT2/EXT3" + if "group[0]/inode_table/inode[7]/blocks" in self: + if 0 < self["group[0]/inode_table/inode[7]/blocks"].value: + desc = "EXT3" + else: + desc = "EXT2" + return desc + " file system: total=%s, used=%s, block=%s" % ( + humanFilesize(total), humanFilesize(used), + humanFilesize(block_size)) + + diff --git a/libs/hachoir_parser/file_system/fat.py b/libs/hachoir_parser/file_system/fat.py new file mode 100644 index 0000000..2aebe17 --- /dev/null +++ b/libs/hachoir_parser/file_system/fat.py @@ -0,0 +1,433 @@ +from hachoir_core.compatibility import sorted +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, StaticFieldSet, + RawBytes, PaddingBytes, createPaddingField, Link, Fragment, + Bit, Bits, UInt8, UInt16, UInt32, + String, Bytes, NullBytes) +from 
hachoir_core.field.integer import GenericInteger +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.error import error +from hachoir_core.tools import humanFilesize, makePrintable +import datetime +import re + +strip_index = re.compile(r'\[[^]]+]$') + + +class Boot(FieldSet): + static_size = 512*8 + def createFields(self): + yield Bytes(self, "jmp", 3, "Jump instruction (to skip over header on boot)") + yield Bytes(self, "oem_name", 8, "OEM Name (padded with spaces)") + yield UInt16(self, "sector_size", "Bytes per sector") + yield UInt8 (self, "cluster_size", "Sectors per cluster") + yield UInt16(self, "reserved_sectors", "Reserved sector count (including boot sector)") + yield UInt8 (self, "fat_nb", "Number of file allocation tables") + yield UInt16(self, "max_root", "Maximum number of root directory entries") + yield UInt16(self, "sectors1", "Total sectors (if zero, use 'sectors2')") + yield UInt8 (self, "media_desc", "Media descriptor") + yield UInt16(self, "fat_size", "Sectors per FAT") + yield UInt16(self, "track_size", "Sectors per track") + yield UInt16(self, "head_nb", "Number of heads") + yield UInt32(self, "hidden", "Hidden sectors") + yield UInt32(self, "sectors2", "Total sectors (if greater than 65535)") + if self.parent.version == 32: + yield UInt32(self, "fat32_size", "Sectors per FAT") + yield UInt16(self, "fat_flags", "FAT Flags") + yield UInt16(self, "version", "Version") + yield UInt32(self, "root_start", "Cluster number of root directory start") + yield UInt16(self, "inf_sector", "Sector number of FS Information Sector") + yield UInt16(self, "boot_copy", "Sector number of a copy of this boot sector") + yield NullBytes(self, "reserved[]", 12, "Reserved") + yield UInt8(self, "phys_drv", "Physical drive number") + yield NullBytes(self, "reserved[]", 1, 'Reserved ("current head")') + yield UInt8(self, "sign", "Signature") + yield textHandler(UInt32(self, "serial", "ID 
(serial number)"), hexadecimal) + yield String(self, "label", 11, "Volume Label", strip=' ', charset="ASCII") + yield String(self, "fs_type", 8, "FAT file system type", strip=' ', charset="ASCII") + yield Bytes(self, "code", 510-self.current_size/8, "Operating system boot code") + yield Bytes(self, "trail_sig", 2, "Signature (0x55 0xAA)") + + +class FSInfo(StaticFieldSet): + format = ( + (String, "lead_sig", 4, 'Signature ("RRaA")'), + (NullBytes, "reserved[]", 480), + (String, "struct_sig", 4, 'Signature ("rrAa")'), + (UInt32, "free_count", "Last known free cluster count on the volume"), + (UInt32, "nxt_free",), + (NullBytes, "reserved[]", 12), + (Bytes, "trail_sig", 4, "Signature (0x00 0x00 0x55 0xAA)") + ) + + +class FAT(FieldSet): + class FAT(FieldSet): + def createFields(self): + parent = self.parent + version = parent.parent.version + text_handler = parent.text_handler + while self.current_size < self._size: + yield textHandler(GenericInteger(self, 'entry[]', False, version), text_handler) + def createFields(self): + version = self.parent.version + max_entry = 1 << min(28, version) + def FatEntry(chunk): + i = chunk.value + j = (1 - i) % max_entry + if j == 0: + return "reserved cluster" + elif j == 1: + return "free cluster" + elif j < 10: + return "end of a chain" + elif j == 10: + return "bad cluster" + elif j < 18: + return "reserved value" + else: + return str(i) + self.text_handler = FatEntry + while self.current_size < self._size: + yield FAT.FAT(self, 'group[]', size=min(1000*version,self._size-self.current_size)) + + +class Date(FieldSet): + def __init__(self, parent, name): + FieldSet.__init__(self, parent, name, size={ + "create": 5, + "access": 2, + "modify": 4, + }[name] * 8) + + def createFields(self): + size = self.size / 8 + if size > 2: + if size > 4: + yield UInt8(self, "cs", "10ms units, values from 0 to 199") + yield Bits(self, "2sec", 5, "seconds/2") + yield Bits(self, "min", 6, "minutes") + yield Bits(self, "hour", 5, "hours") + yield 
Bits(self, "day", 5, "(1-31)") + yield Bits(self, "month", 4, "(1-12)") + yield Bits(self, "year", 7, "(0 = 1980, 127 = 2107)") + + def createDescription(self): + date = [ self["year"].value, self["month"].value, self["day"].value ] + size = self.size / 8 + if size > 2: + mkdate = datetime.datetime + cs = 200 * self["2sec"].value + if size > 4: + cs += self["cs"].value + date += [ self["hour"].value, self["min"].value, cs / 100, cs % 100 * 10000 ] + else: + mkdate = datetime.date + if date == [ 0 for i in date ]: + date = None + else: + date[0] += 1980 + try: + date = mkdate(*tuple(date)) + except ValueError: + return "invalid" + return str(date) + + +class InodeLink(Link): + def __init__(self, parent, name, target=None): + Link.__init__(self, parent, name) + self.target = target + self.first = None + + def _getTargetPath(self): + if not self.target: + parent = self.parent + self.target = strip_index.sub(r"\\", parent.parent._name) + parent.getFilename().rstrip("/") + return self.target + + def createValue(self): + field = InodeGen(self["/"], self.parent, self._getTargetPath())(self) + if field: + self._display = field.path + return Link.createValue(self) + + def createDisplay(self): + return "/%s[0]" % self._getTargetPath() + + +class FileEntry(FieldSet): + static_size = 32*8 + process = False + LFN = False + + def __init__(self, *args): + FieldSet.__init__(self, *args) + self.status = self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN) + if self.status in (0, 0xE5): + return + + magic = self.stream.readBits(self.absolute_address+11*8, 8, LITTLE_ENDIAN) + if magic & 0x3F == 0x0F: + self.LFN = True + elif self.getFilename() not in (".", ".."): + self.process = True + + def getFilename(self): + name = self["name"].value + if isinstance(name, str): + name = makePrintable(name, "ASCII", to_unicode=True) + ext = self["ext"].value + if ext: + name += "." 
+ ext + if name[0] == 5: + name = "\xE5" + name[1:] + if not self.LFN and self["directory"].value: + name += "/" + return name + + def createDescription(self): + if self.status == 0: + return "Free entry" + elif self.status == 0xE5: + return "Deleted file" + elif self.LFN: + name = "".join( field.value for field in self.array("name") ) + try: + name = name[:name.index('\0')] + except ValueError: + pass + seq_no = self["seq_no"].value + return "Long filename part: '%s' [%u]" % (name, seq_no) + else: + return "File: '%s'" % self.getFilename() + + def getCluster(self): + cluster = self["cluster_lo"].value + if self.parent.parent.version > 16: + cluster += self["cluster_hi"].value << 16 + return cluster + + def createFields(self): + if not self.LFN: + yield String(self, "name", 8, "DOS file name (padded with spaces)", + strip=' ', charset="ASCII") + yield String(self, "ext", 3, "DOS file extension (padded with spaces)", + strip=' ', charset="ASCII") + yield Bit(self, "read_only") + yield Bit(self, "hidden") + yield Bit(self, "system") + yield Bit(self, "volume_label") + yield Bit(self, "directory") + yield Bit(self, "archive") + yield Bit(self, "device") + yield Bit(self, "unused") + yield RawBytes(self, "reserved", 1, "Something about the case") + yield Date(self, "create") + yield Date(self, "access") + if self.parent.parent.version > 16: + yield UInt16(self, "cluster_hi") + else: + yield UInt16(self, "ea_index") + yield Date(self, "modify") + yield UInt16(self, "cluster_lo") + size = UInt32(self, "size") + yield size + if self.process: + del self.process + target_size = size.value + if self["directory"].value: + if target_size: + size.error("(FAT) value must be zero") + target_size = 0 + elif not target_size: + return + self.target_size = 8 * target_size + yield InodeLink(self, "data") + else: + yield UInt8(self, "seq_no", "Sequence Number") + yield String(self, "name[]", 10, "(5 UTF-16 characters)", + charset="UTF-16-LE") + yield UInt8(self, "magic", "Magic number 
(15)") + yield NullBytes(self, "reserved", 1, "(always 0)") + yield UInt8(self, "checksum", "Checksum of DOS file name") + yield String(self, "name[]", 12, "(6 UTF-16 characters)", + charset="UTF-16-LE") + yield UInt16(self, "first_cluster", "(always 0)") + yield String(self, "name[]", 4, "(2 UTF-16 characters)", + charset="UTF-16-LE") + +class Directory(Fragment): + def createFields(self): + while self.current_size < self._size: + yield FileEntry(self, "entry[]") + +class File(Fragment): + def _getData(self): + return self["data"] + def createFields(self): + yield Bytes(self, "data", self.datasize/8) + padding = self._size - self.current_size + if padding: + yield createPaddingField(self, padding) + +class InodeGen: + def __init__(self, root, entry, path): + self.root = root + self.cluster = root.clusters(entry.getCluster) + self.path = path + self.filesize = entry.target_size + self.done = 0 + def createInputStream(cis, **args): + args["size"] = self.filesize + args.setdefault("tags",[]).append(("filename", entry.getFilename())) + return cis(**args) + self.createInputStream = createInputStream + + def __call__(self, prev): + name = self.path + "[]" + address, size, last = self.cluster.next() + if self.filesize: + if self.done >= self.filesize: + error("(FAT) bad metadata for " + self.path) + return + field = File(self.root, name, size=size) + if prev.first is None: + field._description = 'File size: %s' % humanFilesize(self.filesize//8) + field.setSubIStream(self.createInputStream) + field.datasize = min(self.filesize - self.done, size) + self.done += field.datasize + else: + field = Directory(self.root, name, size=size) + padding = self.root.getFieldByAddress(address, feed=False) + if not isinstance(padding, (PaddingBytes, RawBytes)): + error("(FAT) address %u doesn't point to a padding field" % address) + return + if last: + next = None + else: + next = lambda: self(field) + field.setLinks(prev.first, next) + self.root.writeFieldsIn(padding, address, (field,)) 
+ return field + + +class FAT_FS(Parser): + endian = LITTLE_ENDIAN + PARSER_TAGS = { + "category": "file_system", + "min_size": 512*8, + "file_ext": ("",), + } + + def _validate(self, type_offset): + if self.stream.readBytes(type_offset*8, 8) != ("FAT%-5u" % self.version): + return "Invalid FAT%u signature" % self.version + if self.stream.readBytes(510*8, 2) != "\x55\xAA": + return "Invalid BIOS signature" + return True + + def clusters(self, cluster_func): + max_entry = (1 << min(28, self.version)) - 16 + cluster = cluster_func() + if 1 < cluster < max_entry: + clus_nb = 1 + next = cluster + while True: + next = self.fat[next/1000][next%1000].value + if not 1 < next < max_entry: + break + if cluster + clus_nb == next: + clus_nb += 1 + else: + yield self.data_start + cluster * self.cluster_size, clus_nb * self.cluster_size, False + cluster = next + clus_nb = 1 + yield self.data_start + cluster * self.cluster_size, clus_nb * self.cluster_size, True + + def createFields(self): + # Read boot seector + boot = Boot(self, "boot", "Boot sector") + yield boot + self.sector_size = boot["sector_size"].value + + if self.version == 32: + for field in sorted(( + (boot["inf_sector"].value, lambda: FSInfo(self, "fsinfo")), + (boot["boot_copy"].value, lambda: Boot(self, "bkboot", "Copy of the boot sector")), + )): + if field[0]: + padding = self.seekByte(field[0] * self.sector_size) + if padding: + yield padding + yield field[1]() + padding = self.seekByte(boot["reserved_sectors"].value * self.sector_size) + if padding: + yield padding + + # Read the two FAT + fat_size = boot["fat_size"].value + if fat_size == 0: + fat_size = boot["fat32_size"].value + fat_size *= self.sector_size * 8 + for i in xrange(boot["fat_nb"].value): + yield FAT(self, "fat[]", "File Allocation Table", size=fat_size) + + # Read inode table (Directory) + self.cluster_size = boot["cluster_size"].value * self.sector_size * 8 + self.fat = self["fat[0]"] + if "root_start" in boot: + self.target_size = 0 + 
self.getCluster = lambda: boot["root_start"].value + yield InodeLink(self, "root", "root") + else: + yield Directory(self, "root[]", size=boot["max_root"].value * 32 * 8) + self.data_start = self.current_size - 2 * self.cluster_size + sectors = boot["sectors1"].value + if not sectors: + sectors = boot["sectors2"].value + + # Create one big padding field for the end + size = sectors * self.sector_size + if self._size: + size = min(size, self.size//8) + padding = self.seekByte(size) + if padding: + yield padding + + +class FAT12(FAT_FS): + PARSER_TAGS = { + "id": "fat12", + "description": "FAT12 filesystem", + "magic": (("FAT12 ", 54*8),), + } + version = 12 + + def validate(self): + return FAT_FS._validate(self, 54) + + +class FAT16(FAT_FS): + PARSER_TAGS = { + "id": "fat16", + "description": "FAT16 filesystem", + "magic": (("FAT16 ", 54*8),), + } + version = 16 + + def validate(self): + return FAT_FS._validate(self, 54) + + +class FAT32(FAT_FS): + PARSER_TAGS = { + "id": "fat32", + "description": "FAT32 filesystem", + "magic": (("FAT32 ", 82*8),), + } + version = 32 + + def validate(self): + return FAT_FS._validate(self, 82) diff --git a/libs/hachoir_parser/file_system/iso9660.py b/libs/hachoir_parser/file_system/iso9660.py new file mode 100644 index 0000000..3d93593 --- /dev/null +++ b/libs/hachoir_parser/file_system/iso9660.py @@ -0,0 +1,121 @@ +""" +ISO 9660 (cdrom) file system parser. 
+ +Documents: +- Standard ECMA-119 (december 1987) + http://www.nondot.org/sabre/os/files/FileSystems/iso9660.pdf + +Author: Victor Stinner +Creation: 11 july 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt32, UInt64, Enum, + NullBytes, RawBytes, String) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN + +class PrimaryVolumeDescriptor(FieldSet): + static_size = 2041*8 + def createFields(self): + yield NullBytes(self, "unused[]", 1) + yield String(self, "system_id", 32, "System identifier", strip=" ") + yield String(self, "volume_id", 32, "Volume identifier", strip=" ") + yield NullBytes(self, "unused[]", 8) + yield UInt64(self, "space_size", "Volume space size") + yield NullBytes(self, "unused[]", 32) + yield UInt32(self, "set_size", "Volume set size") + yield UInt32(self, "seq_num", "Sequence number") + yield UInt32(self, "block_size", "Block size") + yield UInt64(self, "path_table_size", "Path table size") + yield UInt32(self, "occu_lpath", "Location of Occurrence of Type L Path Table") + yield UInt32(self, "opt_lpath", "Location of Optional of Type L Path Table") + yield UInt32(self, "occu_mpath", "Location of Occurrence of Type M Path Table") + yield UInt32(self, "opt_mpath", "Location of Optional of Type M Path Table") + yield RawBytes(self, "root", 34, "Directory Record for Root Directory") + yield String(self, "vol_set_id", 128, "Volume set identifier", strip=" ") + yield String(self, "publisher", 128, "Publisher identifier", strip=" ") + yield String(self, "data_preparer", 128, "Data preparer identifier", strip=" ") + yield String(self, "application", 128, "Application identifier", strip=" ") + yield String(self, "copyright", 37, "Copyright file identifier", strip=" ") + yield String(self, "abstract", 37, "Abstract file identifier", strip=" ") + yield String(self, "biographic", 37, "Biographic file identifier", strip=" ") + yield String(self, "creation_ts", 17, "Creation date and 
time", strip=" ") + yield String(self, "modification_ts", 17, "Modification date and time", strip=" ") + yield String(self, "expiration_ts", 17, "Expiration date and time", strip=" ") + yield String(self, "effective_ts", 17, "Effective date and time", strip=" ") + yield UInt8(self, "struct_ver", "Structure version") + yield NullBytes(self, "unused[]", 1) + yield String(self, "app_use", 512, "Application use", strip=" \0") + yield NullBytes(self, "unused[]", 653) + +class BootRecord(FieldSet): + static_size = 2041*8 + def createFields(self): + yield String(self, "sys_id", 31, "Boot system identifier", strip="\0") + yield String(self, "boot_id", 31, "Boot identifier", strip="\0") + yield RawBytes(self, "system_use", 1979, "Boot system use") + +class Terminator(FieldSet): + static_size = 2041*8 + def createFields(self): + yield NullBytes(self, "null", 2041) + +class Volume(FieldSet): + endian = BIG_ENDIAN + TERMINATOR = 255 + type_name = { + 0: "Boot Record", + 1: "Primary Volume Descriptor", + 2: "Supplementary Volume Descriptor", + 3: "Volume Partition Descriptor", + TERMINATOR: "Volume Descriptor Set Terminator", + } + static_size = 2048 * 8 + content_handler = { + 0: BootRecord, + 1: PrimaryVolumeDescriptor, + TERMINATOR: Terminator, + } + + def createFields(self): + yield Enum(UInt8(self, "type", "Volume descriptor type"), self.type_name) + yield RawBytes(self, "signature", 5, "ISO 9960 signature (CD001)") + if self["signature"].value != "CD001": + raise ParserError("Invalid ISO 9960 volume signature") + yield UInt8(self, "version", "Volume descriptor version") + cls = self.content_handler.get(self["type"].value, None) + if cls: + yield cls(self, "content") + else: + yield RawBytes(self, "raw_content", 2048-7) + +class ISO9660(Parser): + endian = LITTLE_ENDIAN + MAGIC = "\x01CD001" + NULL_BYTES = 0x8000 + PARSER_TAGS = { + "id": "iso9660", + "category": "file_system", + "description": "ISO 9660 file system", + "min_size": (NULL_BYTES + 6)*8, + "magic": ((MAGIC, 
NULL_BYTES*8),), + } + + def validate(self): + if self.stream.readBytes(self.NULL_BYTES*8, len(self.MAGIC)) != self.MAGIC: + return "Invalid signature" + return True + + def createFields(self): + yield self.seekByte(self.NULL_BYTES, null=True) + + while True: + volume = Volume(self, "volume[]") + yield volume + if volume["type"].value == Volume.TERMINATOR: + break + + if self.current_size < self._size: + yield self.seekBit(self._size, "end") + diff --git a/libs/hachoir_parser/file_system/linux_swap.py b/libs/hachoir_parser/file_system/linux_swap.py new file mode 100644 index 0000000..ea2e0db --- /dev/null +++ b/libs/hachoir_parser/file_system/linux_swap.py @@ -0,0 +1,114 @@ +""" +Linux swap file. + +Documentation: Linux kernel source code, files: + - mm/swapfile.c + - include/linux/swap.h + +Author: Victor Stinner +Creation date: 25 december 2006 (christmas ;-)) +""" + +from hachoir_parser import Parser +from hachoir_core.field import (ParserError, GenericVector, + UInt32, String, + Bytes, NullBytes, RawBytes) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.tools import humanFilesize +from hachoir_core.bits import str2hex + +PAGE_SIZE = 4096 + +# Definition of MAX_SWAP_BADPAGES in Linux kernel: +# (__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int) +MAX_SWAP_BADPAGES = ((PAGE_SIZE - 10) - 1536) // 4 + +class Page(RawBytes): + static_size = PAGE_SIZE*8 + def __init__(self, parent, name): + RawBytes.__init__(self, parent, name, PAGE_SIZE) + +class UUID(Bytes): + static_size = 16*8 + def __init__(self, parent, name): + Bytes.__init__(self, parent, name, 16) + def createDisplay(self): + text = str2hex(self.value, format=r"%02x") + return "%s-%s-%s-%s-%s" % ( + text[:8], text[8:12], text[12:16], text[16:20], text[20:]) + +class LinuxSwapFile(Parser): + PARSER_TAGS = { + "id": "linux_swap", + "file_ext": ("",), + "category": "file_system", + "min_size": PAGE_SIZE*8, + "description": "Linux swap file", + "magic": ( + ("SWAP-SPACE", 
(PAGE_SIZE-10)*8), + ("SWAPSPACE2", (PAGE_SIZE-10)*8), + ("S1SUSPEND\0", (PAGE_SIZE-10)*8), + ), + } + endian = LITTLE_ENDIAN + + def validate(self): + magic = self.stream.readBytes((PAGE_SIZE-10)*8, 10) + if magic not in ("SWAP-SPACE", "SWAPSPACE2", "S1SUSPEND\0"): + return "Unknown magic string" + if MAX_SWAP_BADPAGES < self["nb_badpage"].value: + return "Invalid number of bad page (%u)" % self["nb_badpage"].value + return True + + def getPageCount(self): + """ + Number of pages which can really be used for swapping: + number of page minus bad pages minus one page (used for the header) + """ + # -1 because first page is used for the header + return self["last_page"].value - self["nb_badpage"].value - 1 + + def createDescription(self): + if self["magic"].value == "S1SUSPEND\0": + text = "Suspend swap file version 1" + elif self["magic"].value == "SWAPSPACE2": + text = "Linux swap file version 2" + else: + text = "Linux swap file version 1" + nb_page = self.getPageCount() + return "%s, page size: %s, %s pages" % ( + text, humanFilesize(PAGE_SIZE), nb_page) + + def createFields(self): + # First kilobyte: boot sectors + yield RawBytes(self, "boot", 1024, "Space for disklabel etc.") + + # Header + yield UInt32(self, "version") + yield UInt32(self, "last_page") + yield UInt32(self, "nb_badpage") + yield UUID(self, "sws_uuid") + yield UUID(self, "sws_volume") + yield NullBytes(self, "reserved", 117*4) + + # Read bad pages (if any) + count = self["nb_badpage"].value + if count: + if MAX_SWAP_BADPAGES < count: + raise ParserError("Invalid number of bad page (%u)" % count) + yield GenericVector(self, "badpages", count, UInt32, "badpage") + + # Read magic + padding = self.seekByte(PAGE_SIZE - 10, "padding", null=True) + if padding: + yield padding + yield String(self, "magic", 10, charset="ASCII") + + # Read all pages + yield GenericVector(self, "pages", self["last_page"].value, Page, "page") + + # Padding at the end + padding = self.seekBit(self.size, "end_padding", 
null=True) + if padding: + yield padding + diff --git a/libs/hachoir_parser/file_system/mbr.py b/libs/hachoir_parser/file_system/mbr.py new file mode 100644 index 0000000..d5c366f --- /dev/null +++ b/libs/hachoir_parser/file_system/mbr.py @@ -0,0 +1,230 @@ +""" +Master Boot Record. + + +""" + +# cfdisk uses the following algorithm to compute the geometry: +# 0. Use the values given by the user. +# 1. Try to guess the geometry from the partition table: +# if all the used partitions end at the same head H and the +# same sector S, then there are (H+1) heads and S sectors/cylinder. +# 2. Ask the system (ioctl/HDIO_GETGEO). +# 3. 255 heads and 63 sectors/cylinder. + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + Enum, Bits, UInt8, UInt16, UInt32, + RawBytes) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.tools import humanFilesize +from hachoir_core.text_handler import textHandler, hexadecimal + +BLOCK_SIZE = 512 # bytes + +class CylinderNumber(Bits): + def __init__(self, parent, name, description=None): + Bits.__init__(self, parent, name, 10, description) + + def createValue(self): + i = self.parent.stream.readInteger( + self.absolute_address, False, self._size, self.parent.endian) + return i >> 2 | i % 4 << 8 + +class PartitionHeader(FieldSet): + static_size = 16*8 + + # taken from the source of cfdisk: + # sed -n 's/.*{\(.*\), N_(\(.*\))}.*/ \1: \2,/p' i386_sys_types.c + system_name = { + 0x00: "Empty", + 0x01: "FAT12", + 0x02: "XENIX root", + 0x03: "XENIX usr", + 0x04: "FAT16 <32M", + 0x05: "Extended", + 0x06: "FAT16", + 0x07: "HPFS/NTFS", + 0x08: "AIX", + 0x09: "AIX bootable", + 0x0a: "OS/2 Boot Manager", + 0x0b: "W95 FAT32", + 0x0c: "W95 FAT32 (LBA)", + 0x0e: "W95 FAT16 (LBA)", + 0x0f: "W95 Ext'd (LBA)", + 0x10: "OPUS", + 0x11: "Hidden FAT12", + 0x12: "Compaq diagnostics", + 0x14: "Hidden FAT16 <32M", + 0x16: "Hidden FAT16", + 0x17: "Hidden HPFS/NTFS", + 0x18: "AST SmartSleep", + 0x1b: "Hidden W95 FAT32", + 
0x1c: "Hidden W95 FAT32 (LBA)", + 0x1e: "Hidden W95 FAT16 (LBA)", + 0x24: "NEC DOS", + 0x39: "Plan 9", + 0x3c: "PartitionMagic recovery", + 0x40: "Venix 80286", + 0x41: "PPC PReP Boot", + 0x42: "SFS", + 0x4d: "QNX4.x", + 0x4e: "QNX4.x 2nd part", + 0x4f: "QNX4.x 3rd part", + 0x50: "OnTrack DM", + 0x51: "OnTrack DM6 Aux1", + 0x52: "CP/M", + 0x53: "OnTrack DM6 Aux3", + 0x54: "OnTrackDM6", + 0x55: "EZ-Drive", + 0x56: "Golden Bow", + 0x5c: "Priam Edisk", + 0x61: "SpeedStor", + 0x63: "GNU HURD or SysV", + 0x64: "Novell Netware 286", + 0x65: "Novell Netware 386", + 0x70: "DiskSecure Multi-Boot", + 0x75: "PC/IX", + 0x80: "Old Minix", + 0x81: "Minix / old Linux", + 0x82: "Linux swap / Solaris", + 0x83: "Linux (ext2/ext3)", + 0x84: "OS/2 hidden C: drive", + 0x85: "Linux extended", + 0x86: "NTFS volume set", + 0x87: "NTFS volume set", + 0x88: "Linux plaintext", + 0x8e: "Linux LVM", + 0x93: "Amoeba", + 0x94: "Amoeba BBT", + 0x9f: "BSD/OS", + 0xa0: "IBM Thinkpad hibernation", + 0xa5: "FreeBSD", + 0xa6: "OpenBSD", + 0xa7: "NeXTSTEP", + 0xa8: "Darwin UFS", + 0xa9: "NetBSD", + 0xab: "Darwin boot", + 0xb7: "BSDI fs", + 0xb8: "BSDI swap", + 0xbb: "Boot Wizard hidden", + 0xbe: "Solaris boot", + 0xbf: "Solaris", + 0xc1: "DRDOS/sec (FAT-12)", + 0xc4: "DRDOS/sec (FAT-16 < 32M)", + 0xc6: "DRDOS/sec (FAT-16)", + 0xc7: "Syrinx", + 0xda: "Non-FS data", + 0xdb: "CP/M / CTOS / ...", + 0xde: "Dell Utility", + 0xdf: "BootIt", + 0xe1: "DOS access", + 0xe3: "DOS R/O", + 0xe4: "SpeedStor", + 0xeb: "BeOS fs", + 0xee: "EFI GPT", + 0xef: "EFI (FAT-12/16/32)", + 0xf0: "Linux/PA-RISC boot", + 0xf1: "SpeedStor", + 0xf4: "SpeedStor", + 0xf2: "DOS secondary", + 0xfd: "Linux raid autodetect", + 0xfe: "LANstep", + 0xff: "BBT" + } + + def createFields(self): + yield UInt8(self, "bootable", "Bootable flag (true if equals to 0x80)") + if self["bootable"].value not in (0x00, 0x80): + self.warning("Stream doesn't look like master boot record (partition bootable error)!") + yield UInt8(self, "start_head", 
"Starting head number of the partition") + yield Bits(self, "start_sector", 6, "Starting sector number of the partition") + yield CylinderNumber(self, "start_cylinder", "Starting cylinder number of the partition") + yield Enum(UInt8(self, "system", "System indicator"), self.system_name) + yield UInt8(self, "end_head", "Ending head number of the partition") + yield Bits(self, "end_sector", 6, "Ending sector number of the partition") + yield CylinderNumber(self, "end_cylinder", "Ending cylinder number of the partition") + yield UInt32(self, "LBA", "LBA (number of sectors before this partition)") + yield UInt32(self, "size", "Size (block count)") + + def isUsed(self): + return self["system"].value != 0 + + def createDescription(self): + desc = "Partition header: " + if self.isUsed(): + system = self["system"].display + size = self["size"].value * BLOCK_SIZE + desc += "%s, %s" % (system, humanFilesize(size)) + else: + desc += "(unused)" + return desc + + +class MasterBootRecord(FieldSet): + static_size = 512*8 + + def createFields(self): + yield RawBytes(self, "program", 446, "Boot program (Intel x86 machine code)") + yield PartitionHeader(self, "header[0]") + yield PartitionHeader(self, "header[1]") + yield PartitionHeader(self, "header[2]") + yield PartitionHeader(self, "header[3]") + yield textHandler(UInt16(self, "signature", "Signature (0xAA55)"), hexadecimal) + + def _getPartitions(self): + return ( self[index] for index in xrange(1,5) ) + headers = property(_getPartitions) + + +class Partition(FieldSet): + def createFields(self): + mbr = MasterBootRecord(self, "mbr") + yield mbr + + # No error if we only want to analyse a backup of a mbr + if self.eof: + return + + for start, index, header in sorted((hdr["LBA"].value, index, hdr) + for index, hdr in enumerate(mbr.headers) if hdr.isUsed()): + # Seek to the beginning of the partition + padding = self.seekByte(start * BLOCK_SIZE, "padding[]") + if padding: + yield padding + + # Content of the partition + name = 
"partition[%u]" % index + size = BLOCK_SIZE * header["size"].value + desc = header["system"].display + if header["system"].value == 5: + yield Partition(self, name, desc, size * 8) + else: + yield RawBytes(self, name, size, desc) + + # Padding at the end + if self.current_size < self._size: + yield self.seekBit(self._size, "end") + + +class MSDos_HardDrive(Parser, Partition): + endian = LITTLE_ENDIAN + MAGIC = "\x55\xAA" + PARSER_TAGS = { + "id": "msdos_harddrive", + "category": "file_system", + "description": "MS-DOS hard drive with Master Boot Record (MBR)", + "min_size": 512*8, + "file_ext": ("",), +# "magic": ((MAGIC, 510*8),), + } + + def validate(self): + if self.stream.readBytes(510*8, 2) != self.MAGIC: + return "Invalid signature" + used = False + for hdr in self["mbr"].headers: + if hdr["bootable"].value not in (0x00, 0x80): + return "Wrong boot flag" + used |= hdr.isUsed() + return used or "No partition found" diff --git a/libs/hachoir_parser/file_system/ntfs.py b/libs/hachoir_parser/file_system/ntfs.py new file mode 100644 index 0000000..efea7e7 --- /dev/null +++ b/libs/hachoir_parser/file_system/ntfs.py @@ -0,0 +1,285 @@ +""" +New Technology File System (NTFS) file system parser. 
+ +Sources: +- The NTFS documentation + http://www.linux-ntfs.org/ +- NTFS-3G driver + http://www.ntfs-3g.org/ + +Creation date: 3rd january 2007 +Author: Victor Stinner +""" + +SECTOR_SIZE = 512 + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, Enum, + UInt8, UInt16, UInt32, UInt64, TimestampWin64, + String, Bytes, Bit, + NullBits, NullBytes, PaddingBytes, RawBytes) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler +from hachoir_core.tools import humanFilesize, createDict +from hachoir_parser.common.msdos import MSDOSFileAttr32 + +class BiosParameterBlock(FieldSet): + """ + BIOS parameter block (bpb) structure + """ + static_size = 25 * 8 + MEDIA_TYPE = {0xf8: "Hard disk"} + + def createFields(self): + yield UInt16(self, "bytes_per_sector", "Size of a sector in bytes") + yield UInt8(self, "sectors_per_cluster", "Size of a cluster in sectors") + yield NullBytes(self, "reserved_sectors", 2) + yield NullBytes(self, "fats", 1) + yield NullBytes(self, "root_entries", 2) + yield NullBytes(self, "sectors", 2) + yield Enum(UInt8(self, "media_type"), self.MEDIA_TYPE) + yield NullBytes(self, "sectors_per_fat", 2) + yield UInt16(self, "sectors_per_track") + yield UInt16(self, "heads") + yield UInt32(self, "hidden_sectors") + yield NullBytes(self, "large_sectors", 4) + + def validate(self): + if self["bytes_per_sector"].value not in (256, 512, 1024, 2048, 4096): + return "Invalid sector size (%u bytes)" % \ + self["bytes_per_sector"].value + if self["sectors_per_cluster"].value not in (1, 2, 4, 8, 16, 32, 64, 128): + return "Invalid cluster size (%u sectors)" % \ + self["sectors_per_cluster"].value + return "" + +class MasterBootRecord(FieldSet): + static_size = 512*8 + def createFields(self): + yield Bytes(self, "jump", 3, "Intel x86 jump instruction") + yield String(self, "name", 8) + yield BiosParameterBlock(self, "bios", "BIOS parameters") + + yield 
textHandler(UInt8(self, "physical_drive", "(0x80)"), hexadecimal) + yield NullBytes(self, "current_head", 1) + yield textHandler(UInt8(self, "ext_boot_sig", "Extended boot signature (0x80)"), hexadecimal) + yield NullBytes(self, "unused", 1) + + yield UInt64(self, "nb_sectors") + yield UInt64(self, "mft_cluster", "Cluster location of MFT data") + yield UInt64(self, "mftmirr_cluster", "Cluster location of copy of MFT") + yield UInt8(self, "cluster_per_mft", "MFT record size in clusters") + yield NullBytes(self, "reserved[]", 3) + yield UInt8(self, "cluster_per_index", "Index block size in clusters") + yield NullBytes(self, "reserved[]", 3) + yield textHandler(UInt64(self, "serial_number"), hexadecimal) + yield textHandler(UInt32(self, "checksum", "Boot sector checksum"), hexadecimal) + yield Bytes(self, "boot_code", 426) + yield Bytes(self, "mbr_magic", 2, r"Master boot record magic number (\x55\xAA)") + + def createDescription(self): + size = self["nb_sectors"].value * self["bios/bytes_per_sector"].value + return "NTFS Master Boot Record (%s)" % humanFilesize(size) + +class MFT_Flags(FieldSet): + static_size = 16 + def createFields(self): + yield Bit(self, "in_use") + yield Bit(self, "is_directory") + yield NullBits(self, "padding", 14) + +class Attribute(FieldSet): + # --- Common code --- + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["size"].value * 8 + type = self["type"].value + if type in self.ATTR_INFO: + self._name = self.ATTR_INFO[type][0] + self._parser = self.ATTR_INFO[type][2] + + def createFields(self): + yield Enum(textHandler(UInt32(self, "type"), hexadecimal), self.ATTR_NAME) + yield UInt32(self, "size") + yield UInt8(self, "non_resident", "Non-resident flag") + yield UInt8(self, "name_length", "Name length in bytes") + yield UInt16(self, "name_offset", "Name offset") + yield UInt16(self, "flags") + yield textHandler(UInt16(self, "attribute_id"), hexadecimal) + yield UInt32(self, "length_attr", "Length of the 
Attribute") + yield UInt16(self, "offset_attr", "Offset of the Attribute") + yield UInt8(self, "indexed_flag") + yield NullBytes(self, "padding", 1) + if self._parser: + for field in self._parser(self): + yield field + else: + size = self["length_attr"].value + if size: + yield RawBytes(self, "data", size) + size = (self.size - self.current_size) // 8 + if size: + yield PaddingBytes(self, "end_padding", size) + + def createDescription(self): + return "Attribute %s" % self["type"].display + FILENAME_NAMESPACE = { + 0: "POSIX", + 1: "Win32", + 2: "DOS", + 3: "Win32 & DOS", + } + + # --- Parser specific to a type --- + def parseStandardInfo(self): + yield TimestampWin64(self, "ctime", "File Creation") + yield TimestampWin64(self, "atime", "File Altered") + yield TimestampWin64(self, "mtime", "MFT Changed") + yield TimestampWin64(self, "rtime", "File Read") + yield MSDOSFileAttr32(self, "file_attr", "DOS File Permissions") + yield UInt32(self, "max_version", "Maximum Number of Versions") + yield UInt32(self, "version", "Version Number") + yield UInt32(self, "class_id") + yield UInt32(self, "owner_id") + yield UInt32(self, "security_id") + yield filesizeHandler(UInt64(self, "quota_charged", "Quota Charged")) + yield UInt64(self, "usn", "Update Sequence Number (USN)") + + def parseFilename(self): + yield UInt64(self, "ref", "File reference to the parent directory") + yield TimestampWin64(self, "ctime", "File Creation") + yield TimestampWin64(self, "atime", "File Altered") + yield TimestampWin64(self, "mtime", "MFT Changed") + yield TimestampWin64(self, "rtime", "File Read") + yield filesizeHandler(UInt64(self, "alloc_size", "Allocated size of the file")) + yield filesizeHandler(UInt64(self, "real_size", "Real size of the file")) + yield UInt32(self, "file_flags") + yield UInt32(self, "file_flags2", "Used by EAs and Reparse") + yield UInt8(self, "filename_length", "Filename length in characters") + yield Enum(UInt8(self, "filename_namespace"), self.FILENAME_NAMESPACE) + 
size = self["filename_length"].value * 2 + if size: + yield String(self, "filename", size, charset="UTF-16-LE") + + def parseData(self): + size = (self.size - self.current_size) // 8 + if size: + yield Bytes(self, "data", size) + + def parseBitmap(self): + size = (self.size - self.current_size) + for index in xrange(size): + yield Bit(self, "bit[]") + + # --- Type information --- + ATTR_INFO = { + 0x10: ('standard_info', 'STANDARD_INFORMATION ', parseStandardInfo), + 0x20: ('attr_list', 'ATTRIBUTE_LIST ', None), + 0x30: ('filename', 'FILE_NAME ', parseFilename), + 0x40: ('vol_ver', 'VOLUME_VERSION', None), + 0x40: ('obj_id', 'OBJECT_ID ', None), + 0x50: ('security', 'SECURITY_DESCRIPTOR ', None), + 0x60: ('vol_name', 'VOLUME_NAME ', None), + 0x70: ('vol_info', 'VOLUME_INFORMATION ', None), + 0x80: ('data', 'DATA ', parseData), + 0x90: ('index_root', 'INDEX_ROOT ', None), + 0xA0: ('index_alloc', 'INDEX_ALLOCATION ', None), + 0xB0: ('bitmap', 'BITMAP ', parseBitmap), + 0xC0: ('sym_link', 'SYMBOLIC_LINK', None), + 0xC0: ('reparse', 'REPARSE_POINT ', None), + 0xD0: ('ea_info', 'EA_INFORMATION ', None), + 0xE0: ('ea', 'EA ', None), + 0xF0: ('prop_set', 'PROPERTY_SET', None), + 0x100: ('log_util', 'LOGGED_UTILITY_STREAM', None), + } + ATTR_NAME = createDict(ATTR_INFO, 1) + +class File(FieldSet): +# static_size = 48*8 + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["bytes_allocated"].value * 8 + + def createFields(self): + yield Bytes(self, "signature", 4, "Usually the magic is 'FILE'") + yield UInt16(self, "usa_ofs", "Update Sequence Array offset") + yield UInt16(self, "usa_count", "Update Sequence Array count") + yield UInt64(self, "lsn", "$LogFile sequence number for this record") + yield UInt16(self, "sequence_number", "Number of times this mft record has been reused") + yield UInt16(self, "link_count", "Number of hard links") + yield UInt16(self, "attrs_offset", "Byte offset to the first attribute") + yield MFT_Flags(self, "flags") + 
yield UInt32(self, "bytes_in_use", "Number of bytes used in this record") + yield UInt32(self, "bytes_allocated", "Number of bytes allocated for this record") + yield UInt64(self, "base_mft_record") + yield UInt16(self, "next_attr_instance") + + # The below fields are specific to NTFS 3.1+ (Windows XP and above) + yield NullBytes(self, "reserved", 2) + yield UInt32(self, "mft_record_number", "Number of this mft record") + + padding = self.seekByte(self["attrs_offset"].value, relative=True) + if padding: + yield padding + + while not self.eof: + addr = self.absolute_address + self.current_size + if self.stream.readBytes(addr, 4) == "\xFF\xFF\xFF\xFF": + yield Bytes(self, "attr_end_marker", 8) + break + yield Attribute(self, "attr[]") + + size = self["bytes_in_use"].value - self.current_size//8 + if size: + yield RawBytes(self, "end_rawdata", size) + + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "end_padding", size, "Unused but allocated bytes") + + def createDescription(self): + text = "File" + if "filename/filename" in self: + text += ' "%s"' % self["filename/filename"].value + if "filename/real_size" in self: + text += ' (%s)' % self["filename/real_size"].display + if "standard_info/file_attr" in self: + text += ', %s' % self["standard_info/file_attr"].display + return text + +class NTFS(Parser): + MAGIC = "\xEB\x52\x90NTFS " + PARSER_TAGS = { + "id": "ntfs", + "category": "file_system", + "description": "NTFS file system", + "min_size": 1024*8, + "magic": ((MAGIC, 0),), + } + endian = LITTLE_ENDIAN + _cluster_size = None + + def validate(self): + if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC: + return "Invalid magic string" + err = self["mbr/bios"].validate() + if err: + return err + return True + + def createFields(self): + yield MasterBootRecord(self, "mbr") + + bios = self["mbr/bios"] + cluster_size = bios["sectors_per_cluster"].value * bios["bytes_per_sector"].value + offset = self["mbr/mft_cluster"].value * 
cluster_size + padding = self.seekByte(offset, relative=False) + if padding: + yield padding + for index in xrange(1000): + yield File(self, "file[]") + + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "end", size) + diff --git a/libs/hachoir_parser/file_system/reiser_fs.py b/libs/hachoir_parser/file_system/reiser_fs.py new file mode 100644 index 0000000..e71eb95 --- /dev/null +++ b/libs/hachoir_parser/file_system/reiser_fs.py @@ -0,0 +1,120 @@ +""" +ReiserFS file system version 3 parser (version 1, 2 and 4 are not supported). + +Author: Frederic Weisbecker +Creation date: 8 december 2006 + +Sources: + - http://p-nand-q.com/download/rfstool/reiserfs_docs.html + - http://homes.cerias.purdue.edu/~florian/reiser/reiserfs.php + - file://usr/src/linux-2.6.16.19/include/linux/reiserfs_fs.h + +NOTES: + +The most part of the description of the structures, their fields and their +comments decribed here comes from the file include/linux/reiserfs_fs.h +- written by Hans reiser - located in the Linux kernel 2.6.16.19 and from +the Reiserfs explanations in +http://p-nand-q.com/download/rfstool/reiserfs_docs.html written by Gerson +Kurz. 
+""" + + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, Enum, + UInt16, UInt32, String, RawBytes, NullBytes) +from hachoir_core.endian import LITTLE_ENDIAN + +class Journal_params(FieldSet): + static_size = 32*8 + + def createFields(self): + yield UInt32(self, "1st_block", "Journal 1st block number") + yield UInt32(self, "dev", "Journal device number") + yield UInt32(self, "size", "Size of the journal") + yield UInt32(self, "trans_max", "Max number of blocks in a transaction") + #TODO: Must be explained: it was sb_journal_block_count + yield UInt32(self, "magic", "Random value made on fs creation.") + yield UInt32(self, "max_batch", "Max number of blocks to batch into a trans") + yield UInt32(self, "max_commit_age", "In seconds, how old can an async commit be") + yield UInt32(self, "max_trans_age", "In seconds, how old can a transaction be") + + + def createDescription(self): + return "Parameters of the journal" + +class SuperBlock(FieldSet): + static_size = 204*8 + + UMOUNT_STATE = { 1: "unmounted", 2: "not unmounted" } + HASH_FUNCTIONS = { + 0: "UNSET_HASH", + 1: "TEA_HASH", + 2: "YURA_HASH", + 3: "R5_HASH" + } + + def createFields(self): + #TODO: This structure is normally divided in two parts: + # _reiserfs_super_block_v1 + # _reiserfs_super_block + # It will be divided later to easily support older version of the first part + yield UInt32(self, "block_count", "Number of blocks") + yield UInt32(self, "free_blocks", "Number of free blocks") + yield UInt32(self, "root_block", "Root block number") + yield Journal_params(self, "Journal parameters") + yield UInt16(self, "blocksize", "Size of a block") + yield UInt16(self, "oid_maxsize", "Max size of object id array") + yield UInt16(self, "oid_cursize", "Current size of object id array") + yield Enum(UInt16(self, "umount_state", "Filesystem umounted or not"), self.UMOUNT_STATE) + yield String(self, "magic", 10, "Magic string", strip="\0") + #TODO: change the type of s_fs_state in Enum 
to have more details about this fsck state + yield UInt16(self, "fs_state", "Rebuilding phase of fsck ") + yield Enum(UInt32(self, "hash_function", "Hash function to sort names in a directory"), self.HASH_FUNCTIONS) + yield UInt16(self, "tree_height", "Height of disk tree") + yield UInt16(self, "bmap_nr", "Amount of bitmap blocks needed to address each block of file system") + #TODO: find a good description for this field + yield UInt16(self, "version", "Field only reliable on filesystem with non-standard journal") + yield UInt16(self, "reserved_for_journal", "Size in blocks of journal area on main device") + #TODO: same as above + yield UInt32(self, "inode_generation", "No description") + #TODO: same as above and should be an enum field + yield UInt32(self, "flags", "No description") + #TODO: Create a special Type to format this id + yield RawBytes(self, "uuid", 16, "Filesystem unique identifier") + yield String(self, "label", 16, "Filesystem volume label", strip="\0") + yield NullBytes(self, "unused", 88) + + def createDescription(self): + return "Superblock: ReiserFs Filesystem" + +class REISER_FS(Parser): + PARSER_TAGS = { + "id": "reiserfs", + "category": "file_system", + # 130 blocks before the journal + + # Minimal size of journal (513 blocks) + + # 1 block for the rest + # And The Minimal size of a block is 512 bytes + "min_size": (130+513+1) * (512*8), + "description": "ReiserFS file system" + } + endian = LITTLE_ENDIAN + + # Offsets (in bytes) of important information + SUPERBLOCK_OFFSET = 64*1024 + MAGIC_OFFSET = SUPERBLOCK_OFFSET + 52 + + def validate(self): + # Let's look at the magic field in the superblock + magic = self.stream.readBytes(self.MAGIC_OFFSET*8, 9).rstrip("\0") + if magic == "ReIsEr3Fs": + return True + if magic in ("ReIsEr2Fs", "ReIsErFs"): + return "Unsupported version of ReiserFs" + return "Invalid magic string" + + def createFields(self): + yield NullBytes(self, "padding[]", self.SUPERBLOCK_OFFSET) + yield SuperBlock(self, 
"superblock") + diff --git a/libs/hachoir_parser/game/__init__.py b/libs/hachoir_parser/game/__init__.py new file mode 100644 index 0000000..1b6447b --- /dev/null +++ b/libs/hachoir_parser/game/__init__.py @@ -0,0 +1,4 @@ +from hachoir_parser.game.zsnes import ZSNESFile +from hachoir_parser.game.spider_man_video import SpiderManVideoFile +from hachoir_parser.game.laf import LafFile +from hachoir_parser.game.blp import BLP1File, BLP2File \ No newline at end of file diff --git a/libs/hachoir_parser/game/blp.py b/libs/hachoir_parser/game/blp.py new file mode 100644 index 0000000..218e864 --- /dev/null +++ b/libs/hachoir_parser/game/blp.py @@ -0,0 +1,269 @@ +""" +Blizzard BLP Image File Parser + +Author: Robert Xiao +Creation date: July 10 2007 + +- BLP1 File Format + http://magos.thejefffiles.com/War3ModelEditor/MagosBlpFormat.txt +- BLP2 File Format (Wikipedia) + http://en.wikipedia.org/wiki/.BLP +- S3TC (DXT1, 3, 5) Formats + http://en.wikipedia.org/wiki/S3_Texture_Compression +""" + +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.field import String, UInt32, UInt8, Enum, FieldSet, RawBytes, GenericVector, Bit, Bits +from hachoir_parser.parser import Parser +from hachoir_parser.image.common import PaletteRGBA +from hachoir_core.tools import alignValue + +class PaletteIndex(UInt8): + def createDescription(self): + return "Palette index %i (%s)" % (self.value, self["/palette/color[%i]" % self.value].description) + +class Generic2DArray(FieldSet): + def __init__(self, parent, name, width, height, item_class, row_name="row", item_name="item", *args, **kwargs): + FieldSet.__init__(self, parent, name, *args, **kwargs) + self.width = width + self.height = height + self.item_class = item_class + self.row_name = row_name + self.item_name = item_name + + def createFields(self): + for i in xrange(self.height): + yield GenericVector(self, self.row_name+"[]", self.width, self.item_class, self.item_name) + +class BLP1File(Parser): + MAGIC = "BLP1" + PARSER_TAGS 
= { + "id": "blp1", + "category": "game", + "file_ext": ("blp",), + "mime": (u"application/x-blp",), # TODO: real mime type??? + "magic": ((MAGIC, 0),), + "min_size": 7*32, # 7 DWORDs start, incl. magic + "description": "Blizzard Image Format, version 1", + } + endian = LITTLE_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 4) != "BLP1": + return "Invalid magic" + return True + + def createFields(self): + yield String(self, "magic", 4, "Signature (BLP1)") + yield Enum(UInt32(self, "compression"), { + 0:"JPEG Compression", + 1:"Uncompressed"}) + yield UInt32(self, "flags") + yield UInt32(self, "width") + yield UInt32(self, "height") + yield Enum(UInt32(self, "type"), { + 3:"Uncompressed Index List + Alpha List", + 4:"Uncompressed Index List + Alpha List", + 5:"Uncompressed Index List"}) + yield UInt32(self, "subtype") + for i in xrange(16): + yield UInt32(self, "mipmap_offset[]") + for i in xrange(16): + yield UInt32(self, "mipmap_size[]") + + compression = self["compression"].value + image_type = self["type"].value + width = self["width"].value + height = self["height"].value + + if compression == 0: # JPEG Compression + yield UInt32(self, "jpeg_header_len") + yield RawBytes(self, "jpeg_header", self["jpeg_header_len"].value, "Shared JPEG Header") + else: + yield PaletteRGBA(self, "palette", 256) + + offsets = self.array("mipmap_offset") + sizes = self.array("mipmap_size") + for i in xrange(16): + if not offsets[i].value or not sizes[i].value: + continue + padding = self.seekByte(offsets[i].value) + if padding: + yield padding + if compression == 0: + yield RawBytes(self, "mipmap[%i]" % i, sizes[i].value, "JPEG data, append to header to recover complete image") + elif compression == 1: + yield Generic2DArray(self, "mipmap_indexes[%i]" % i, width, height, PaletteIndex, "row", "index", "Indexes into the palette") + if image_type in (3, 4): + yield Generic2DArray(self, "mipmap_alphas[%i]" % i, width, height, UInt8, "row", "alpha", "Alpha values") + width 
def interp_avg(data_low, data_high, n):
    """Yield the n-1 evenly interpolated averages between data_low and
    data_high (endpoints excluded).

    Both endpoints may be integers or parallel (possibly nested)
    iterables of integers; iterables are interpolated element-wise.

    >>> list(interp_avg(1, 10, 3))
    [4, 7]
    """
    # Integers have no __iter__, so this replaces the old
    # isinstance(data_low, (int, long)) test and also works where
    # 'long' does not exist (Python 3).
    if not hasattr(data_low, "__iter__"):
        for i in range(1, n):
            # Floor division keeps the original Python 2 integer '/'
            # semantics on both Python versions.
            yield (data_low * (n - i) + data_high * i) // n
    else:  # element-wise interpolation of two parallel iterables
        pair_iters = [interp_avg(lo, hi, n)
                      for lo, hi in zip(data_low, data_high)]
        for _ in range(1, n):
            # BUG FIX: was "iter.next()", which shadows the iter()
            # builtin and breaks on Python 3; next() is portable.
            yield [next(it) for it in pair_iters]
class DXT5Alpha(FieldSet):
    """DXT5 alpha block: two 8-bit alpha endpoints followed by sixteen
    3-bit indexes into the derived alpha table."""
    static_size = 64

    def createFields(self):
        yield UInt8(self, "alpha_val[0]", "First alpha value")
        yield UInt8(self, "alpha_val[1]", "Second alpha value")
        a0 = self["alpha_val[0]"].value
        a1 = self["alpha_val[1]"].value
        # a0 > a1 selects the 8-value mode (6 interpolated alphas);
        # otherwise the 6-value mode (4 interpolated, plus 0 and 255).
        if a0 > a1:
            table = [a0, a1] + list(interp_avg(a0, a1, 7))
        else:
            table = [a0, a1] + list(interp_avg(a0, a1, 5))
        # 0 and 255 complete the 6-value table; in 8-value mode these two
        # entries sit past index 7 and can never be selected by a 3-bit
        # index (same dead entries as the original code).
        table += [0, 255]
        for i in xrange(16):
            index = Bits(self, "alpha[%i][%i]" % divmod(i, 4), 3)
            index._description = "Alpha value: %i" % table[index.value]
            yield index
DirectX)"}) + yield UInt8(self, "alpha_depth", "Alpha channel depth, in bits (0 = no alpha)") + yield Enum(UInt8(self, "alpha_encoding", "Encoding used for alpha channel"), { + 0:"DXT1 alpha (0 or 1 bit alpha)", + 1:"DXT3 alpha (4 bit alpha)", + 7:"DXT5 alpha (8 bit interpolated alpha)"}) + yield Enum(UInt8(self, "has_mips", "Are mip levels present?"), { + 0:"No mip levels", + 1:"Mip levels present; number of levels determined by image size"}) + yield UInt32(self, "width", "Base image width") + yield UInt32(self, "height", "Base image height") + for i in xrange(16): + yield UInt32(self, "mipmap_offset[]") + for i in xrange(16): + yield UInt32(self, "mipmap_size[]") + yield PaletteRGBA(self, "palette", 256) + + compression = self["compression"].value + encoding = self["encoding"].value + alpha_depth = self["alpha_depth"].value + alpha_encoding = self["alpha_encoding"].value + width = self["width"].value + height = self["height"].value + + if compression == 0: # JPEG Compression + yield UInt32(self, "jpeg_header_len") + yield RawBytes(self, "jpeg_header", self["jpeg_header_len"].value, "Shared JPEG Header") + + offsets = self.array("mipmap_offset") + sizes = self.array("mipmap_size") + for i in xrange(16): + if not offsets[i].value or not sizes[i].value: + continue + padding = self.seekByte(offsets[i].value) + if padding: + yield padding + if compression == 0: + yield RawBytes(self, "mipmap[%i]" % i, sizes[i].value, "JPEG data, append to header to recover complete image") + elif compression == 1 and encoding == 1: + yield Generic2DArray(self, "mipmap_indexes[%i]" % i, height, width, PaletteIndex, "row", "index", "Indexes into the palette") + if alpha_depth == 1: + yield GenericVector(self, "mipmap_alphas[%i]" % i, height, width, Bit, "row", "is_opaque", "Alpha values") + elif alpha_depth == 8: + yield GenericVector(self, "mipmap_alphas[%i]" % i, height, width, UInt8, "row", "alpha", "Alpha values") + elif compression == 1 and encoding == 2: + block_height = 
class LafFile(Parser):
    """Parser for LucasArts ".laf" bitmap font files (256 characters)."""
    PARSER_TAGS = {
        "id": "lucasarts_font",
        "category": "game",
        "file_ext" : ("laf",),
        "min_size" : 32*8,
        "description" : "LucasArts Font"
    }

    endian = LITTLE_ENDIAN

    def validate(self):
        """Check the fixed invariants of a LucasArts font header."""
        if self["num_chars"].value != 256:
            return "Invalid number of characters (%u)" % self["num_chars"].value
        # BUG FIX (wording): the next two messages read
        # "Invalid of code of first/last character code".
        if self["first_char_code"].value != 0:
            return "Invalid first character code (%u)" % self["first_char_code"].value
        if self["last_char_code"].value != 255:
            return "Invalid last character code (%u)" % self["last_char_code"].value
        if self["char_codes/char[0]"].value != 0:
            return "Invalid character code #0 (%u)" % self["char_codes/char[0]"].value
        if self["chars/char[0]/data_offset"].value != 0:
            return "Invalid character #0 offset"
        return True

    def createFields(self):
        yield UInt32(self, "num_chars")
        yield UInt32(self, "raw_font_data_size")
        yield UInt32(self, "max_char_width")
        yield UInt32(self, "min_char_width")
        # NOTE(review): the third argument (4) lands in the field
        # *description* slot; possibly a byte count was intended —
        # confirm against real samples before changing.
        yield UInt32(self, "unknown[]", 4)
        yield UInt32(self, "unknown[]", 4)
        yield UInt32(self, "first_char_code")
        yield UInt32(self, "last_char_code")

        # Character code table, then per-character metrics.
        yield GenericVector(self, "char_codes", self["num_chars"].value,
                            UInt16, "char")

        yield GenericVector(self, "chars", self["num_chars"].value,
                            CharInfo, "char")

        # character data. we make an effort to provide
        # something more meaningful than "RawBytes:
        # character bitmap data"
        yield CharData(self["chars"], self, "char_data")

        # read to the end
        if self.current_size < self._size:
            yield self.seekBit(self._size, "unknown[]")
The Kingpin" (Sega CD) + +Author: Mike Melanson +Creation date: 2006-09-30 +File samples: http://samples.mplayerhq.hu/game-formats/spiderman-segacd-bin/ +""" + +from hachoir_parser import Parser +from hachoir_core.field import FieldSet, UInt32, String, RawBytes +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal + +class Chunk(FieldSet): + tag_info = { + "CONF" : ("conf[]", None, "Configuration header"), + "AUDI" : ("audio[]", None, "Audio chunk"), + "SYNC" : ("sync[]", None, "Start of video frame data"), + "IVRA" : ("ivra[]", None, "Vector codebook (?)"), + "VRAM" : ("video[]", None, "Video RAM tile pattern"), + "CRAM" : ("color[]", None, "Color RAM (palette)"), + "CEND" : ("video_end[]", None, "End of video data"), + "MEND" : ("end_file", None, "End of file"), + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["length"].value * 8 + fourcc = self["fourcc"].value + if fourcc in self.tag_info: + self._name, self._parser, self._description = self.tag_info[fourcc] + else: + self._parser = None + self._description = "Unknown chunk: fourcc %s" % self["fourcc"].display + + def createFields(self): + yield String(self, "fourcc", 4, "FourCC", charset="ASCII") + yield textHandler(UInt32(self, "length", "length"), hexadecimal) + size = self["length"].value - 8 + if 0 < size: + if self._parser: + for field in self._parser(self, size): + yield field + else: + yield RawBytes(self, "data", size) + +class SpiderManVideoFile(Parser): + PARSER_TAGS = { + "id": "spiderman_video", + "category": "game", + "file_ext": ("bin",), + "min_size": 8*8, + "description": "The Amazing Spider-Man vs. 
The Kingpin (Sega CD) FMV video" + } + + endian = BIG_ENDIAN + + def validate(self): + return (self.stream.readBytes(0, 4) == 'CONF') + + def createFields(self): + while not self.eof: + yield Chunk(self, "chunk[]") + diff --git a/libs/hachoir_parser/game/zsnes.py b/libs/hachoir_parser/game/zsnes.py new file mode 100644 index 0000000..a8f7550 --- /dev/null +++ b/libs/hachoir_parser/game/zsnes.py @@ -0,0 +1,250 @@ +""" +ZSNES Save State Parser (v143 only currently) + +Author: Jason Gorski +Creation date: 2006-09-15 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, StaticFieldSet, + UInt8, UInt16, UInt32, + String, PaddingBytes, Bytes, RawBytes) +from hachoir_core.endian import LITTLE_ENDIAN + +class ZSTHeader(StaticFieldSet): + format = ( + (String, "zs_mesg", 26, "File header", {"charset": "ASCII"}), + (UInt8, "zs_mesglen", "File header string len"), + (UInt8, "zs_version", "Version minor #"), + (UInt8, "curcyc", "cycles left in scanline"), + (UInt16, "curypos", "current y position"), + (UInt8, "cacheud", "update cache every ? 
frames"), + (UInt8, "ccud", "current cache increment"), + (UInt8, "intrset", "interrupt set"), + (UInt8, "cycpl", "cycles per scanline"), + (UInt8, "cycphb", "cycles per hblank"), + (UInt8, "spcon", "SPC Enable (1=enabled)"), + (UInt16, "stackand", "value to and stack to keep it from going to the wrong area"), + (UInt16, "stackor", "value to or stack to keep it from going to the wrong area"), + ) + +class ZSTcpu(StaticFieldSet): + format = ( + (UInt16, "xat"), + (UInt8, "xdbt"), + (UInt8, "xpbt"), + (UInt16, "xst"), + (UInt16, "xdt"), + (UInt16, "xxt"), + (UInt16, "xyt"), + (UInt8, "xp"), + (UInt8, "xe"), + (UInt16, "xpc"), + (UInt8, "xirqb", "which bank the irqs start at"), + (UInt8, "debugger", "Start with debugger (1: yes, 0: no)"), + (UInt32, "Curtable" "Current table address"), + (UInt8, "curnmi", "if in NMI (1=yes)"), + (UInt32, "cycpbl", "percentage left of CPU/SPC to run (3.58 = 175)"), + (UInt32, "cycpblt", "percentage of CPU/SPC to run"), + ) + +class ZSTppu(FieldSet): + static_size = 3019*8 + def createFields(self): + yield UInt8(self, "sndrot", "rotates to use A,X or Y for sound skip") + yield UInt8(self, "sndrot2", "rotates a random value for sound skip") + yield UInt8(self, "INTEnab", "enables NMI(7)/VIRQ(5)/HIRQ(4)/JOY(0)") + yield UInt8(self, "NMIEnab", "controlled in e65816 loop. 
Sets to 81h") + yield UInt16(self, "VIRQLoc", "VIRQ Y location") + yield UInt8(self, "vidbright", "screen brightness 0..15") + yield UInt8(self, "previdbr", "previous screen brightness") + yield UInt8(self, "forceblnk", "force blanking on/off ($80=on)") + yield UInt32(self, "objptr", "pointer to object data in VRAM") + yield UInt32(self, "objptrn", "pointer2 to object data in VRAM") + yield UInt8(self, "objsize1", "1=8dot, 4=16dot, 16=32dot, 64=64dot") + yield UInt8(self, "objsize2", "large object size") + yield UInt8(self, "objmovs1", "number of bytes to move/paragraph") + yield UInt16(self, "objadds1", "number of bytes to add/paragraph") + yield UInt8(self, "objmovs2", "number of bytes to move/paragraph") + yield UInt16(self, "objadds2", "number of bytes to add/paragraph") + yield UInt16(self, "oamaddrt", "oam address") + yield UInt16(self, "oamaddrs", "oam address at beginning of vblank") + yield UInt8(self, "objhipr", "highest priority object #") + yield UInt8(self, "bgmode", "graphics mode 0..7") + yield UInt8(self, "bg3highst", "is 1 if background 3 has the highest priority") + yield UInt8(self, "bgtilesz", "0=8x8, 1=16x16 bit0=bg1, bit1=bg2, etc.") + yield UInt8(self, "mosaicon", "mosaic on, bit 0=bg1, bit1=bg2, etc.") + yield UInt8(self, "mosaicsz", "mosaic size in pixels") + yield UInt16(self, "bg1ptr", "pointer to background1") + yield UInt16(self, "bg2ptr", "pointer to background2") + yield UInt16(self, "bg3ptr", "pointer to background3") + yield UInt16(self, "bg4ptr", "pointer to background4") + yield UInt16(self, "bg1ptrb", "pointer to background1") + yield UInt16(self, "bg2ptrb", "pointer to background2") + yield UInt16(self, "bg3ptrb", "pointer to background3") + yield UInt16(self, "bg4ptrb", "pointer to background4") + yield UInt16(self, "bg1ptrc", "pointer to background1") + yield UInt16(self, "bg2ptrc", "pointer to background2") + yield UInt16(self, "bg3ptrc", "pointer to background3") + yield UInt16(self, "bg4ptrc", "pointer to background4") + 
yield UInt16(self, "bg1ptrd", "pointer to background1") + yield UInt16(self, "bg2ptrd", "pointer to background2") + yield UInt16(self, "bg3ptrd", "pointer to background3") + yield UInt16(self, "bg4ptrd", "pointer to background4") + yield UInt8(self, "bg1scsize", "bg #1 screen size (0=1x1,1=1x2,2=2x1,3=2x2)") + yield UInt8(self, "bg2scsize", "bg #2 screen size (0=1x1,1=1x2,2=2x1,3=2x2)") + yield UInt8(self, "bg3scsize", "bg #3 screen size (0=1x1,1=1x2,2=2x1,3=2x2)") + yield UInt8(self, "bg4scsize", "bg #4 screen size (0=1x1,1=1x2,2=2x1,3=2x2)") + yield UInt16(self, "bg1objptr", "pointer to tiles in background1") + yield UInt16(self, "bg2objptr", "pointer to tiles in background2") + yield UInt16(self, "bg3objptr", "pointer to tiles in background3") + yield UInt16(self, "bg4objptr", "pointer to tiles in background4") + yield UInt16(self, "bg1scrolx", "background 1 x position") + yield UInt16(self, "bg2scrolx", "background 2 x position") + yield UInt16(self, "bg3scrolx", "background 3 x position") + yield UInt16(self, "bg4scrolx", "background 4 x position") + yield UInt16(self, "bg1sx", "Temporary Variable for Debugging purposes") + yield UInt16(self, "bg1scroly", "background 1 y position") + yield UInt16(self, "bg2scroly", "background 2 y position") + yield UInt16(self, "bg3scroly", "background 3 y position") + yield UInt16(self, "bg4scroly", "background 4 y position") + yield UInt16(self, "addrincr", "vram increment (2,64,128,256)") + yield UInt8(self, "vramincr", "0 = increment at 2118/2138, 1 = 2119,213A") + yield UInt8(self, "vramread", "0 = address set, 1 = already read once") + yield UInt32(self, "vramaddr", "vram address") + + yield UInt16(self, "cgaddr", "cg (palette)") + yield UInt8(self, "cgmod", "if cgram is modified or not") + yield UInt16(self, "scrnon", "main & sub screen on") + yield UInt8(self, "scrndist", "which background is disabled") + yield UInt16(self, "resolutn", "screen resolution") + yield UInt8(self, "multa", "multiplier A") + yield 
UInt16(self, "diva", "divisor C") + yield UInt16(self, "divres", "quotent of divc/divb") + yield UInt16(self, "multres", "result of multa * multb/remainder of divc/divb") + yield UInt16(self, "latchx", "latched x value") + yield UInt16(self, "latchy", "latched y value") + yield UInt8(self, "latchxr", "low or high byte read for x value") + yield UInt8(self, "latchyr", "low or high byte read for y value") + yield UInt8(self, "frskipper", "used to control frame skipping") + yield UInt8(self, "winl1", "window 1 left position") + yield UInt8(self, "winr1", "window 1 right position") + yield UInt8(self, "winl2", "window 2 left position") + yield UInt8(self, "winr2", "window 2 right position") + yield UInt8(self, "winbg1en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG1") + yield UInt8(self, "winbg2en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG2") + yield UInt8(self, "winbg3en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG3") + yield UInt8(self, "winbg4en", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on BG4") + yield UInt8(self, "winobjen", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on sprites") + yield UInt8(self, "wincolen", "Win1 on (IN/OUT) or Win2 on (IN/OUT) on backarea") + yield UInt8(self, "winlogica", "Window logic type for BG1 to 4") + yield UInt8(self, "winlogicb", "Window logic type for Sprites and Backarea") + yield UInt8(self, "winenabm", "Window logic enable for main screen") + yield UInt8(self, "winenabs", "Window logic enable for sub sceen") + yield UInt8(self, "mode7set", "mode 7 settings") + yield UInt16(self, "mode7A", "A value for Mode 7") + yield UInt16(self, "mode7B", "B value for Mode 7") + yield UInt16(self, "mode7C", "C value for Mode 7") + yield UInt16(self, "mode7D", "D value for Mode 7") + yield UInt16(self, "mode7X0", "Center X for Mode 7") + yield UInt16(self, "mode7Y0", "Center Y for Mode 7") + yield UInt8(self, "JoyAPos", "Old-Style Joystick Read Position for Joy 1 & 3") + yield UInt8(self, "JoyBPos", "Old-Style Joystick Read Position for Joy 2 & 4") 
+ yield UInt32(self, "compmult", "Complement Multiplication for Mode 7") + yield UInt8(self, "joyalt", "temporary joystick alternation") + yield UInt32(self, "wramrwadr", "continuous read/write to wram address") + yield RawBytes(self, "dmadata", 129, "dma data (written from ports 43xx)") + yield UInt8(self, "irqon", "if IRQ has been called (80h) or not (0)") + yield UInt8(self, "nexthdma", "HDMA data to execute once vblank ends") + yield UInt8(self, "curhdma", "Currently executed hdma") + yield RawBytes(self, "hdmadata", 152, "4 dword register addresses, # bytes to transfer/line, address increment (word)") + yield UInt8(self, "hdmatype", "if first time executing hdma or not") + yield UInt8(self, "coladdr", "red value of color to add") + yield UInt8(self, "coladdg", "green value of color to add") + yield UInt8(self, "coladdb", "blue value of color to add") + yield UInt8(self, "colnull", "keep this 0 (when accessing colors by dword)") + yield UInt8(self, "scaddset", "screen/fixed color addition settings") + yield UInt8(self, "scaddtype", "which screen to add/sub") + yield UInt8(self, "Voice0Disabl2", "Disable Voice 0") + yield UInt8(self, "Voice1Disabl2", "Disable Voice 1") + yield UInt8(self, "Voice2Disabl2", "Disable Voice 2") + yield UInt8(self, "Voice3Disabl2", "Disable Voice 3") + yield UInt8(self, "Voice4Disabl2", "Disable Voice 4") + yield UInt8(self, "Voice5Disabl2", "Disable Voice 5") + yield UInt8(self, "Voice6Disabl2", "Disable Voice 6") + yield UInt8(self, "Voice7Disabl2", "Disable Voice 7") + yield RawBytes(self, "oamram", 1024, "OAMRAM (544 bytes)") + yield RawBytes(self, "cgram", 512, "CGRAM") + yield RawBytes(self, "pcgram", 512, "Previous CGRAM") + yield UInt8(self, "vraminctype") + yield UInt8(self, "vramincby8on", "if increment by 8 is on") + yield UInt8(self, "vramincby8left", "how many left") + yield UInt8(self, "vramincby8totl", "how many in total (32,64,128)") + yield UInt8(self, "vramincby8rowl", "how many left in that row (start at 8)") + 
yield UInt16(self, "vramincby8ptri", "increment by how many when rowl = 0") + yield UInt8(self, "nexthprior") + yield UInt8(self, "doirqnext") + yield UInt16(self, "vramincby8var") + yield UInt8(self, "screstype") + yield UInt8(self, "extlatch") + yield UInt8(self, "cfield") + yield UInt8(self, "interlval") + yield UInt16(self, "HIRQLoc HIRQ X") + + # NEWer ZST format + yield UInt8(self, "KeyOnStA") + yield UInt8(self, "KeyOnStB") + yield UInt8(self, "SDD1BankA") + yield UInt8(self, "SDD1BankB") + yield UInt8(self, "SDD1BankC") + yield UInt8(self, "SDD1BankD") + yield UInt8(self, "vramread2") + yield UInt8(self, "nosprincr") + yield UInt16(self, "poamaddrs") + yield UInt8(self, "ioportval") + yield UInt8(self, "iohvlatch") + yield UInt8(self, "ppustatus") + + yield PaddingBytes(self, "tempdat", 477, "Reserved/Unused") + +class ZSNESFile(Parser): + PARSER_TAGS = { + "id": "zsnes", + "category": "game", + "description": "ZSNES Save State File (only version 143)", + "min_size": 3091*8, + "file_ext": ("zst", "zs1", "zs2", "zs3", "zs4", "zs5", "zs6", + "zs7", "zs8", "zs9") + } + endian = LITTLE_ENDIAN + + def validate(self): + temp = self.stream.readBytes(0,28) + if temp[0:26] != "ZSNES Save State File V143": + return "Wrong header" + if ord(temp[27:28]) != 143: # extra... + return "Wrong save version %d <> 143" % temp[27:1] + return True + + def seek(self, offset): + padding = self.seekByte(offset, relative=False) + if padding is not None: + yield padding + + def createFields(self): + yield ZSTHeader(self, "header", "ZST header") # Offset: 0 + yield ZSTcpu(self, "cpu", "ZST cpu registers") # 41 + yield ZSTppu(self, "ppu", "ZST CPU registers") # 72 + yield RawBytes(self, "wram7E", 65536) # 3091 + yield RawBytes(self, "wram7F", 65536) # 68627 + yield RawBytes(self, "vram", 65536) # 134163 + + # TODO: Interpret extra on-cart chip data found at/beyond... 
199699 + + # TODO: Interpret Thumbnail/Screenshot data found at 275291 + # 64*56*2(16bit colors) = 7168 + padding = self.seekByte(275291, relative=False) + if padding is not None: + yield padding + yield Bytes(self, "thumbnail", 7168, "Thumbnail of playing game in some sort of raw 64x56x16-bit RGB mode?") + diff --git a/libs/hachoir_parser/guess.py b/libs/hachoir_parser/guess.py new file mode 100644 index 0000000..1f77b48 --- /dev/null +++ b/libs/hachoir_parser/guess.py @@ -0,0 +1,123 @@ +""" +Parser list managment: +- createParser() find the best parser for a file. +""" + +import os +from hachoir_core.error import warning, info, HACHOIR_ERRORS +from hachoir_parser import ValidateError, HachoirParserList +from hachoir_core.stream import FileInputStream +from hachoir_core.i18n import _ + + +class QueryParser(object): + fallback = None + other = None + + def __init__(self, tags): + self.validate = True + self.use_fallback = False + self.parser_args = None + self.db = HachoirParserList.getInstance() + self.parsers = set(self.db) + parsers = [] + for tag in tags: + if not self.parsers: + break + parsers += self._getByTag(tag) + if self.fallback is None: + self.fallback = len(parsers) == 1 + if self.parsers: + other = len(parsers) + parsers += list(self.parsers) + self.other = parsers[other] + self.parsers = parsers + + def __iter__(self): + return iter(self.parsers) + + def translate(self, name, value): + if name == "filename": + filename = os.path.basename(value).split(".") + if len(filename) <= 1: + value = "" + else: + value = filename[-1].lower() + name = "file_ext" + return name, value + + def _getByTag(self, tag): + if tag is None: + self.parsers.clear() + return [] + elif callable(tag): + parsers = [ parser for parser in self.parsers if tag(parser) ] + for parser in parsers: + self.parsers.remove(parser) + elif tag[0] == "class": + self.validate = False + return [ tag[1] ] + elif tag[0] == "args": + self.parser_args = tag[1] + return [] + else: + tag = 
self.translate(*tag) + parsers = [] + if tag is not None: + key = tag[0] + byname = self.db.bytag.get(key,{}) + if tag[1] is None: + values = byname.itervalues() + else: + values = byname.get(tag[1],()), + if key == "id" and values: + self.validate = False + for value in values: + for parser in value: + if parser in self.parsers: + parsers.append(parser) + self.parsers.remove(parser) + return parsers + + def parse(self, stream, fallback=True): + fb = None + warn = warning + for parser in self.parsers: + try: + parser_obj = parser(stream, validate=self.validate) + if self.parser_args: + for key, value in self.parser_args.iteritems(): + setattr(parser_obj, key, value) + return parser_obj + except ValidateError, err: + res = unicode(err) + if fallback and self.fallback: + fb = parser + except HACHOIR_ERRORS, err: + res = unicode(err) + if warn: + if parser == self.other: + warn = info + warn(_("Skip parser '%s': %s") % (parser.__name__, res)) + fallback = False + if self.use_fallback and fb: + warning(_("Force use of parser '%s'") % fb.__name__) + return fb(stream) + + +def guessParser(stream): + return QueryParser(stream.tags).parse(stream) + + +def createParser(filename, real_filename=None, tags=None): + """ + Create a parser from a file or returns None on error. + + Options: + - filename (unicode): Input file name ; + - real_filename (str|unicode): Real file name. 
+ """ + if not tags: + tags = [] + stream = FileInputStream(filename, real_filename, tags=tags) + return guessParser(stream) diff --git a/libs/hachoir_parser/image/__init__.py b/libs/hachoir_parser/image/__init__.py new file mode 100644 index 0000000..78c9c20 --- /dev/null +++ b/libs/hachoir_parser/image/__init__.py @@ -0,0 +1,12 @@ +from hachoir_parser.image.bmp import BmpFile +from hachoir_parser.image.gif import GifFile +from hachoir_parser.image.ico import IcoFile +from hachoir_parser.image.jpeg import JpegFile +from hachoir_parser.image.pcx import PcxFile +from hachoir_parser.image.psd import PsdFile +from hachoir_parser.image.png import PngFile +from hachoir_parser.image.tga import TargaFile +from hachoir_parser.image.tiff import TiffFile +from hachoir_parser.image.wmf import WMF_File +from hachoir_parser.image.xcf import XcfFile + diff --git a/libs/hachoir_parser/image/bmp.py b/libs/hachoir_parser/image/bmp.py new file mode 100644 index 0000000..c4865d3 --- /dev/null +++ b/libs/hachoir_parser/image/bmp.py @@ -0,0 +1,195 @@ +""" +Microsoft Bitmap picture parser. 
class ImageLine(FieldSet):
    """One scanline of a BMP image: ``width`` pixels followed by the
    padding needed to align the row on a 32-bit boundary."""

    def __init__(self, parent, name, width, pixel_class):
        FieldSet.__init__(self, parent, name)
        self._pixel = pixel_class
        self._width = width
        # BMP rows are padded up to a multiple of 32 bits.
        self._size = alignValue(self._pixel.static_size * self._width, 32)

    def createFields(self):
        for _ in xrange(self._width):
            yield self._pixel(self, "pixel[]")
        remaining = self.size - self.current_size
        if remaining:
            yield createPaddingField(self, remaining)
UInt32(self, "header_size", "Header size") + yield UInt32(self, "width", "Width (pixels)") + yield UInt32(self, "height", "Height (pixels)") + yield UInt16(self, "nb_plan", "Number of plan (=1)") + yield UInt16(self, "bpp", "Bits per pixel") # may be zero for PNG/JPEG picture + + # Version 3 (40 bytes) + if self["header_size"].value < 40: + return + yield Enum(UInt32(self, "compression", "Compression method"), BmpFile.COMPRESSION_NAME) + yield UInt32(self, "image_size", "Image size (bytes)") + yield UInt32(self, "horizontal_dpi", "Horizontal DPI") + yield UInt32(self, "vertical_dpi", "Vertical DPI") + yield UInt32(self, "used_colors", "Number of color used") + yield UInt32(self, "important_color", "Number of import colors") + + # Version 4 (108 bytes) + if self["header_size"].value < 108: + return + yield textHandler(UInt32(self, "red_mask"), hexadecimal) + yield textHandler(UInt32(self, "green_mask"), hexadecimal) + yield textHandler(UInt32(self, "blue_mask"), hexadecimal) + yield textHandler(UInt32(self, "alpha_mask"), hexadecimal) + yield Enum(UInt32(self, "color_space"), self.color_space_name) + yield CIEXYZ(self, "red_primary") + yield CIEXYZ(self, "green_primary") + yield CIEXYZ(self, "blue_primary") + yield UInt32(self, "gamma_red") + yield UInt32(self, "gamma_green") + yield UInt32(self, "gamma_blue") + +def parseImageData(parent, name, size, header): + if ("compression" not in header) or (header["compression"].value in (0, 3)): + width = header["width"].value + height = header["height"].value + bpp = header["bpp"].value + if bpp == 32: + cls = UInt32 + elif bpp == 24: + cls = RGB + elif bpp == 8: + cls = UInt8 + elif bpp == 4: + cls = Pixel4bit + else: + cls = None + if cls: + return ImagePixels(parent, name, width, height, cls, size=size*8) + return RawBytes(parent, name, size) + +class BmpFile(Parser): + PARSER_TAGS = { + "id": "bmp", + "category": "image", + "file_ext": ("bmp",), + "mime": (u"image/x-ms-bmp", u"image/x-bmp"), + "min_size": 30*8, +# 
"magic": (("BM", 0),), + "magic_regex": (( + # "BM", , , header_size=(12|40|108) + "BM.{4}.{8}[\x0C\x28\x6C]\0{3}", + 0),), + "description": "Microsoft bitmap (BMP) picture" + } + endian = LITTLE_ENDIAN + + COMPRESSION_NAME = { + 0: u"Uncompressed", + 1: u"RLE 8-bit", + 2: u"RLE 4-bit", + 3: u"Bitfields", + 4: u"JPEG", + 5: u"PNG", + } + + def validate(self): + if self.stream.readBytes(0, 2) != 'BM': + return "Wrong file signature" + if self["header/header_size"].value not in (12, 40, 108): + return "Unknown header size (%s)" % self["header_size"].value + if self["header/nb_plan"].value != 1: + return "Invalid number of planes" + return True + + def createFields(self): + yield String(self, "signature", 2, "Header (\"BM\")", charset="ASCII") + yield UInt32(self, "file_size", "File size (bytes)") + yield PaddingBytes(self, "reserved", 4, "Reserved") + yield UInt32(self, "data_start", "Data start position") + yield BmpHeader(self, "header") + + # Compute number of color + header = self["header"] + bpp = header["bpp"].value + if 0 < bpp <= 8: + if "used_colors" in header and header["used_colors"].value: + nb_color = header["used_colors"].value + else: + nb_color = (1 << bpp) + else: + nb_color = 0 + + # Color palette (if any) + if nb_color: + yield PaletteRGBA(self, "palette", nb_color) + + # Seek to data start + field = self.seekByte(self["data_start"].value) + if field: + yield field + + # Image pixels + size = min(self["file_size"].value-self["data_start"].value, (self.size - self.current_size)//8) + yield parseImageData(self, "pixels", size, header) + + def createDescription(self): + return u"Microsoft Bitmap version %s" % self["header"].getFormatVersion() + + def createContentSize(self): + return self["file_size"].value * 8 + diff --git a/libs/hachoir_parser/image/common.py b/libs/hachoir_parser/image/common.py new file mode 100644 index 0000000..5046058 --- /dev/null +++ b/libs/hachoir_parser/image/common.py @@ -0,0 +1,49 @@ +from hachoir_core.field import 
FieldSet, UserVector, UInt8 + +class RGB(FieldSet): + color_name = { + ( 0, 0, 0): "Black", + (255, 0, 0): "Red", + ( 0, 255, 0): "Green", + ( 0, 0, 255): "Blue", + (255, 255, 255): "White", + } + static_size = 24 + + def createFields(self): + yield UInt8(self, "red", "Red") + yield UInt8(self, "green", "Green") + yield UInt8(self, "blue", "Blue") + + def createDescription(self): + rgb = self["red"].value, self["green"].value, self["blue"].value + name = self.color_name.get(rgb) + if not name: + name = "#%02X%02X%02X" % rgb + return "RGB color: " + name + +class RGBA(RGB): + static_size = 32 + + def createFields(self): + yield UInt8(self, "red", "Red") + yield UInt8(self, "green", "Green") + yield UInt8(self, "blue", "Blue") + yield UInt8(self, "alpha", "Alpha") + + def createDescription(self): + description = RGB.createDescription(self) + opacity = self["alpha"].value*100/255 + return "%s (opacity: %s%%)" % (description, opacity) + +class PaletteRGB(UserVector): + item_class = RGB + item_name = "color" + def createDescription(self): + return "Palette of %u RGB colors" % len(self) + +class PaletteRGBA(PaletteRGB): + item_class = RGBA + def createDescription(self): + return "Palette of %u RGBA colors" % len(self) + diff --git a/libs/hachoir_parser/image/exif.py b/libs/hachoir_parser/image/exif.py new file mode 100644 index 0000000..7b86793 --- /dev/null +++ b/libs/hachoir_parser/image/exif.py @@ -0,0 +1,361 @@ +""" +EXIF metadata parser (can be found in a JPEG picture for example) + +Author: Victor Stinner +""" + +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, UInt32, + Int32, Enum, String, + Bytes, SubFile, + NullBytes, createPaddingField) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN, NETWORK_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.tools import createDict + +MAX_COUNT = 1000 + +def rationalFactory(class_name, size, field_class): + class Rational(FieldSet): + static_size = size 
+ + def createFields(self): + yield field_class(self, "numerator") + yield field_class(self, "denominator") + + def createValue(self): + return float(self["numerator"].value) / self["denominator"].value + cls = Rational + cls.__name__ = class_name + return cls + +RationalInt32 = rationalFactory("RationalInt32", 64, Int32) +RationalUInt32 = rationalFactory("RationalUInt32", 64, UInt32) + +class BasicIFDEntry(FieldSet): + TYPE_BYTE = 0 + TYPE_UNDEFINED = 7 + TYPE_RATIONAL = 5 + TYPE_SIGNED_RATIONAL = 10 + TYPE_INFO = { + 1: (UInt8, "BYTE (8 bits)"), + 2: (String, "ASCII (8 bits)"), + 3: (UInt16, "SHORT (16 bits)"), + 4: (UInt32, "LONG (32 bits)"), + 5: (RationalUInt32, "RATIONAL (2x LONG, 64 bits)"), + 7: (Bytes, "UNDEFINED (8 bits)"), + 9: (Int32, "SIGNED LONG (32 bits)"), + 10: (RationalInt32, "SRATIONAL (2x SIGNED LONGs, 64 bits)"), + } + ENTRY_FORMAT = createDict(TYPE_INFO, 0) + TYPE_NAME = createDict(TYPE_INFO, 1) + + def createFields(self): + yield Enum(textHandler(UInt16(self, "tag", "Tag"), hexadecimal), self.TAG_NAME) + yield Enum(textHandler(UInt16(self, "type", "Type"), hexadecimal), self.TYPE_NAME) + yield UInt32(self, "count", "Count") + if self["type"].value not in (self.TYPE_BYTE, self.TYPE_UNDEFINED) \ + and MAX_COUNT < self["count"].value: + raise ParserError("EXIF: Invalid count value (%s)" % self["count"].value) + value_size, array_size = self.getSizes() + + # Get offset/value + if not value_size: + yield NullBytes(self, "padding", 4) + elif value_size <= 32: + if 1 < array_size: + name = "value[]" + else: + name = "value" + kw = {} + cls = self.value_cls + if cls is String: + args = (self, name, value_size/8, "Value") + kw["strip"] = " \0" + kw["charset"] = "ISO-8859-1" + elif cls is Bytes: + args = (self, name, value_size/8, "Value") + else: + args = (self, name, "Value") + for index in xrange(array_size): + yield cls(*args, **kw) + + size = array_size * value_size + if size < 32: + yield NullBytes(self, "padding", (32-size)//8) + else: + yield 
UInt32(self, "offset", "Value offset") + + def getSizes(self): + """ + Returns (value_size, array_size): value_size in bits and + array_size in number of items. + """ + # Create format + self.value_cls = self.ENTRY_FORMAT.get(self["type"].value, Bytes) + + # Set size + count = self["count"].value + if self.value_cls in (String, Bytes): + return 8 * count, 1 + else: + return self.value_cls.static_size * count, count + +class ExifEntry(BasicIFDEntry): + OFFSET_JPEG_SOI = 0x0201 + EXIF_IFD_POINTER = 0x8769 + + TAG_WIDTH = 0xA002 + TAG_HEIGHT = 0xA003 + + TAG_GPS_LATITUDE_REF = 0x0001 + TAG_GPS_LATITUDE = 0x0002 + TAG_GPS_LONGITUDE_REF = 0x0003 + TAG_GPS_LONGITUDE = 0x0004 + TAG_GPS_ALTITUDE_REF = 0x0005 + TAG_GPS_ALTITUDE = 0x0006 + TAG_GPS_TIMESTAMP = 0x0007 + TAG_GPS_DATESTAMP = 0x001d + + TAG_IMG_TITLE = 0x010e + TAG_FILE_TIMESTAMP = 0x0132 + TAG_SOFTWARE = 0x0131 + TAG_CAMERA_MODEL = 0x0110 + TAG_CAMERA_MANUFACTURER = 0x010f + TAG_ORIENTATION = 0x0112 + TAG_EXPOSURE = 0x829A + TAG_FOCAL = 0x829D + TAG_BRIGHTNESS = 0x9203 + TAG_APERTURE = 0x9205 + TAG_USER_COMMENT = 0x9286 + + TAG_NAME = { + # GPS + 0x0000: "GPS version ID", + 0x0001: "GPS latitude ref", + 0x0002: "GPS latitude", + 0x0003: "GPS longitude ref", + 0x0004: "GPS longitude", + 0x0005: "GPS altitude ref", + 0x0006: "GPS altitude", + 0x0007: "GPS timestamp", + 0x0008: "GPS satellites", + 0x0009: "GPS status", + 0x000a: "GPS measure mode", + 0x000b: "GPS DOP", + 0x000c: "GPS speed ref", + 0x000d: "GPS speed", + 0x000e: "GPS track ref", + 0x000f: "GPS track", + 0x0010: "GPS img direction ref", + 0x0011: "GPS img direction", + 0x0012: "GPS map datum", + 0x0013: "GPS dest latitude ref", + 0x0014: "GPS dest latitude", + 0x0015: "GPS dest longitude ref", + 0x0016: "GPS dest longitude", + 0x0017: "GPS dest bearing ref", + 0x0018: "GPS dest bearing", + 0x0019: "GPS dest distance ref", + 0x001a: "GPS dest distance", + 0x001b: "GPS processing method", + 0x001c: "GPS area information", + 0x001d: "GPS datestamp", + 
0x001e: "GPS differential", + + 0x0100: "Image width", + 0x0101: "Image height", + 0x0102: "Number of bits per component", + 0x0103: "Compression scheme", + 0x0106: "Pixel composition", + TAG_ORIENTATION: "Orientation of image", + 0x0115: "Number of components", + 0x011C: "Image data arrangement", + 0x0212: "Subsampling ratio Y to C", + 0x0213: "Y and C positioning", + 0x011A: "Image resolution width direction", + 0x011B: "Image resolution in height direction", + 0x0128: "Unit of X and Y resolution", + + 0x0111: "Image data location", + 0x0116: "Number of rows per strip", + 0x0117: "Bytes per compressed strip", + 0x0201: "Offset to JPEG SOI", + 0x0202: "Bytes of JPEG data", + + 0x012D: "Transfer function", + 0x013E: "White point chromaticity", + 0x013F: "Chromaticities of primaries", + 0x0211: "Color space transformation matrix coefficients", + 0x0214: "Pair of blank and white reference values", + + TAG_FILE_TIMESTAMP: "File change date and time", + TAG_IMG_TITLE: "Image title", + TAG_CAMERA_MANUFACTURER: "Camera (Image input equipment) manufacturer", + TAG_CAMERA_MODEL: "Camera (Input input equipment) model", + TAG_SOFTWARE: "Software", + 0x013B: "File change date and time", + 0x8298: "Copyright holder", + 0x8769: "Exif IFD Pointer", + + TAG_EXPOSURE: "Exposure time", + TAG_FOCAL: "F number", + 0x8822: "Exposure program", + 0x8824: "Spectral sensitivity", + 0x8827: "ISO speed rating", + 0x8828: "Optoelectric conversion factor OECF", + 0x9201: "Shutter speed", + 0x9202: "Aperture", + TAG_BRIGHTNESS: "Brightness", + 0x9204: "Exposure bias", + TAG_APERTURE: "Maximum lens aperture", + 0x9206: "Subject distance", + 0x9207: "Metering mode", + 0x9208: "Light source", + 0x9209: "Flash", + 0x920A: "Lens focal length", + 0x9214: "Subject area", + 0xA20B: "Flash energy", + 0xA20C: "Spatial frequency response", + 0xA20E: "Focal plane X resolution", + 0xA20F: "Focal plane Y resolution", + 0xA210: "Focal plane resolution unit", + 0xA214: "Subject location", + 0xA215: "Exposure 
index", + 0xA217: "Sensing method", + 0xA300: "File source", + 0xA301: "Scene type", + 0xA302: "CFA pattern", + 0xA401: "Custom image processing", + 0xA402: "Exposure mode", + 0xA403: "White balance", + 0xA404: "Digital zoom ratio", + 0xA405: "Focal length in 35 mm film", + 0xA406: "Scene capture type", + 0xA407: "Gain control", + 0xA408: "Contrast", + + 0x9000: "Exif version", + 0xA000: "Supported Flashpix version", + 0xA001: "Color space information", + 0x9101: "Meaning of each component", + 0x9102: "Image compression mode", + TAG_WIDTH: "Valid image width", + TAG_HEIGHT: "Valid image height", + 0x927C: "Manufacturer notes", + TAG_USER_COMMENT: "User comments", + 0xA004: "Related audio file", + 0x9003: "Date and time of original data generation", + 0x9004: "Date and time of digital data generation", + 0x9290: "DateTime subseconds", + 0x9291: "DateTimeOriginal subseconds", + 0x9292: "DateTimeDigitized subseconds", + 0xA420: "Unique image ID", + 0xA005: "Interoperability IFD Pointer" + } + + def createDescription(self): + return "Entry: %s" % self["tag"].display + +def sortExifEntry(a,b): + return int( a["offset"].value - b["offset"].value ) + +class ExifIFD(FieldSet): + def seek(self, offset): + """ + Seek to byte address relative to parent address. 
+ """ + padding = offset - (self.address + self.current_size)/8 + if 0 < padding: + return createPaddingField(self, padding*8) + else: + return None + + def createFields(self): + offset_diff = 6 + yield UInt16(self, "count", "Number of entries") + entries = [] + next_chunk_offset = None + count = self["count"].value + if not count: + return + while count: + addr = self.absolute_address + self.current_size + next = self.stream.readBits(addr, 32, NETWORK_ENDIAN) + if next in (0, 0xF0000000): + break + entry = ExifEntry(self, "entry[]") + yield entry + if entry["tag"].value in (ExifEntry.EXIF_IFD_POINTER, ExifEntry.OFFSET_JPEG_SOI): + next_chunk_offset = entry["value"].value + offset_diff + if 32 < entry.getSizes()[0]: + entries.append(entry) + count -= 1 + yield UInt32(self, "next", "Next IFD offset") + try: + entries.sort( sortExifEntry ) + except TypeError: + raise ParserError("Unable to sort entries!") + value_index = 0 + for entry in entries: + padding = self.seek(entry["offset"].value + offset_diff) + if padding is not None: + yield padding + + value_size, array_size = entry.getSizes() + if not array_size: + continue + cls = entry.value_cls + if 1 < array_size: + name = "value_%s[]" % entry.name + else: + name = "value_%s" % entry.name + desc = "Value of \"%s\"" % entry["tag"].display + if cls is String: + for index in xrange(array_size): + yield cls(self, name, value_size/8, desc, strip=" \0", charset="ISO-8859-1") + elif cls is Bytes: + for index in xrange(array_size): + yield cls(self, name, value_size/8, desc) + else: + for index in xrange(array_size): + yield cls(self, name, desc) + value_index += 1 + if next_chunk_offset is not None: + padding = self.seek(next_chunk_offset) + if padding is not None: + yield padding + + def createDescription(self): + return "Exif IFD (id %s)" % self["id"].value + +class Exif(FieldSet): + def createFields(self): + # Headers + yield String(self, "header", 6, "Header (Exif\\0\\0)", charset="ASCII") + if self["header"].value != 
"Exif\0\0": + raise ParserError("Invalid EXIF signature!") + yield String(self, "byte_order", 2, "Byte order", charset="ASCII") + if self["byte_order"].value not in ("II", "MM"): + raise ParserError("Invalid endian!") + if self["byte_order"].value == "II": + self.endian = LITTLE_ENDIAN + else: + self.endian = BIG_ENDIAN + yield UInt16(self, "version", "TIFF version number") + yield UInt32(self, "img_dir_ofs", "Next image directory offset") + while not self.eof: + addr = self.absolute_address + self.current_size + tag = self.stream.readBits(addr, 16, NETWORK_ENDIAN) + if tag == 0xFFD8: + size = (self._size - self.current_size) // 8 + yield SubFile(self, "thumbnail", size, "Thumbnail (JPEG file)", mime_type="image/jpeg") + break + elif tag == 0xFFFF: + break + yield ExifIFD(self, "ifd[]", "IFD") + padding = self.seekBit(self._size) + if padding is not None: + yield padding + + diff --git a/libs/hachoir_parser/image/gif.py b/libs/hachoir_parser/image/gif.py new file mode 100644 index 0000000..c7e0b89 --- /dev/null +++ b/libs/hachoir_parser/image/gif.py @@ -0,0 +1,227 @@ +""" +GIF picture parser. 
+ +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + Enum, UInt8, UInt16, + Bit, Bits, NullBytes, + String, PascalString8, Character, + NullBits, RawBytes) +from hachoir_parser.image.common import PaletteRGB +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.tools import humanDuration +from hachoir_core.text_handler import textHandler, displayHandler, hexadecimal + +# Maximum image dimension (in pixel) +MAX_WIDTH = 6000 +MAX_HEIGHT = MAX_WIDTH +MAX_FILE_SIZE = 100 * 1024 * 1024 + +class Image(FieldSet): + def createFields(self): + yield UInt16(self, "left", "Left") + yield UInt16(self, "top", "Top") + yield UInt16(self, "width", "Width") + yield UInt16(self, "height", "Height") + + yield Bits(self, "bpp", 3, "Bits / pixel minus one") + yield NullBits(self, "nul", 2) + yield Bit(self, "sorted", "Sorted??") + yield Bit(self, "interlaced", "Interlaced?") + yield Bit(self, "has_local_map", "Use local color map?") + + if self["has_local_map"].value: + nb_color = 1 << (1 + self["bpp"].value) + yield PaletteRGB(self, "local_map", nb_color, "Local color map") + + yield UInt8(self, "code_size", "LZW Minimum Code Size") + while True: + blen = UInt8(self, "block_len[]", "Block Length") + yield blen + if blen.value != 0: + yield RawBytes(self, "data[]", blen.value, "Image Data") + else: + break + + def createDescription(self): + return "Image: %ux%u pixels at (%u,%u)" % ( + self["width"].value, self["height"].value, + self["left"].value, self["top"].value) + +DISPOSAL_METHOD = { + 0: "No disposal specified", + 1: "Do not dispose", + 2: "Restore to background color", + 3: "Restore to previous", +} + +NETSCAPE_CODE = { + 1: "Loop count", +} + +def parseApplicationExtension(parent): + yield PascalString8(parent, "app_name", "Application name") + yield UInt8(parent, "size") + size = parent["size"].value + if parent["app_name"].value == "NETSCAPE2.0" and size == 3: + yield Enum(UInt8(parent, 
"netscape_code"), NETSCAPE_CODE) + if parent["netscape_code"].value == 1: + yield UInt16(parent, "loop_count") + else: + yield RawBytes(parent, "raw", 2) + else: + yield RawBytes(parent, "raw", size) + yield NullBytes(parent, "terminator", 1, "Terminator (0)") + +def parseGraphicControl(parent): + yield UInt8(parent, "size", "Block size (4)") + + yield Bit(parent, "has_transp", "Has transparency") + yield Bit(parent, "user_input", "User input") + yield Enum(Bits(parent, "disposal_method", 3), DISPOSAL_METHOD) + yield NullBits(parent, "reserved[]", 3) + + if parent["size"].value != 4: + raise ParserError("Invalid graphic control size") + yield displayHandler(UInt16(parent, "delay", "Delay time in millisecond"), humanDuration) + yield UInt8(parent, "transp", "Transparent color index") + yield NullBytes(parent, "terminator", 1, "Terminator (0)") + +def parseComments(parent): + while True: + field = PascalString8(parent, "comment[]", strip=" \0\r\n\t") + yield field + if field.length == 0: + break + +def parseTextExtension(parent): + yield UInt8(parent, "block_size", "Block Size") + yield UInt16(parent, "left", "Text Grid Left") + yield UInt16(parent, "top", "Text Grid Top") + yield UInt16(parent, "width", "Text Grid Width") + yield UInt16(parent, "height", "Text Grid Height") + yield UInt8(parent, "cell_width", "Character Cell Width") + yield UInt8(parent, "cell_height", "Character Cell Height") + yield UInt8(parent, "fg_color", "Foreground Color Index") + yield UInt8(parent, "bg_color", "Background Color Index") + while True: + field = PascalString8(parent, "comment[]", strip=" \0\r\n\t") + yield field + if field.length == 0: + break + +def defaultExtensionParser(parent): + while True: + size = UInt8(parent, "size[]", "Size (in bytes)") + yield size + if 0 < size.value: + yield RawBytes(parent, "content[]", size.value) + else: + break + +class Extension(FieldSet): + ext_code = { + 0xf9: ("graphic_ctl[]", parseGraphicControl, "Graphic control"), + 0xfe: ("comments[]", 
parseComments, "Comments"), + 0xff: ("app_ext[]", parseApplicationExtension, "Application extension"), + 0x01: ("text_ext[]", parseTextExtension, "Plain text extension") + } + def __init__(self, *args): + FieldSet.__init__(self, *args) + code = self["code"].value + if code in self.ext_code: + self._name, self.parser, self._description = self.ext_code[code] + else: + self.parser = defaultExtensionParser + + def createFields(self): + yield textHandler(UInt8(self, "code", "Extension code"), hexadecimal) + for field in self.parser(self): + yield field + + def createDescription(self): + return "Extension: function %s" % self["func"].display + +class ScreenDescriptor(FieldSet): + def createFields(self): + yield UInt16(self, "width", "Width") + yield UInt16(self, "height", "Height") + yield Bits(self, "bpp", 3, "Bits per pixel minus one") + yield Bit(self, "reserved", "(reserved)") + yield Bits(self, "color_res", 3, "Color resolution minus one") + yield Bit(self, "global_map", "Has global map?") + yield UInt8(self, "background", "Background color") + yield UInt8(self, "pixel_aspect_ratio", "Pixel Aspect Ratio") + + def createDescription(self): + colors = 1 << (self["bpp"].value+1) + return "Screen descriptor: %ux%u pixels %u colors" \ + % (self["width"].value, self["height"].value, colors) + +class GifFile(Parser): + endian = LITTLE_ENDIAN + separator_name = { + "!": "Extension", + ",": "Image", + ";": "Terminator" + } + PARSER_TAGS = { + "id": "gif", + "category": "image", + "file_ext": ("gif",), + "mime": (u"image/gif",), + "min_size": (6 + 7 + 1 + 9)*8, # signature + screen + separator + image + "magic": (("GIF87a", 0), ("GIF89a", 0)), + "description": "GIF picture" + } + + def validate(self): + if self.stream.readBytes(0, 6) not in ("GIF87a", "GIF89a"): + return "Wrong header" + if self["screen/width"].value == 0 or self["screen/height"].value == 0: + return "Invalid image size" + if MAX_WIDTH < self["screen/width"].value: + return "Image width too big (%u)" % 
self["screen/width"].value + if MAX_HEIGHT < self["screen/height"].value: + return "Image height too big (%u)" % self["screen/height"].value + return True + + def createFields(self): + # Header + yield String(self, "magic", 3, "File magic code", charset="ASCII") + yield String(self, "version", 3, "GIF version", charset="ASCII") + + yield ScreenDescriptor(self, "screen") + if self["screen/global_map"].value: + bpp = (self["screen/bpp"].value+1) + yield PaletteRGB(self, "color_map", 1 << bpp, "Color map") + self.color_map = self["color_map"] + else: + self.color_map = None + + self.images = [] + while True: + code = Enum(Character(self, "separator[]", "Separator code"), self.separator_name) + yield code + code = code.value + if code == "!": + yield Extension(self, "extensions[]") + elif code == ",": + yield Image(self, "image[]") + elif code == ";": + # GIF Terminator + break + else: + raise ParserError("Wrong GIF image separator: 0x%02X" % ord(code)) + + def createContentSize(self): + field = self["image[0]"] + start = field.absolute_address + field.size + end = start + MAX_FILE_SIZE*8 + pos = self.stream.searchBytes("\0;", start, end) + if pos: + return pos + 16 + return None diff --git a/libs/hachoir_parser/image/ico.py b/libs/hachoir_parser/image/ico.py new file mode 100644 index 0000000..193a81c --- /dev/null +++ b/libs/hachoir_parser/image/ico.py @@ -0,0 +1,139 @@ +""" +Microsoft Windows icon and cursor file format parser. 
+ +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, UInt32, Enum, RawBytes) +from hachoir_parser.image.common import PaletteRGBA +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.common.win32 import BitmapInfoHeader + +class IconHeader(FieldSet): + def createFields(self): + yield UInt8(self, "width", "Width") + yield UInt8(self, "height", "Height") + yield UInt8(self, "nb_color", "Number of colors") + yield UInt8(self, "reserved", "(reserved)") + yield UInt16(self, "planes", "Color planes (=1)") + yield UInt16(self, "bpp", "Bits per pixel") + yield UInt32(self, "size", "Content size in bytes") + yield UInt32(self, "offset", "Data offset") + + def createDescription(self): + return "Icon: %ux%u pixels, %u bits/pixel" % \ + (self["width"].value, self["height"].value, self["bpp"].value) + + def isValid(self): + if self["nb_color"].value == 0: + if self["bpp"].value in (8, 24, 32) and self["planes"].value == 1: + return True + if self["planes"].value == 4 and self["bpp"].value == 0: + return True + elif self["nb_color"].value == 16: + if self["bpp"].value in (4, 16) and self["planes"].value == 1: + return True + else: + return False + if self["bpp"].value == 0 and self["planes"].value == 0: + return True + return False + +class IconData(FieldSet): + def __init__(self, parent, name, header): + FieldSet.__init__(self, parent, name, "Icon data") + self.header = header + + def createFields(self): + yield BitmapInfoHeader(self, "header") + + # Read palette if needed + nb_color = self.header["nb_color"].value + if self.header["bpp"].value == 8: + nb_color = 256 + if nb_color != 0: + yield PaletteRGBA(self, "palette", nb_color) + + # Read pixels + size = self.header["size"].value - self.current_size/8 + yield RawBytes(self, "pixels", size, "Image pixels") + +class IcoFile(Parser): + endian = LITTLE_ENDIAN + PARSER_TAGS = { + "id": "ico", + "category": "image", + 
"file_ext": ("ico", "cur"), + "mime": (u"image/x-ico",), + "min_size": (22 + 40)*8, +# "magic": ( +# ("\0\0\1\0", 0), # Icon +# ("\0\0\2\0", 0), # Cursor +# ), + "magic_regex": (( + # signature=0, type=(1|2), count in 1..20, + "\0\0[\1\2]\0[\x01-\x14]." + # size=(16x16|32x32|48x48|64x64), + "(\x10\x10|\x20\x20|\x30\x30|\x40\x40)" + # nb_color=0 or 16; nb_plane=(0|1|4), bpp=(0|8|24|32) + "[\x00\x10]\0[\0\1\4][\0\x08\x18\x20]\0", + 0),), + "description": "Microsoft Windows icon or cursor", + } + TYPE_NAME = { + 1: "icon", + 2: "cursor" + } + + def validate(self): + # Check signature and type + if self["signature"].value != 0: + return "Wrong file signature" + if self["type"].value not in self.TYPE_NAME: + return "Unknown picture type" + + # Check all icon headers + index = -1 + for field in self: + if field.name.startswith("icon_header"): + index += 1 + if not field.isValid(): + return "Invalid header #%u" % index + elif 0 <= index: + break + return True + + def createFields(self): + yield UInt16(self, "signature", "Signature (0x0000)") + yield Enum(UInt16(self, "type", "Resource type"), self.TYPE_NAME) + yield UInt16(self, "nb_items", "Number of items") + items = [] + for index in xrange(self["nb_items"].value): + item = IconHeader(self, "icon_header[]") + yield item + items.append(item) + for header in items: + if header["offset"].value*8 != self.current_size: + raise ParserError("Icon: Problem with icon data offset.") + yield IconData(self, "icon_data[]", header) + + def createDescription(self): + desc = "Microsoft Windows %s" % self["type"].display + size = [] + for header in self.array("icon_header"): + size.append("%ux%ux%u" % (header["width"].value, + header["height"].value, header["bpp"].value)) + if size: + return "%s: %s" % (desc, ", ".join(size)) + else: + return desc + + def createContentSize(self): + count = self["nb_items"].value + if not count: + return None + field = self["icon_data[%u]" % (count-1)] + return field.absolute_address + field.size + diff 
--git a/libs/hachoir_parser/image/iptc.py b/libs/hachoir_parser/image/iptc.py new file mode 100644 index 0000000..6727de7 --- /dev/null +++ b/libs/hachoir_parser/image/iptc.py @@ -0,0 +1,113 @@ +""" +IPTC metadata parser (can be found in a JPEG picture for example) + +Sources: +- Image-MetaData Perl module: + http://www.annocpan.org/~BETTELLI/Image-MetaData-JPEG-0.15/... + ...lib/Image/MetaData/JPEG/TagLists.pod +- IPTC tag name and description: + http://peccatte.karefil.com/software/IPTCTableau.pdf + +Author: Victor Stinner +""" + +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, String, RawBytes, NullBytes) +from hachoir_core.text_handler import textHandler, hexadecimal + +def IPTC_String(parent, name, desc=None): + # Charset may be utf-8, ISO-8859-1, or ... + return String(parent, name, parent["size"].value, desc, + strip=" ") + +dataset1 = { +} +dataset2 = { + 0: ("record_version", "Record version (2 for JPEG)", UInt16), + 5: ("obj_name", "Object name", None), + 7: ("edit_stat", "Edit status", None), + 10: ("urgency", "Urgency", UInt8), + 15: ("category[]", "Category", None), + 22: ("fixture", "Fixture identifier", IPTC_String), + 25: ("keyword[]", "Keywords", IPTC_String), + 30: ("release_date", "Release date", IPTC_String), + 35: ("release_time", "Release time", IPTC_String), + 40: ("instruction", "Special instructions", IPTC_String), + 55: ("date_created", "Date created", IPTC_String), + 60: ("time_created", "Time created (ISO 8601)", IPTC_String), + 65: ("originating_prog", "Originating program", IPTC_String), + 70: ("prog_ver", "Program version", IPTC_String), + 80: ("author", "By-line (Author)", IPTC_String), + 85: ("author_job", "By-line (Author precision)", IPTC_String), + 90: ("city", "City", IPTC_String), + 95: ("state", "Province / State", IPTC_String), + 100: ("country_code", "Country / Primary location code", IPTC_String), + 101: ("country_name", "Country / Primary location name", IPTC_String), + 103: ("trans_ref", "Original 
transmission reference", IPTC_String), + 105: ("headline", "Headline", IPTC_String), + 110: ("credit", "Credit", IPTC_String), + 115: ("source", "Source", IPTC_String), + 116: ("copyright", "Copyright notice", IPTC_String), + 120: ("caption", "Caption/Abstract", IPTC_String), + 122: ("writer", "Writer/editor", IPTC_String), + 231: ("history[]", "Document history (timestamp)", IPTC_String) +} +datasets = {1: dataset1, 2: dataset2} + +class IPTC_Size(FieldSet): + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + value = 0 + for field in self: + value <<= 15 + value += (field.value & 0x7fff) + self.createValue = lambda: value + + def createFields(self): + while True: + field = UInt16(self, "value[]") + yield field + if field.value < 0x8000: + break + +class IPTC_Chunk(FieldSet): + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + number = self["dataset_nb"].value + self.dataset_info = None + if number in datasets: + tag = self["tag"].value + if tag in datasets[number]: + self.dataset_info = datasets[number][tag] + self._name = self.dataset_info[0] + self._description = self.dataset_info[1] + size_chunk = self["size"] + self._size = 3*8 + size_chunk.size + size_chunk.value*8 + + def createFields(self): + yield textHandler(UInt8(self, "signature", "IPTC signature (0x1c)"), hexadecimal) + if self["signature"].value != 0x1C: + raise ParserError("Wrong IPTC signature") + yield textHandler(UInt8(self, "dataset_nb", "Dataset number"), hexadecimal) + yield UInt8(self, "tag", "Tag") + yield IPTC_Size(self, "size", "Content size") + + size = self["size"].value + if 0 < size: + if self.dataset_info: + cls = self.dataset_info[2] + else: + cls = None + if cls: + yield cls(self, "content") + else: + yield RawBytes(self, "content", size) + +class IPTC(FieldSet): + def createFields(self): + while 5 <= (self._size - self.current_size)/8: + yield IPTC_Chunk(self, "chunk[]") + size = (self._size - self.current_size) / 8 + if 0 < size: + 
yield NullBytes(self, "padding", size) + diff --git a/libs/hachoir_parser/image/jpeg.py b/libs/hachoir_parser/image/jpeg.py new file mode 100644 index 0000000..30944aa --- /dev/null +++ b/libs/hachoir_parser/image/jpeg.py @@ -0,0 +1,368 @@ +""" +JPEG picture parser. + +Information: + +- APP14 documents + http://partners.adobe.com/public/developer/en/ps/sdk/5116.DCT_Filter.pdf + http://java.sun.com/j2se/1.5.0/docs/api/javax/imageio/metadata/doc-files/jpeg_metadata.html#color +- APP12: + http://search.cpan.org/~exiftool/Image-ExifTool/lib/Image/ExifTool/TagNames.pod + +Author: Victor Stinner +""" + +from hachoir_core.error import HachoirError +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, Enum, + Bit, Bits, NullBits, NullBytes, + String, RawBytes) +from hachoir_parser.image.common import PaletteRGB +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_parser.image.exif import Exif +from hachoir_parser.image.photoshop_metadata import PhotoshopMetadata + +MAX_FILESIZE = 100 * 1024 * 1024 + +# The four tables (hash/sum for color/grayscale JPEG) comes +# from ImageMagick project +QUALITY_HASH_COLOR = ( + 1020, 1015, 932, 848, 780, 735, 702, 679, 660, 645, + 632, 623, 613, 607, 600, 594, 589, 585, 581, 571, + 555, 542, 529, 514, 494, 474, 457, 439, 424, 410, + 397, 386, 373, 364, 351, 341, 334, 324, 317, 309, + 299, 294, 287, 279, 274, 267, 262, 257, 251, 247, + 243, 237, 232, 227, 222, 217, 213, 207, 202, 198, + 192, 188, 183, 177, 173, 168, 163, 157, 153, 148, + 143, 139, 132, 128, 125, 119, 115, 108, 104, 99, + 94, 90, 84, 79, 74, 70, 64, 59, 55, 49, + 45, 40, 34, 30, 25, 20, 15, 11, 6, 4, + 0) + +QUALITY_SUM_COLOR = ( + 32640,32635,32266,31495,30665,29804,29146,28599,28104,27670, + 27225,26725,26210,25716,25240,24789,24373,23946,23572,22846, + 21801,20842,19949,19121,18386,17651,16998,16349,15800,15247, + 
14783,14321,13859,13535,13081,12702,12423,12056,11779,11513, + 11135,10955,10676,10392,10208, 9928, 9747, 9564, 9369, 9193, + 9017, 8822, 8639, 8458, 8270, 8084, 7896, 7710, 7527, 7347, + 7156, 6977, 6788, 6607, 6422, 6236, 6054, 5867, 5684, 5495, + 5305, 5128, 4945, 4751, 4638, 4442, 4248, 4065, 3888, 3698, + 3509, 3326, 3139, 2957, 2775, 2586, 2405, 2216, 2037, 1846, + 1666, 1483, 1297, 1109, 927, 735, 554, 375, 201, 128, + 0) + +QUALITY_HASH_GRAY = ( + 510, 505, 422, 380, 355, 338, 326, 318, 311, 305, + 300, 297, 293, 291, 288, 286, 284, 283, 281, 280, + 279, 278, 277, 273, 262, 251, 243, 233, 225, 218, + 211, 205, 198, 193, 186, 181, 177, 172, 168, 164, + 158, 156, 152, 148, 145, 142, 139, 136, 133, 131, + 129, 126, 123, 120, 118, 115, 113, 110, 107, 105, + 102, 100, 97, 94, 92, 89, 87, 83, 81, 79, + 76, 74, 70, 68, 66, 63, 61, 57, 55, 52, + 50, 48, 44, 42, 39, 37, 34, 31, 29, 26, + 24, 21, 18, 16, 13, 11, 8, 6, 3, 2, + 0) + +QUALITY_SUM_GRAY = ( + 16320,16315,15946,15277,14655,14073,13623,13230,12859,12560, + 12240,11861,11456,11081,10714,10360,10027, 9679, 9368, 9056, + 8680, 8331, 7995, 7668, 7376, 7084, 6823, 6562, 6345, 6125, + 5939, 5756, 5571, 5421, 5240, 5086, 4976, 4829, 4719, 4616, + 4463, 4393, 4280, 4166, 4092, 3980, 3909, 3835, 3755, 3688, + 3621, 3541, 3467, 3396, 3323, 3247, 3170, 3096, 3021, 2952, + 2874, 2804, 2727, 2657, 2583, 2509, 2437, 2362, 2290, 2211, + 2136, 2068, 1996, 1915, 1858, 1773, 1692, 1620, 1552, 1477, + 1398, 1326, 1251, 1179, 1109, 1031, 961, 884, 814, 736, + 667, 592, 518, 441, 369, 292, 221, 151, 86, 64, + 0) + +JPEG_NATURAL_ORDER = ( + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63) + +class JpegChunkApp0(FieldSet): + UNIT_NAME = { + 0: "pixels", + 1: "dots per inch", + 2: "dots per cm", + } + + def 
createFields(self): + yield String(self, "jfif", 5, "JFIF string", charset="ASCII") + if self["jfif"].value != "JFIF\0": + raise ParserError( + "Stream doesn't look like JPEG chunk (wrong JFIF signature)") + yield UInt8(self, "ver_maj", "Major version") + yield UInt8(self, "ver_min", "Minor version") + yield Enum(UInt8(self, "units", "Units"), self.UNIT_NAME) + if self["units"].value == 0: + yield UInt16(self, "aspect_x", "Aspect ratio (X)") + yield UInt16(self, "aspect_y", "Aspect ratio (Y)") + else: + yield UInt16(self, "x_density", "X density") + yield UInt16(self, "y_density", "Y density") + yield UInt8(self, "thumb_w", "Thumbnail width") + yield UInt8(self, "thumb_h", "Thumbnail height") + thumb_size = self["thumb_w"].value * self["thumb_h"].value + if thumb_size != 0: + yield PaletteRGB(self, "thumb_palette", 256) + yield RawBytes(self, "thumb_data", thumb_size, "Thumbnail data") + +class Ducky(FieldSet): + BLOCK_TYPE = { + 0: "end", + 1: "Quality", + 2: "Comment", + 3: "Copyright", + } + def createFields(self): + yield Enum(UInt16(self, "type"), self.BLOCK_TYPE) + if self["type"].value == 0: + return + yield UInt16(self, "size") + size = self["size"].value + if size: + yield RawBytes(self, "data", size) + +class APP12(FieldSet): + """ + The JPEG APP12 "Picture Info" segment was used by some older cameras, and + contains ASCII-based meta information. 
+ """ + def createFields(self): + yield String(self, "ducky", 5, '"Ducky" string', charset="ASCII") + while not self.eof: + yield Ducky(self, "item[]") + +class StartOfFrame(FieldSet): + def createFields(self): + yield UInt8(self, "precision") + + yield UInt16(self, "height") + yield UInt16(self, "width") + yield UInt8(self, "nr_components") + + for index in range(self["nr_components"].value): + yield UInt8(self, "component_id[]") + yield UInt8(self, "high[]") + yield UInt8(self, "low[]") + +class Comment(FieldSet): + def createFields(self): + yield String(self, "comment", self.size//8, strip="\0") + +class AdobeChunk(FieldSet): + COLORSPACE_TRANSFORMATION = { + 1: "YCbCr (converted from RGB)", + 2: "YCCK (converted from CMYK)", + } + def createFields(self): + if self.stream.readBytes(self.absolute_address, 5) != "Adobe": + yield RawBytes(self, "raw", self.size//8, "Raw data") + return + yield String(self, "adobe", 5, "\"Adobe\" string", charset="ASCII") + yield UInt16(self, "version", "DCT encoder version") + yield Enum(Bit(self, "flag00"), + {False: "Chop down or subsampling", True: "Blend"}) + yield NullBits(self, "flags0_reserved", 15) + yield NullBytes(self, "flags1", 2) + yield Enum(UInt8(self, "color_transform", "Colorspace transformation code"), self.COLORSPACE_TRANSFORMATION) + +class StartOfScan(FieldSet): + def createFields(self): + yield UInt8(self, "nr_components") + + for index in range(self["nr_components"].value): + comp_id = UInt8(self, "component_id[]") + yield comp_id + if not(1 <= comp_id.value <= self["nr_components"].value): + raise ParserError("JPEG error: Invalid component-id") + yield UInt8(self, "value[]") + yield RawBytes(self, "raw", 3) # TODO: What's this??? 
+ +class RestartInterval(FieldSet): + def createFields(self): + yield UInt16(self, "interval", "Restart interval") + +class QuantizationTable(FieldSet): + def createFields(self): + # Code based on function get_dqt() (jdmarker.c from libjpeg62) + yield Bits(self, "is_16bit", 4) + yield Bits(self, "index", 4) + if self["index"].value >= 4: + raise ParserError("Invalid quantification index (%s)" % self["index"].value) + if self["is_16bit"].value: + coeff_type = UInt16 + else: + coeff_type = UInt8 + for index in xrange(64): + natural = JPEG_NATURAL_ORDER[index] + yield coeff_type(self, "coeff[%u]" % natural) + + def createDescription(self): + return "Quantification table #%u" % self["index"].value + +class DefineQuantizationTable(FieldSet): + def createFields(self): + while self.current_size < self.size: + yield QuantizationTable(self, "qt[]") + +class JpegChunk(FieldSet): + TAG_SOI = 0xD8 + TAG_EOI = 0xD9 + TAG_SOS = 0xDA + TAG_DQT = 0xDB + TAG_DRI = 0xDD + TAG_INFO = { + 0xC4: ("huffman[]", "Define Huffman Table (DHT)", None), + 0xD8: ("start_image", "Start of image (SOI)", None), + 0xD9: ("end_image", "End of image (EOI)", None), + 0xDA: ("start_scan", "Start Of Scan (SOS)", StartOfScan), + 0xDB: ("quantization[]", "Define Quantization Table (DQT)", DefineQuantizationTable), + 0xDC: ("nb_line", "Define number of Lines (DNL)", None), + 0xDD: ("restart_interval", "Define Restart Interval (DRI)", RestartInterval), + 0xE0: ("app0", "APP0", JpegChunkApp0), + 0xE1: ("exif", "Exif metadata", Exif), + 0xE2: ("icc", "ICC profile", None), + 0xEC: ("app12", "APP12", APP12), + 0xED: ("photoshop", "Photoshop", PhotoshopMetadata), + 0xEE: ("adobe", "Image encoding information for DCT filters (Adobe)", AdobeChunk), + 0xFE: ("comment[]", "Comment", Comment), + } + START_OF_FRAME = { + 0xC0: u"Baseline", + 0xC1: u"Extended sequential", + 0xC2: u"Progressive", + 0xC3: u"Lossless", + 0xC5: u"Differential sequential", + 0xC6: u"Differential progressive", + 0xC7: u"Differential 
lossless", + 0xC9: u"Extended sequential, arithmetic coding", + 0xCA: u"Progressive, arithmetic coding", + 0xCB: u"Lossless, arithmetic coding", + 0xCD: u"Differential sequential, arithmetic coding", + 0xCE: u"Differential progressive, arithmetic coding", + 0xCF: u"Differential lossless, arithmetic coding", + } + for key, text in START_OF_FRAME.iteritems(): + TAG_INFO[key] = ("start_frame", "Start of frame (%s)" % text.lower(), StartOfFrame) + + def __init__(self, parent, name, description=None): + FieldSet.__init__(self, parent, name, description) + tag = self["type"].value + if tag == 0xE1: + # Hack for Adobe extension: XAP metadata (as XML) + bytes = self.stream.readBytes(self.absolute_address + 32, 6) + if bytes == "Exif\0\0": + self._name = "exif" + self._description = "EXIF" + self._parser = Exif + else: + self._parser = None + elif tag in self.TAG_INFO: + self._name, self._description, self._parser = self.TAG_INFO[tag] + else: + self._parser = None + + def createFields(self): + yield textHandler(UInt8(self, "header", "Header"), hexadecimal) + if self["header"].value != 0xFF: + raise ParserError("JPEG: Invalid chunk header!") + yield textHandler(UInt8(self, "type", "Type"), hexadecimal) + tag = self["type"].value + if tag in (self.TAG_SOI, self.TAG_EOI): + return + yield UInt16(self, "size", "Size") + size = (self["size"].value - 2) + if 0 < size: + if self._parser: + yield self._parser(self, "content", "Chunk content", size=size*8) + else: + yield RawBytes(self, "data", size, "Data") + + def createDescription(self): + return "Chunk: %s" % self["type"].display + +class JpegFile(Parser): + endian = BIG_ENDIAN + PARSER_TAGS = { + "id": "jpeg", + "category": "image", + "file_ext": ("jpg", "jpeg"), + "mime": (u"image/jpeg",), + "magic": ( + ("\xFF\xD8\xFF\xE0", 0), # (Start Of Image, APP0) + ("\xFF\xD8\xFF\xE1", 0), # (Start Of Image, EXIF) + ("\xFF\xD8\xFF\xEE", 0), # (Start Of Image, Adobe) + ), + "min_size": 22*8, + "description": "JPEG picture", + "subfile": 
"skip", + } + + def validate(self): + if self.stream.readBytes(0, 2) != "\xFF\xD8": + return "Invalid file signature" + try: + for index, field in enumerate(self): + chunk_type = field["type"].value + if chunk_type not in JpegChunk.TAG_INFO: + return "Unknown chunk type: 0x%02X (chunk #%s)" % (chunk_type, index) + if index == 2: + # Only check 3 fields + break + except HachoirError: + return "Unable to parse at least three chunks" + return True + + def createFields(self): + while not self.eof: + chunk = JpegChunk(self, "chunk[]") + yield chunk + if chunk["type"].value == JpegChunk.TAG_SOS: + # TODO: Read JPEG image data... + break + + # TODO: is it possible to handle piped input? + if self._size is None: + raise NotImplementedError + + has_end = False + size = (self._size - self.current_size) // 8 + if size: + if 2 < size \ + and self.stream.readBytes(self._size - 16, 2) == "\xff\xd9": + has_end = True + size -= 2 + yield RawBytes(self, "data", size, "JPEG data") + if has_end: + yield JpegChunk(self, "chunk[]") + + def createDescription(self): + desc = "JPEG picture" + if "sof/content" in self: + header = self["sof/content"] + desc += ": %ux%u pixels" % (header["width"].value, header["height"].value) + return desc + + def createContentSize(self): + if "end" in self: + return self["end"].absolute_address + self["end"].size + if "data" not in self: + return None + start = self["data"].absolute_address + end = self.stream.searchBytes("\xff\xd9", start, MAX_FILESIZE*8) + if end is not None: + return end + 16 + return None + diff --git a/libs/hachoir_parser/image/pcx.py b/libs/hachoir_parser/image/pcx.py new file mode 100644 index 0000000..cb2a63b --- /dev/null +++ b/libs/hachoir_parser/image/pcx.py @@ -0,0 +1,73 @@ +""" +PCX picture filter. 
+""" + +from hachoir_parser import Parser +from hachoir_core.field import ( + UInt8, UInt16, + PaddingBytes, RawBytes, + Enum) +from hachoir_parser.image.common import PaletteRGB +from hachoir_core.endian import LITTLE_ENDIAN + +class PcxFile(Parser): + endian = LITTLE_ENDIAN + PARSER_TAGS = { + "id": "pcx", + "category": "image", + "file_ext": ("pcx",), + "mime": (u"image/x-pcx",), + "min_size": 128*8, + "description": "PC Paintbrush (PCX) picture" + } + compression_name = { 1: "Run-length encoding (RLE)" } + version_name = { + 0: u"Version 2.5 of PC Paintbrush", + 2: u"Version 2.8 with palette information", + 3: u"Version 2.8 without palette information", + 4: u"PC Paintbrush for Windows", + 5: u"Version 3.0 (or greater) of PC Paintbrush" + } + + def validate(self): + if self["id"].value != 10: + return "Wrong signature" + if self["version"].value not in self.version_name: + return "Unknown format version" + if self["bpp"].value not in (1, 2, 4, 8, 24, 32): + return "Unknown bits/pixel" + if self["reserved[0]"].value != "\0": + return "Invalid reserved value" + return True + + def createFields(self): + yield UInt8(self, "id", "PCX identifier (10)") + yield Enum(UInt8(self, "version", "PCX version"), self.version_name) + yield Enum(UInt8(self, "compression", "Compression method"), self.compression_name) + yield UInt8(self, "bpp", "Bits / pixel") + yield UInt16(self, "xmin", "Minimum X") + yield UInt16(self, "ymin", "Minimum Y") + yield UInt16(self, "xmax", "Width minus one") # value + 1 + yield UInt16(self, "ymax", "Height minus one") # value + 1 + yield UInt16(self, "horiz_dpi", "Horizontal DPI") + yield UInt16(self, "vert_dpi", "Vertical DPI") + yield PaletteRGB(self, "palette_4bits", 16, "Palette (4 bits)") + yield PaddingBytes(self, "reserved[]", 1) + yield UInt8(self, "nb_color_plan", "Number of color plans") + yield UInt16(self, "bytes_per_line", "Bytes per line") + yield UInt16(self, "color_mode", "Color mode") + yield PaddingBytes(self, "reserved[]", 58) + 
+ if self._size is None: # TODO: is it possible to handle piped input? + raise NotImplementedError + + nb_colors = 256 + size = (self._size - self.current_size)/8 + has_palette = self["bpp"].value == 8 + if has_palette: + size -= nb_colors*3 + yield RawBytes(self, "image_data", size, "Image data") + + if has_palette: + yield PaletteRGB(self, "palette_8bits", nb_colors, "Palette (8 bit)") + diff --git a/libs/hachoir_parser/image/photoshop_metadata.py b/libs/hachoir_parser/image/photoshop_metadata.py new file mode 100644 index 0000000..15fed72 --- /dev/null +++ b/libs/hachoir_parser/image/photoshop_metadata.py @@ -0,0 +1,171 @@ +""" Photoshop metadata parser. + +References: +- http://www.scribd.com/doc/32900475/Photoshop-File-Formats +""" + +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, UInt32, Float32, Enum, + SubFile, String, CString, PascalString8, + NullBytes, RawBytes) +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.tools import alignValue, createDict +from hachoir_parser.image.iptc import IPTC +from hachoir_parser.common.win32 import PascalStringWin32 + +BOOL = {0: False, 1: True} + +class Version(FieldSet): + def createFields(self): + yield UInt32(self, "version") + yield UInt8(self, "has_realm") + yield PascalStringWin32(self, "writer_name", charset="UTF-16-BE") + yield PascalStringWin32(self, "reader_name", charset="UTF-16-BE") + yield UInt32(self, "file_version") + size = (self.size - self.current_size) // 8 + if size: + yield NullBytes(self, "padding", size) + +class FixedFloat32(FieldSet): + def createFields(self): + yield UInt16(self, "int_part") + yield UInt16(self, "float_part") + + def createValue(self): + return self["int_part"].value + float(self["float_part"].value) / (1<<16) + +class ResolutionInfo(FieldSet): + def createFields(self): + yield FixedFloat32(self, "horiz_res") + yield Enum(UInt16(self, "horiz_res_unit"), {1:'px/in', 2:'px/cm'}) + yield Enum(UInt16(self, "width_unit"), 
{1:'inches', 2:'cm', 3:'points', 4:'picas', 5:'columns'}) + yield FixedFloat32(self, "vert_res") + yield Enum(UInt16(self, "vert_res_unit"), {1:'px/in', 2:'px/cm'}) + yield Enum(UInt16(self, "height_unit"), {1:'inches', 2:'cm', 3:'points', 4:'picas', 5:'columns'}) + +class PrintScale(FieldSet): + def createFields(self): + yield Enum(UInt16(self, "style"), {0:'centered', 1:'size to fit', 2:'user defined'}) + yield Float32(self, "x_location") + yield Float32(self, "y_location") + yield Float32(self, "scale") + +class PrintFlags(FieldSet): + def createFields(self): + yield Enum(UInt8(self, "labels"), BOOL) + yield Enum(UInt8(self, "crop_marks"), BOOL) + yield Enum(UInt8(self, "color_bars"), BOOL) + yield Enum(UInt8(self, "reg_marks"), BOOL) + yield Enum(UInt8(self, "negative"), BOOL) + yield Enum(UInt8(self, "flip"), BOOL) + yield Enum(UInt8(self, "interpolate"), BOOL) + yield Enum(UInt8(self, "caption"), BOOL) + yield Enum(UInt8(self, "print_flags"), BOOL) + yield Enum(UInt8(self, "unknown"), BOOL) + + def createValue(self): + return [field.name for field in self if field.value] + + def createDisplay(self): + return ', '.join(self.value) + +class PrintFlags2(FieldSet): + def createFields(self): + yield UInt16(self, "version") + yield UInt8(self, "center_crop_marks") + yield UInt8(self, "reserved") + yield UInt32(self, "bleed_width") + yield UInt16(self, "bleed_width_scale") + +class GridGuides(FieldSet): + def createFields(self): + yield UInt32(self, "version") + yield UInt32(self, "horiz_cycle", "Horizontal grid spacing, in quarter inches") + yield UInt32(self, "vert_cycle", "Vertical grid spacing, in quarter inches") + yield UInt32(self, "guide_count", "Number of guide resource blocks (can be 0)") + +class Thumbnail(FieldSet): + def createFields(self): + yield Enum(UInt32(self, "format"), {0:'Raw RGB', 1:'JPEG RGB'}) + yield UInt32(self, "width", "Width of thumbnail in pixels") + yield UInt32(self, "height", "Height of thumbnail in pixels") + yield UInt32(self, 
"widthbytes", "Padded row bytes = (width * bits per pixel + 31) / 32 * 4") + yield UInt32(self, "uncompressed_size", "Total size = widthbytes * height * planes") + yield UInt32(self, "compressed_size", "Size after compression. Used for consistency check") + yield UInt16(self, "bits_per_pixel") + yield UInt16(self, "num_planes") + yield SubFile(self, "thumbnail", self['compressed_size'].value, "Thumbnail (JPEG file)", mime_type="image/jpeg") + +class Photoshop8BIM(FieldSet): + TAG_INFO = { + 0x03ed: ("res_info", ResolutionInfo, "Resolution information"), + 0x03f3: ("print_flag", PrintFlags, "Print flags: labels, crop marks, colour bars, etc."), + 0x03f5: ("col_half_info", None, "Colour half-toning information"), + 0x03f8: ("color_trans_func", None, "Colour transfer function"), + 0x0404: ("iptc", IPTC, "IPTC/NAA"), + 0x0406: ("jpeg_qual", None, "JPEG quality"), + 0x0408: ("grid_guide", GridGuides, "Grid guides informations"), + 0x0409: ("thumb_res", Thumbnail, "Thumbnail resource (PS 4.0)"), + 0x0410: ("watermark", UInt8, "Watermark"), + 0x040a: ("copyright_flag", UInt8, "Copyright flag"), + 0x040b: ("url", None, "URL"), + 0x040c: ("thumb_res2", Thumbnail, "Thumbnail resource (PS 5.0)"), + 0x040d: ("glob_angle", UInt32, "Global lighting angle for effects"), + 0x0411: ("icc_tagged", None, "ICC untagged (1 means intentionally untagged)"), + 0x0414: ("base_layer_id", UInt32, "Base value for new layers ID's"), + 0x0416: ("indexed_colors", UInt16, "Number of colors in table that are actually defined"), + 0x0417: ("transparency_index", UInt16, "Index of transparent color"), + 0x0419: ("glob_altitude", UInt32, "Global altitude"), + 0x041a: ("slices", None, "Slices"), + 0x041e: ("url_list", None, "Unicode URLs"), + 0x0421: ("version", Version, "Version information"), + 0x0425: ("caption_digest", None, "16-byte MD5 caption digest"), + 0x0426: ("printscale", PrintScale, "Printer scaling"), + 0x2710: ("print_flag2", PrintFlags2, "Print flags (2)"), + } + TAG_NAME = 
createDict(TAG_INFO, 0) + CONTENT_HANDLER = createDict(TAG_INFO, 1) + TAG_DESC = createDict(TAG_INFO, 2) + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + try: + self._name, self.handler, self._description = self.TAG_INFO[self["tag"].value] + except KeyError: + self.handler = None + size = self["size"] + self._size = size.address + size.size + alignValue(size.value, 2) * 8 + + def createFields(self): + yield String(self, "signature", 4, "8BIM signature", charset="ASCII") + if self["signature"].value != "8BIM": + raise ParserError("Stream doesn't look like 8BIM item (wrong signature)!") + yield textHandler(UInt16(self, "tag"), hexadecimal) + if self.stream.readBytes(self.absolute_address + self.current_size, 4) != "\0\0\0\0": + yield PascalString8(self, "name") + size = 2 + (self["name"].size // 8) % 2 + yield NullBytes(self, "name_padding", size) + else: + yield String(self, "name", 4, strip="\0") + yield UInt16(self, "size") + size = alignValue(self["size"].value, 2) + if not size: + return + if self.handler: + if issubclass(self.handler, FieldSet): + yield self.handler(self, "content", size=size*8) + else: + yield self.handler(self, "content") + else: + yield RawBytes(self, "content", size) + +class PhotoshopMetadata(FieldSet): + def createFields(self): + yield CString(self, "signature", "Photoshop version") + if self["signature"].value == "Photoshop 3.0": + while not self.eof: + yield Photoshop8BIM(self, "item[]") + else: + size = (self._size - self.current_size) / 8 + yield RawBytes(self, "rawdata", size) + diff --git a/libs/hachoir_parser/image/png.py b/libs/hachoir_parser/image/png.py new file mode 100644 index 0000000..acbfc85 --- /dev/null +++ b/libs/hachoir_parser/image/png.py @@ -0,0 +1,268 @@ +""" +PNG picture file parser. 
+ +Documents: +- RFC 2083 + http://www.faqs.org/rfcs/rfc2083.html + +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, Fragment, + ParserError, MissingField, + UInt8, UInt16, UInt32, + String, CString, + Bytes, RawBytes, + Bit, NullBits, + Enum, CompressedField) +from hachoir_parser.image.common import RGB +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.endian import NETWORK_ENDIAN +from hachoir_core.tools import humanFilesize +from datetime import datetime + +MAX_FILESIZE = 500 * 1024 * 1024 # 500 MB + +try: + from zlib import decompressobj + + class Gunzip: + def __init__(self, stream): + self.gzip = decompressobj() + + def __call__(self, size, data=None): + if data is None: + data = self.gzip.unconsumed_tail + return self.gzip.decompress(data, size) + + has_deflate = True +except ImportError: + has_deflate = False + +UNIT_NAME = {1: "Meter"} +COMPRESSION_NAME = { + 0: u"deflate" # with 32K sliding window +} +MAX_CHUNK_SIZE = 5 * 1024 * 1024 # Maximum chunk size (5 MB) + +def headerParse(parent): + yield UInt32(parent, "width", "Width (pixels)") + yield UInt32(parent, "height", "Height (pixels)") + yield UInt8(parent, "bit_depth", "Bit depth") + yield NullBits(parent, "reserved", 5) + yield Bit(parent, "has_alpha", "Has alpha channel?") + yield Bit(parent, "color", "Color used?") + yield Bit(parent, "has_palette", "Has a color palette?") + yield Enum(UInt8(parent, "compression", "Compression method"), COMPRESSION_NAME) + yield UInt8(parent, "filter", "Filter method") + yield UInt8(parent, "interlace", "Interlace method") + +def headerDescription(parent): + return "Header: %ux%u pixels and %u bits/pixel" % \ + (parent["width"].value, parent["height"].value, getBitsPerPixel(parent)) + +def paletteParse(parent): + size = parent["size"].value + if (size % 3) != 0: + raise ParserError("Palette have invalid size (%s), should be 3*n!" 
% size) + nb_colors = size // 3 + for index in xrange(nb_colors): + yield RGB(parent, "color[]") + +def paletteDescription(parent): + return "Palette: %u colors" % (parent["size"].value // 3) + +def gammaParse(parent): + yield UInt32(parent, "gamma", "Gamma (x100,000)") +def gammaValue(parent): + return float(parent["gamma"].value) / 100000 +def gammaDescription(parent): + return "Gamma: %.3f" % parent.value + +def textParse(parent): + yield CString(parent, "keyword", "Keyword", charset="ISO-8859-1") + length = parent["size"].value - parent["keyword"].size/8 + if length: + yield String(parent, "text", length, "Text", charset="ISO-8859-1") + +def textDescription(parent): + if "text" in parent: + return u'Text: %s' % parent["text"].display + else: + return u'Text' + +def timestampParse(parent): + yield UInt16(parent, "year", "Year") + yield UInt8(parent, "month", "Month") + yield UInt8(parent, "day", "Day") + yield UInt8(parent, "hour", "Hour") + yield UInt8(parent, "minute", "Minute") + yield UInt8(parent, "second", "Second") + +def timestampValue(parent): + value = datetime( + parent["year"].value, parent["month"].value, parent["day"].value, + parent["hour"].value, parent["minute"].value, parent["second"].value) + return value + +def physicalParse(parent): + yield UInt32(parent, "pixel_per_unit_x", "Pixel per unit, X axis") + yield UInt32(parent, "pixel_per_unit_y", "Pixel per unit, Y axis") + yield Enum(UInt8(parent, "unit", "Unit type"), UNIT_NAME) + +def physicalDescription(parent): + x = parent["pixel_per_unit_x"].value + y = parent["pixel_per_unit_y"].value + desc = "Physical: %ux%u pixels" % (x,y) + if parent["unit"].value == 1: + desc += " per meter" + return desc + +def parseBackgroundColor(parent): + yield UInt16(parent, "red") + yield UInt16(parent, "green") + yield UInt16(parent, "blue") + +def backgroundColorDesc(parent): + rgb = parent["red"].value, parent["green"].value, parent["blue"].value + name = RGB.color_name.get(rgb) + if not name: + name = 
"#%02X%02X%02X" % rgb + return "Background color: %s" % name + + +class ImageData(Fragment): + def __init__(self, parent, name="compressed_data"): + Fragment.__init__(self, parent, name, None, 8*parent["size"].value) + data = parent.name.split('[') + data, next = "../%s[%%u]" % data[0], int(data[1][:-1]) + 1 + first = parent.getField(data % 0) + if first is parent: + first = None + if has_deflate: + CompressedField(self, Gunzip) + else: + first = first[name] + try: + _next = parent[data % next] + next = lambda: _next[name] + except MissingField: + next = None + self.setLinks(first, next) + +def parseTransparency(parent): + for i in range(parent["size"].value): + yield UInt8(parent, "alpha_value[]", "Alpha value for palette entry %i"%i) + +def getBitsPerPixel(header): + nr_component = 1 + if header["has_alpha"].value: + nr_component += 1 + if header["color"].value and not header["has_palette"].value: + nr_component += 2 + return nr_component * header["bit_depth"].value + +class Chunk(FieldSet): + TAG_INFO = { + "tIME": ("time", timestampParse, "Timestamp", timestampValue), + "pHYs": ("physical", physicalParse, physicalDescription, None), + "IHDR": ("header", headerParse, headerDescription, None), + "PLTE": ("palette", paletteParse, paletteDescription, None), + "gAMA": ("gamma", gammaParse, gammaDescription, gammaValue), + "tEXt": ("text[]", textParse, textDescription, None), + "tRNS": ("transparency", parseTransparency, "Transparency Info", None), + + "bKGD": ("background", parseBackgroundColor, backgroundColorDesc, None), + "IDAT": ("data[]", lambda parent: (ImageData(parent),), "Image data", None), + "iTXt": ("utf8_text[]", None, "International text (encoded in UTF-8)", None), + "zTXt": ("comp_text[]", None, "Compressed text", None), + "IEND": ("end", None, "End", None) + } + + def createValueFunc(self): + return self.value_func(self) + + def __init__(self, parent, name, description=None): + FieldSet.__init__(self, parent, name, description) + self._size = 
(self["size"].value + 3*4) * 8 + if MAX_CHUNK_SIZE < (self._size//8): + raise ParserError("PNG: Chunk is too big (%s)" + % humanFilesize(self._size//8)) + tag = self["tag"].value + self.desc_func = None + self.value_func = None + if tag in self.TAG_INFO: + self._name, self.parse_func, desc, value_func = self.TAG_INFO[tag] + if value_func: + self.value_func = value_func + self.createValue = self.createValueFunc + if desc: + if isinstance(desc, str): + self._description = desc + else: + self.desc_func = desc + else: + self._description = "" + self.parse_func = None + + def createFields(self): + yield UInt32(self, "size", "Size") + yield String(self, "tag", 4, "Tag", charset="ASCII") + + size = self["size"].value + if size != 0: + if self.parse_func: + for field in self.parse_func(self): + yield field + else: + yield RawBytes(self, "content", size, "Data") + yield textHandler(UInt32(self, "crc32", "CRC32"), hexadecimal) + + def createDescription(self): + if self.desc_func: + return self.desc_func(self) + else: + return "Chunk: %s" % self["tag"].display + +class PngFile(Parser): + PARSER_TAGS = { + "id": "png", + "category": "image", + "file_ext": ("png",), + "mime": (u"image/png", u"image/x-png"), + "min_size": 8*8, # just the identifier + "magic": [('\x89PNG\r\n\x1A\n', 0)], + "description": "Portable Network Graphics (PNG) picture" + } + endian = NETWORK_ENDIAN + + def validate(self): + if self["id"].value != '\x89PNG\r\n\x1A\n': + return "Invalid signature" + if self[1].name != "header": + return "First chunk is not header" + return True + + def createFields(self): + yield Bytes(self, "id", 8, r"PNG identifier ('\x89PNG\r\n\x1A\n')") + while not self.eof: + yield Chunk(self, "chunk[]") + + def createDescription(self): + header = self["header"] + desc = "PNG picture: %ux%ux%u" % ( + header["width"].value, header["height"].value, getBitsPerPixel(header)) + if header["has_alpha"].value: + desc += " (alpha layer)" + return desc + + def createContentSize(self): + field 
= self["header"] + start = field.absolute_address + field.size + end = MAX_FILESIZE * 8 + pos = self.stream.searchBytes("\0\0\0\0IEND\xae\x42\x60\x82", start, end) + if pos is not None: + return pos + 12*8 + return None + diff --git a/libs/hachoir_parser/image/psd.py b/libs/hachoir_parser/image/psd.py new file mode 100644 index 0000000..6ea09fb --- /dev/null +++ b/libs/hachoir_parser/image/psd.py @@ -0,0 +1,85 @@ +""" +Photoshop parser (.psd file). + +Creation date: 8 january 2006 +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt16, UInt32, String, NullBytes, Enum, RawBytes) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_parser.image.photoshop_metadata import Photoshop8BIM + +class Config(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = (4 + self["size"].value) * 8 + + def createFields(self): + yield UInt32(self, "size") + while not self.eof: + yield Photoshop8BIM(self, "item[]") + +class PsdFile(Parser): + endian = BIG_ENDIAN + PARSER_TAGS = { + "id": "psd", + "category": "image", + "file_ext": ("psd",), + "mime": (u"image/psd", u"image/photoshop", u"image/x-photoshop"), + "min_size": 4*8, + "magic": (("8BPS\0\1",0),), + "description": "Photoshop (PSD) picture", + } + COLOR_MODE = { + 0: u"Bitmap", + 1: u"Grayscale", + 2: u"Indexed", + 3: u"RGB color", + 4: u"CMYK color", + 7: u"Multichannel", + 8: u"Duotone", + 9: u"Lab Color", + } + COMPRESSION_NAME = { + 0: "Raw data", + 1: "RLE", + } + + def validate(self): + if self.stream.readBytes(0, 4) != "8BPS": + return "Invalid signature" + return True + + def createFields(self): + yield String(self, "signature", 4, "PSD signature (8BPS)", charset="ASCII") + yield UInt16(self, "version") + yield NullBytes(self, "reserved[]", 6) + yield UInt16(self, "nb_channels") + yield UInt32(self, "width") + yield UInt32(self, "height") + yield UInt16(self, "depth") + yield Enum(UInt16(self, "color_mode"), 
self.COLOR_MODE) + + # Mode data + yield UInt32(self, "mode_data_size") + size = self["mode_data_size"].value + if size: + yield RawBytes(self, "mode_data", size) + + # Resources + yield Config(self, "config") + + # Reserved + yield UInt32(self, "reserved_data_size") + size = self["reserved_data_size"].value + if size: + yield RawBytes(self, "reserved_data", size) + + yield Enum(UInt16(self, "compression"), self.COMPRESSION_NAME) + + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "end", size) + diff --git a/libs/hachoir_parser/image/tga.py b/libs/hachoir_parser/image/tga.py new file mode 100644 index 0000000..716ab28 --- /dev/null +++ b/libs/hachoir_parser/image/tga.py @@ -0,0 +1,85 @@ +""" +Truevision Targa Graphic (TGA) picture parser. + +Author: Victor Stinner +Creation: 18 december 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import FieldSet, UInt8, UInt16, Enum, RawBytes +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.image.common import PaletteRGB + +class Line(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["/width"].value * self["/bpp"].value + + def createFields(self): + for x in xrange(self["/width"].value): + yield UInt8(self, "pixel[]") + +class Pixels(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["/width"].value * self["/height"].value * self["/bpp"].value + + def createFields(self): + if self["/options"].value == 0: + RANGE = xrange(self["/height"].value-1,-1,-1) + else: + RANGE = xrange(self["/height"].value) + for y in RANGE: + yield Line(self, "line[%u]" % y) + +class TargaFile(Parser): + PARSER_TAGS = { + "id": "targa", + "category": "image", + "file_ext": ("tga",), + "mime": (u"image/targa", u"image/tga", u"image/x-tga"), + "min_size": 18*8, + "description": u"Truevision Targa Graphic (TGA)" + } + CODEC_NAME = { + 1: u"8-bit uncompressed", + 2: u"24-bit uncompressed", + 9: 
u"8-bit RLE", + 10: u"24-bit RLE", + } + endian = LITTLE_ENDIAN + + def validate(self): + if self["version"].value != 1: + return "Unknown version" + if self["codec"].value not in self.CODEC_NAME: + return "Unknown codec" + if self["x_min"].value != 0 or self["y_min"].value != 0: + return "(x_min, y_min) is not (0,0)" + if self["bpp"].value not in (8, 24): + return "Unknown bits/pixel value" + return True + + def createFields(self): + yield UInt8(self, "hdr_size", "Header size in bytes") + yield UInt8(self, "version", "Targa version (always one)") + yield Enum(UInt8(self, "codec", "Pixels encoding"), self.CODEC_NAME) + yield UInt16(self, "palette_ofs", "Palette absolute file offset") + yield UInt16(self, "nb_color", "Number of color") + yield UInt8(self, "color_map_size", "Color map entry size") + yield UInt16(self, "x_min") + yield UInt16(self, "y_min") + yield UInt16(self, "width") + yield UInt16(self, "height") + yield UInt8(self, "bpp", "Bits per pixel") + yield UInt8(self, "options", "Options (0: vertical mirror)") + if self["bpp"].value == 8: + yield PaletteRGB(self, "palette", 256) + if self["codec"].value == 1: + yield Pixels(self, "pixels") + else: + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "raw_pixels", size) + + diff --git a/libs/hachoir_parser/image/tiff.py b/libs/hachoir_parser/image/tiff.py new file mode 100644 index 0000000..a096212 --- /dev/null +++ b/libs/hachoir_parser/image/tiff.py @@ -0,0 +1,211 @@ +""" +TIFF image parser. 
"""
TIFF image parser.

Authors: Victor Stinner and Sebastien Ponce
Creation date: 30 september 2006
"""

from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, SeekableFieldSet, ParserError, RootSeekableFieldSet,
    UInt16, UInt32, Bytes, String)
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from hachoir_parser.image.exif import BasicIFDEntry
from hachoir_core.tools import createDict

# Reject any IFD claiming more entries than this: cheap guard against corrupted files.
MAX_COUNT = 250

class IFDEntry(BasicIFDEntry):
    """One 12-byte Image File Directory entry: tag, type, count and value/offset."""
    static_size = 12*8

    # TIFF tag id -> (field name, human-readable description)
    TAG_INFO = {
        254: ("new_subfile_type", "New subfile type"),
        255: ("subfile_type", "Subfile type"),
        256: ("img_width", "Image width in pixels"),
        257: ("img_height", "Image height in pixels"),
        258: ("bits_per_sample", "Bits per sample"),
        259: ("compression", "Compression method"),
        262: ("photo_interpret", "Photometric interpretation"),
        263: ("thres", "Thresholding"),
        264: ("cell_width", "Cellule width"),
        265: ("cell_height", "Cellule height"),
        266: ("fill_order", "Fill order"),
        269: ("doc_name", "Document name"),
        270: ("description", "Image description"),
        271: ("make", "Make"),
        272: ("model", "Model"),
        273: ("strip_ofs", "Strip offsets"),
        274: ("orientation", "Orientation"),
        277: ("sample_pixel", "Samples per pixel"),
        278: ("row_per_strip", "Rows per strip"),
        279: ("strip_byte", "Strip byte counts"),
        280: ("min_sample_value", "Min sample value"),
        281: ("max_sample_value", "Max sample value"),
        282: ("xres", "X resolution"),
        283: ("yres", "Y resolution"),
        284: ("planar_conf", "Planar configuration"),
        285: ("page_name", "Page name"),
        286: ("xpos", "X position"),
        287: ("ypos", "Y position"),
        288: ("free_ofs", "Free offsets"),
        289: ("free_byte", "Free byte counts"),
        290: ("gray_resp_unit", "Gray response unit"),
        291: ("gray_resp_curve", "Gray response curve"),
        292: ("group3_opt", "Group 3 options"),
        293: ("group4_opt", "Group 4 options"),
        296: ("res_unit", "Resolution unit"),
        297: ("page_nb", "Page number"),
        301: ("color_respt_curve", "Color response curves"),
        305: ("software", "Software"),
        306: ("date_time", "Date time"),
        315: ("artist", "Artist"),
        316: ("host_computer", "Host computer"),
        317: ("predicator", "Predicator"),
        318: ("white_pt", "White point"),
        319: ("prim_chomat", "Primary chromaticities"),
        320: ("color_map", "Color map"),
        321: ("half_tone_hints", "Halftone Hints"),
        322: ("tile_width", "TileWidth"),
        323: ("tile_length", "TileLength"),
        324: ("tile_offsets", "TileOffsets"),
        325: ("tile_byte_counts", "TileByteCounts"),
        332: ("ink_set", "InkSet"),
        333: ("ink_names", "InkNames"),
        334: ("number_of_inks", "NumberOfInks"),
        336: ("dot_range", "DotRange"),
        337: ("target_printer", "TargetPrinter"),
        338: ("extra_samples", "ExtraSamples"),
        339: ("sample_format", "SampleFormat"),
        340: ("smin_sample_value", "SMinSampleValue"),
        341: ("smax_sample_value", "SMaxSampleValue"),
        342: ("transfer_range", "TransferRange"),
        512: ("jpeg_proc", "JPEGProc"),
        513: ("jpeg_interchange_format", "JPEGInterchangeFormat"),
        514: ("jpeg_interchange_format_length", "JPEGInterchangeFormatLength"),
        515: ("jpeg_restart_interval", "JPEGRestartInterval"),
        517: ("jpeg_lossless_predictors", "JPEGLosslessPredictors"),
        518: ("jpeg_point_transforms", "JPEGPointTransforms"),
        519: ("jpeg_qtables", "JPEGQTables"),
        520: ("jpeg_dctables", "JPEGDCTables"),
        521: ("jpeg_actables", "JPEGACTables"),
        529: ("ycbcr_coefficients", "YCbCrCoefficients"),
        530: ("ycbcr_subsampling", "YCbCrSubSampling"),
        531: ("ycbcr_positioning", "YCbCrPositioning"),
        532: ("reference_blackwhite", "ReferenceBlackWhite"),
        33432: ("copyright", "Copyright"),
        0x8769: ("ifd_pointer", "Pointer to next IFD entry"),
    }
    TAG_NAME = createDict(TAG_INFO, 0)

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        tag = self["tag"].value
        if tag in self.TAG_INFO:
            # Known tag: rename this entry after it.
            self._name, self._description = self.TAG_INFO[tag]
        else:
            # NOTE(review): _parser is only reset for unknown tags; presumably
            # BasicIFDEntry provides it for known ones — confirm against exif.py.
            self._parser = None

class IFD(FieldSet):
    """An Image File Directory: 16-bit entry count followed by fixed-size entries."""
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # 16 bits for "count" plus the 12-byte entries themselves.
        self._size = 16 + self["count"].value * IFDEntry.static_size
        self._has_offset = False

    def createFields(self):
        yield UInt16(self, "count")
        if MAX_COUNT < self["count"].value:
            raise ParserError("TIFF IFD: Invalid count (%s)"
                % self["count"].value)
        for index in xrange(self["count"].value):
            yield IFDEntry(self, "entry[]")

class ImageFile(SeekableFieldSet):
    """Data referenced by one IFD: entry payloads and, when present, image strips."""
    def __init__(self, parent, name, description, ifd):
        SeekableFieldSet.__init__(self, parent, name, description, None)
        self._has_offset = False
        self._ifd = ifd

    def createFields(self):
        datas = {}
        for entry in self._ifd:
            if not isinstance(entry, IFDEntry):
                continue
            for child in entry:
                if child.name != "offset":
                    continue
                # Jump to the payload this entry points at.
                self.seekByte(child.value, False)
                # BUGFIX: the original ended this line with a trailing comma,
                # silently turning the description into a 1-tuple.
                desc = "data of ifd entry " + entry.name
                entryType = BasicIFDEntry.ENTRY_FORMAT[entry["type"].value]
                count = entry["count"].value
                if entryType == String:
                    yield String(self, entry.name, count, desc, "\0", "ISO-8859-1")
                else:
                    d = Data(self, entry.name, desc, entryType, count)
                    datas[d.name] = d
                    yield d
                break
        # Image data: strips located via the strip offset / byte-count entries.
        if "strip_ofs" in datas and "strip_byte" in datas:
            for i in xrange(datas["strip_byte"]._count):
                self.seekByte(datas["strip_ofs"]["value[" + str(i) + "]"].value, False)
                yield Bytes(self, "strip[]", datas["strip_byte"]["value[" + str(i) + "]"].value)

class Data(FieldSet):
    """Fixed-type array payload of an IFD entry: `count` values of one field type."""
    def __init__(self, parent, name, desc, type, count):
        size = type.static_size * count
        FieldSet.__init__(self, parent, name, desc, size)
        self._count = count
        self._type = type

    def createFields(self):
        for i in xrange(self._count):
            yield self._type(self, "value[]")

class TiffFile(RootSeekableFieldSet, Parser):
    """TIFF picture: endian marker, version, then a chain of IFDs and their data."""
    PARSER_TAGS = {
        "id": "tiff",
        "category": "image",
        "file_ext": ("tif", "tiff"),
        "mime": (u"image/tiff",),
        "min_size": 8*8,
# TODO: Re-enable magic
        "magic": (("II\x2A\0", 0), ("MM\0\x2A", 0)),
        "description": "TIFF picture"
    }

    # Correct endian is set in constructor
    endian = LITTLE_ENDIAN

    def __init__(self, stream, **args):
        RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
        # "MM" marks a big-endian file, "II" a little-endian one.
        if self.stream.readBytes(0, 2) == "MM":
            self.endian = BIG_ENDIAN
        Parser.__init__(self, stream, **args)

    def validate(self):
        endian = self.stream.readBytes(0, 2)
        if endian not in ("MM", "II"):
            return "Invalid endian (%r)" % endian
        if self["version"].value != 42:
            return "Unknown TIFF version"
        return True

    def createFields(self):
        yield String(self, "endian", 2, 'Endian ("II" or "MM")', charset="ASCII")
        yield UInt16(self, "version", "TIFF version number")
        offset = UInt32(self, "img_dir_ofs[]", "Next image directory offset (in bytes from the beginning)")
        yield offset
        ifds = []
        # Follow the IFD chain until a null "next directory" offset.
        while offset.value != 0:
            self.seekByte(offset.value, relative=False)
            ifd = IFD(self, "ifd[]", "Image File Directory", None)
            ifds.append(ifd)
            yield ifd
            offset = UInt32(self, "img_dir_ofs[]", "Next image directory offset (in bytes from the beginning)")
            yield offset
        # Emit the data referenced by each directory.
        for ifd in ifds:
            yield ImageFile(self, "image[]", "Image File", ifd)
+ +Documentation: + - Microsoft Windows Metafile; also known as: WMF, + Enhanced Metafile, EMF, APM + http://wvware.sourceforge.net/caolan/ora-wmf.html + - libwmf source code: + - include/libwmf/defs.h: enums + - src/player/meta.h: arguments parsers + - libemf source code + +Author: Victor Stinner +Creation date: 26 december 2006 +""" + +MAX_FILESIZE = 50 * 1024 * 1024 + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, StaticFieldSet, Enum, + MissingField, ParserError, + UInt32, Int32, UInt16, Int16, UInt8, NullBytes, RawBytes, String) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.tools import createDict +from hachoir_parser.image.common import RGBA + +POLYFILL_MODE = {1: "Alternate", 2: "Winding"} + +BRUSH_STYLE = { + 0: u"Solid", + 1: u"Null", + 2: u"Hollow", + 3: u"Pattern", + 4: u"Indexed", + 5: u"DIB pattern", + 6: u"DIB pattern point", + 7: u"Pattern 8x8", + 8: u"DIB pattern 8x8", +} + +HATCH_STYLE = { + 0: u"Horizontal", # ----- + 1: u"Vertical", # ||||| + 2: u"FDIAGONAL", # \\\\\ + 3: u"BDIAGONAL", # ///// + 4: u"Cross", # +++++ + 5: u"Diagonal cross", # xxxxx +} + +PEN_STYLE = { + 0: u"Solid", + 1: u"Dash", # ------- + 2: u"Dot", # ....... 
+ 3: u"Dash dot", # _._._._ + 4: u"Dash dot dot", # _.._.._ + 5: u"Null", + 6: u"Inside frame", + 7: u"User style", + 8: u"Alternate", +} + +# Binary raster operations +ROP2_DESC = { + 1: u"Black (0)", + 2: u"Not merge pen (DPon)", + 3: u"Mask not pen (DPna)", + 4: u"Not copy pen (PN)", + 5: u"Mask pen not (PDna)", + 6: u"Not (Dn)", + 7: u"Xor pen (DPx)", + 8: u"Not mask pen (DPan)", + 9: u"Mask pen (DPa)", + 10: u"Not xor pen (DPxn)", + 11: u"No operation (D)", + 12: u"Merge not pen (DPno)", + 13: u"Copy pen (P)", + 14: u"Merge pen not (PDno)", + 15: u"Merge pen (DPo)", + 16: u"White (1)", +} + +def parseXY(parser): + yield Int16(parser, "x") + yield Int16(parser, "y") + +def parseCreateBrushIndirect(parser): + yield Enum(UInt16(parser, "brush_style"), BRUSH_STYLE) + yield RGBA(parser, "color") + yield Enum(UInt16(parser, "brush_hatch"), HATCH_STYLE) + +def parsePenIndirect(parser): + yield Enum(UInt16(parser, "pen_style"), PEN_STYLE) + yield UInt16(parser, "pen_width") + yield UInt16(parser, "pen_height") + yield RGBA(parser, "color") + +def parsePolyFillMode(parser): + yield Enum(UInt16(parser, "operation"), POLYFILL_MODE) + +def parseROP2(parser): + yield Enum(UInt16(parser, "operation"), ROP2_DESC) + +def parseObjectID(parser): + yield UInt16(parser, "object_id") + +class Point(FieldSet): + static_size = 32 + def createFields(self): + yield Int16(self, "x") + yield Int16(self, "y") + def createDescription(self): + return "Point (%s, %s)" % (self["x"].value, self["y"].value) + +def parsePolygon(parser): + yield UInt16(parser, "count") + for index in xrange(parser["count"].value): + yield Point(parser, "point[]") + +META = { + 0x0000: ("EOF", u"End of file", None), + 0x001E: ("SAVEDC", u"Save device context", None), + 0x0035: ("REALIZEPALETTE", u"Realize palette", None), + 0x0037: ("SETPALENTRIES", u"Set palette entries", None), + 0x00f7: ("CREATEPALETTE", u"Create palette", None), + 0x0102: ("SETBKMODE", u"Set background mode", None), + 0x0103: ("SETMAPMODE", 
u"Set mapping mode", None), + 0x0104: ("SETROP2", u"Set foreground mix mode", parseROP2), + 0x0106: ("SETPOLYFILLMODE", u"Set polygon fill mode", parsePolyFillMode), + 0x0107: ("SETSTRETCHBLTMODE", u"Set bitmap streching mode", None), + 0x0108: ("SETTEXTCHAREXTRA", u"Set text character extra", None), + 0x0127: ("RESTOREDC", u"Restore device context", None), + 0x012A: ("INVERTREGION", u"Invert region", None), + 0x012B: ("PAINTREGION", u"Paint region", None), + 0x012C: ("SELECTCLIPREGION", u"Select clipping region", None), + 0x012D: ("SELECTOBJECT", u"Select object", parseObjectID), + 0x012E: ("SETTEXTALIGN", u"Set text alignment", None), + 0x0142: ("CREATEDIBPATTERNBRUSH", u"Create DIB brush with specified pattern", None), + 0x01f0: ("DELETEOBJECT", u"Delete object", parseObjectID), + 0x0201: ("SETBKCOLOR", u"Set background color", None), + 0x0209: ("SETTEXTCOLOR", u"Set text color", None), + 0x020A: ("SETTEXTJUSTIFICATION", u"Set text justification", None), + 0x020B: ("SETWINDOWORG", u"Set window origin", parseXY), + 0x020C: ("SETWINDOWEXT", u"Set window extends", parseXY), + 0x020D: ("SETVIEWPORTORG", u"Set view port origin", None), + 0x020E: ("SETVIEWPORTEXT", u"Set view port extends", None), + 0x020F: ("OFFSETWINDOWORG", u"Offset window origin", None), + 0x0211: ("OFFSETVIEWPORTORG", u"Offset view port origin", None), + 0x0213: ("LINETO", u"Draw a line to", None), + 0x0214: ("MOVETO", u"Move to", None), + 0x0220: ("OFFSETCLIPRGN", u"Offset clipping rectangle", None), + 0x0228: ("FILLREGION", u"Fill region", None), + 0x0231: ("SETMAPPERFLAGS", u"Set mapper flags", None), + 0x0234: ("SELECTPALETTE", u"Select palette", None), + 0x02FB: ("CREATEFONTINDIRECT", u"Create font indirect", None), + 0x02FA: ("CREATEPENINDIRECT", u"Create pen indirect", parsePenIndirect), + 0x02FC: ("CREATEBRUSHINDIRECT", u"Create brush indirect", parseCreateBrushIndirect), + 0x0324: ("POLYGON", u"Draw a polygon", parsePolygon), + 0x0325: ("POLYLINE", u"Draw a polyline", None), + 0x0410: 
("SCALEWINDOWEXT", u"Scale window extends", None), + 0x0412: ("SCALEVIEWPORTEXT", u"Scale view port extends", None), + 0x0415: ("EXCLUDECLIPRECT", u"Exclude clipping rectangle", None), + 0x0416: ("INTERSECTCLIPRECT", u"Intersect clipping rectangle", None), + 0x0418: ("ELLIPSE", u"Draw an ellipse", None), + 0x0419: ("FLOODFILL", u"Flood fill", None), + 0x041B: ("RECTANGLE", u"Draw a rectangle", None), + 0x041F: ("SETPIXEL", u"Set pixel", None), + 0x0429: ("FRAMEREGION", u"Fram region", None), + 0x0521: ("TEXTOUT", u"Draw text", None), + 0x0538: ("POLYPOLYGON", u"Draw multiple polygons", None), + 0x0548: ("EXTFLOODFILL", u"Extend flood fill", None), + 0x061C: ("ROUNDRECT", u"Draw a rounded rectangle", None), + 0x061D: ("PATBLT", u"Pattern blitting", None), + 0x0626: ("ESCAPE", u"Escape", None), + 0x06FF: ("CREATEREGION", u"Create region", None), + 0x0817: ("ARC", u"Draw an arc", None), + 0x081A: ("PIE", u"Draw a pie", None), + 0x0830: ("CHORD", u"Draw a chord", None), + 0x0940: ("DIBBITBLT", u"DIB bit blitting", None), + 0x0a32: ("EXTTEXTOUT", u"Draw text (extra)", None), + 0x0b41: ("DIBSTRETCHBLT", u"DIB stretch blitting", None), + 0x0d33: ("SETDIBTODEV", u"Set DIB to device", None), + 0x0f43: ("STRETCHDIB", u"Stretch DIB", None), +} +META_NAME = createDict(META, 0) +META_DESC = createDict(META, 1) + +#---------------------------------------------------------------------------- +# EMF constants + +# EMF mapping modes +EMF_MAPPING_MODE = { + 1: "TEXT", + 2: "LOMETRIC", + 3: "HIMETRIC", + 4: "LOENGLISH", + 5: "HIENGLISH", + 6: "TWIPS", + 7: "ISOTROPIC", + 8: "ANISOTROPIC", +} + +#---------------------------------------------------------------------------- +# EMF parser + +def parseEmfMappingMode(parser): + yield Enum(Int32(parser, "mapping_mode"), EMF_MAPPING_MODE) + +def parseXY32(parser): + yield Int32(parser, "x") + yield Int32(parser, "y") + +def parseObjectID32(parser): + yield textHandler(UInt32(parser, "object_id"), hexadecimal) + +def 
parseBrushIndirect(parser): + yield UInt32(parser, "ihBrush") + yield UInt32(parser, "style") + yield RGBA(parser, "color") + yield Int32(parser, "hatch") + +class Point16(FieldSet): + static_size = 32 + def createFields(self): + yield Int16(self, "x") + yield Int16(self, "y") + def createDescription(self): + return "Point16: (%i,%i)" % (self["x"].value, self["y"].value) + +def parsePoint16array(parser): + yield RECT32(parser, "bounds") + yield UInt32(parser, "count") + for index in xrange(parser["count"].value): + yield Point16(parser, "point[]") + +def parseGDIComment(parser): + yield UInt32(parser, "data_size") + size = parser["data_size"].value + if size: + yield RawBytes(parser, "data", size) + +def parseICMMode(parser): + yield UInt32(parser, "icm_mode") + +def parseExtCreatePen(parser): + yield UInt32(parser, "ihPen") + yield UInt32(parser, "offBmi") + yield UInt32(parser, "cbBmi") + yield UInt32(parser, "offBits") + yield UInt32(parser, "cbBits") + yield UInt32(parser, "pen_style") + yield UInt32(parser, "width") + yield UInt32(parser, "brush_style") + yield RGBA(parser, "color") + yield UInt32(parser, "hatch") + yield UInt32(parser, "nb_style") + for index in xrange(parser["nb_style"].value): + yield UInt32(parser, "style") + +EMF_META = { + 1: ("HEADER", u"Header", None), + 2: ("POLYBEZIER", u"Draw poly bezier", None), + 3: ("POLYGON", u"Draw polygon", None), + 4: ("POLYLINE", u"Draw polyline", None), + 5: ("POLYBEZIERTO", u"Draw poly bezier to", None), + 6: ("POLYLINETO", u"Draw poly line to", None), + 7: ("POLYPOLYLINE", u"Draw poly polyline", None), + 8: ("POLYPOLYGON", u"Draw poly polygon", None), + 9: ("SETWINDOWEXTEX", u"Set window extend EX", parseXY32), + 10: ("SETWINDOWORGEX", u"Set window origin EX", parseXY32), + 11: ("SETVIEWPORTEXTEX", u"Set viewport extend EX", parseXY32), + 12: ("SETVIEWPORTORGEX", u"Set viewport origin EX", parseXY32), + 13: ("SETBRUSHORGEX", u"Set brush org EX", None), + 14: ("EOF", u"End of file", None), + 15: 
("SETPIXELV", u"Set pixel V", None), + 16: ("SETMAPPERFLAGS", u"Set mapper flags", None), + 17: ("SETMAPMODE", u"Set mapping mode", parseEmfMappingMode), + 18: ("SETBKMODE", u"Set background mode", None), + 19: ("SETPOLYFILLMODE", u"Set polyfill mode", None), + 20: ("SETROP2", u"Set ROP2", None), + 21: ("SETSTRETCHBLTMODE", u"Set stretching blitting mode", None), + 22: ("SETTEXTALIGN", u"Set text align", None), + 23: ("SETCOLORADJUSTMENT", u"Set color adjustment", None), + 24: ("SETTEXTCOLOR", u"Set text color", None), + 25: ("SETBKCOLOR", u"Set background color", None), + 26: ("OFFSETCLIPRGN", u"Offset clipping region", None), + 27: ("MOVETOEX", u"Move to EX", parseXY32), + 28: ("SETMETARGN", u"Set meta region", None), + 29: ("EXCLUDECLIPRECT", u"Exclude clipping rectangle", None), + 30: ("INTERSECTCLIPRECT", u"Intersect clipping rectangle", None), + 31: ("SCALEVIEWPORTEXTEX", u"Scale viewport extend EX", None), + 32: ("SCALEWINDOWEXTEX", u"Scale window extend EX", None), + 33: ("SAVEDC", u"Save device context", None), + 34: ("RESTOREDC", u"Restore device context", None), + 35: ("SETWORLDTRANSFORM", u"Set world transform", None), + 36: ("MODIFYWORLDTRANSFORM", u"Modify world transform", None), + 37: ("SELECTOBJECT", u"Select object", parseObjectID32), + 38: ("CREATEPEN", u"Create pen", None), + 39: ("CREATEBRUSHINDIRECT", u"Create brush indirect", parseBrushIndirect), + 40: ("DELETEOBJECT", u"Delete object", parseObjectID32), + 41: ("ANGLEARC", u"Draw angle arc", None), + 42: ("ELLIPSE", u"Draw ellipse", None), + 43: ("RECTANGLE", u"Draw rectangle", None), + 44: ("ROUNDRECT", u"Draw rounded rectangle", None), + 45: ("ARC", u"Draw arc", None), + 46: ("CHORD", u"Draw chord", None), + 47: ("PIE", u"Draw pie", None), + 48: ("SELECTPALETTE", u"Select palette", None), + 49: ("CREATEPALETTE", u"Create palette", None), + 50: ("SETPALETTEENTRIES", u"Set palette entries", None), + 51: ("RESIZEPALETTE", u"Resize palette", None), + 52: ("REALIZEPALETTE", u"Realize palette", 
None), + 53: ("EXTFLOODFILL", u"EXT flood fill", None), + 54: ("LINETO", u"Draw line to", parseXY32), + 55: ("ARCTO", u"Draw arc to", None), + 56: ("POLYDRAW", u"Draw poly draw", None), + 57: ("SETARCDIRECTION", u"Set arc direction", None), + 58: ("SETMITERLIMIT", u"Set miter limit", None), + 59: ("BEGINPATH", u"Begin path", None), + 60: ("ENDPATH", u"End path", None), + 61: ("CLOSEFIGURE", u"Close figure", None), + 62: ("FILLPATH", u"Fill path", None), + 63: ("STROKEANDFILLPATH", u"Stroke and fill path", None), + 64: ("STROKEPATH", u"Stroke path", None), + 65: ("FLATTENPATH", u"Flatten path", None), + 66: ("WIDENPATH", u"Widen path", None), + 67: ("SELECTCLIPPATH", u"Select clipping path", None), + 68: ("ABORTPATH", u"Arbort path", None), + 70: ("GDICOMMENT", u"GDI comment", parseGDIComment), + 71: ("FILLRGN", u"Fill region", None), + 72: ("FRAMERGN", u"Frame region", None), + 73: ("INVERTRGN", u"Invert region", None), + 74: ("PAINTRGN", u"Paint region", None), + 75: ("EXTSELECTCLIPRGN", u"EXT select clipping region", None), + 76: ("BITBLT", u"Bit blitting", None), + 77: ("STRETCHBLT", u"Stretch blitting", None), + 78: ("MASKBLT", u"Mask blitting", None), + 79: ("PLGBLT", u"PLG blitting", None), + 80: ("SETDIBITSTODEVICE", u"Set DIB bits to device", None), + 81: ("STRETCHDIBITS", u"Stretch DIB bits", None), + 82: ("EXTCREATEFONTINDIRECTW", u"EXT create font indirect W", None), + 83: ("EXTTEXTOUTA", u"EXT text out A", None), + 84: ("EXTTEXTOUTW", u"EXT text out W", None), + 85: ("POLYBEZIER16", u"Draw poly bezier (16-bit)", None), + 86: ("POLYGON16", u"Draw polygon (16-bit)", parsePoint16array), + 87: ("POLYLINE16", u"Draw polyline (16-bit)", parsePoint16array), + 88: ("POLYBEZIERTO16", u"Draw poly bezier to (16-bit)", parsePoint16array), + 89: ("POLYLINETO16", u"Draw polyline to (16-bit)", parsePoint16array), + 90: ("POLYPOLYLINE16", u"Draw poly polyline (16-bit)", None), + 91: ("POLYPOLYGON16", u"Draw poly polygon (16-bit)", parsePoint16array), + 92: 
("POLYDRAW16", u"Draw poly draw (16-bit)", None), + 93: ("CREATEMONOBRUSH", u"Create monobrush", None), + 94: ("CREATEDIBPATTERNBRUSHPT", u"Create DIB pattern brush PT", None), + 95: ("EXTCREATEPEN", u"EXT create pen", parseExtCreatePen), + 96: ("POLYTEXTOUTA", u"Poly text out A", None), + 97: ("POLYTEXTOUTW", u"Poly text out W", None), + 98: ("SETICMMODE", u"Set ICM mode", parseICMMode), + 99: ("CREATECOLORSPACE", u"Create color space", None), + 100: ("SETCOLORSPACE", u"Set color space", None), + 101: ("DELETECOLORSPACE", u"Delete color space", None), + 102: ("GLSRECORD", u"GLS record", None), + 103: ("GLSBOUNDEDRECORD", u"GLS bound ED record", None), + 104: ("PIXELFORMAT", u"Pixel format", None), +} +EMF_META_NAME = createDict(EMF_META, 0) +EMF_META_DESC = createDict(EMF_META, 1) + +class Function(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + if self.root.isEMF(): + self._size = self["size"].value * 8 + else: + self._size = self["size"].value * 16 + + def createFields(self): + if self.root.isEMF(): + yield Enum(UInt32(self, "function"), EMF_META_NAME) + yield UInt32(self, "size") + try: + parser = EMF_META[self["function"].value][2] + except KeyError: + parser = None + else: + yield UInt32(self, "size") + yield Enum(UInt16(self, "function"), META_NAME) + try: + parser = META[self["function"].value][2] + except KeyError: + parser = None + if parser: + for field in parser(self): + yield field + else: + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "data", size) + + def isValid(self): + func = self["function"] + return func.value in func.getEnum() + + def createDescription(self): + if self.root.isEMF(): + return EMF_META_DESC[self["function"].value] + try: + return META_DESC[self["function"].value] + except KeyError: + return "Function %s" % self["function"].display + +class RECT16(StaticFieldSet): + format = ( + (Int16, "left"), + (Int16, "top"), + (Int16, "right"), + (Int16, "bottom"), + ) + def 
createDescription(self): + return "%s: %ux%u at (%u,%u)" % ( + self.__class__.__name__, + self["right"].value-self["left"].value, + self["bottom"].value-self["top"].value, + self["left"].value, + self["top"].value) + +class RECT32(RECT16): + format = ( + (Int32, "left"), + (Int32, "top"), + (Int32, "right"), + (Int32, "bottom"), + ) + +class PlaceableHeader(FieldSet): + """ + Header of Placeable Metafile (file extension .APM), + created by Aldus Corporation + """ + MAGIC = "\xD7\xCD\xC6\x9A\0\0" # (magic, handle=0x0000) + + def createFields(self): + yield textHandler(UInt32(self, "signature", "Placeable Metafiles signature (0x9AC6CDD7)"), hexadecimal) + yield UInt16(self, "handle") + yield RECT16(self, "rect") + yield UInt16(self, "inch") + yield NullBytes(self, "reserved", 4) + yield textHandler(UInt16(self, "checksum"), hexadecimal) + +class EMF_Header(FieldSet): + MAGIC = "\x20\x45\x4D\x46\0\0" # (magic, min_ver=0x0000) + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["size"].value * 8 + + def createFields(self): + LONG = Int32 + yield UInt32(self, "type", "Record type (always 1)") + yield UInt32(self, "size", "Size of the header in bytes") + yield RECT32(self, "Bounds", "Inclusive bounds") + yield RECT32(self, "Frame", "Inclusive picture frame") + yield textHandler(UInt32(self, "signature", "Signature ID (always 0x464D4520)"), hexadecimal) + yield UInt16(self, "min_ver", "Minor version") + yield UInt16(self, "maj_ver", "Major version") + yield UInt32(self, "file_size", "Size of the file in bytes") + yield UInt32(self, "NumOfRecords", "Number of records in the metafile") + yield UInt16(self, "NumOfHandles", "Number of handles in the handle table") + yield NullBytes(self, "reserved", 2) + yield UInt32(self, "desc_size", "Size of description in 16-bit words") + yield UInt32(self, "desc_ofst", "Offset of description string in metafile") + yield UInt32(self, "nb_colors", "Number of color palette entries") + yield LONG(self, 
"width_px", "Width of reference device in pixels") + yield LONG(self, "height_px", "Height of reference device in pixels") + yield LONG(self, "width_mm", "Width of reference device in millimeters") + yield LONG(self, "height_mm", "Height of reference device in millimeters") + + # Read description (if any) + offset = self["desc_ofst"].value + current = (self.absolute_address + self.current_size) // 8 + size = self["desc_size"].value * 2 + if offset == current and size: + yield String(self, "description", size, charset="UTF-16-LE", strip="\0 ") + + # Read padding (if any) + size = self["size"].value - self.current_size//8 + if size: + yield RawBytes(self, "padding", size) + +class WMF_File(Parser): + PARSER_TAGS = { + "id": "wmf", + "category": "image", + "file_ext": ("wmf", "apm", "emf"), + "mime": ( + u"image/wmf", u"image/x-wmf", u"image/x-win-metafile", + u"application/x-msmetafile", u"application/wmf", u"application/x-wmf", + u"image/x-emf"), + "magic": ( + (PlaceableHeader.MAGIC, 0), + (EMF_Header.MAGIC, 40*8), + # WMF: file_type=memory, header size=9, version=3.0 + ("\0\0\x09\0\0\3", 0), + # WMF: file_type=disk, header size=9, version=3.0 + ("\1\0\x09\0\0\3", 0), + ), + "min_size": 40*8, + "description": u"Microsoft Windows Metafile (WMF)", + } + endian = LITTLE_ENDIAN + FILE_TYPE = {0: "memory", 1: "disk"} + + def validate(self): + if self.isEMF(): + # Check EMF header + emf = self["emf_header"] + if emf["signature"].value != 0x464D4520: + return "Invalid signature" + if emf["type"].value != 1: + return "Invalid record type" + if emf["reserved"].value != "\0\0": + return "Invalid reserved" + else: + # Check AMF header + if self.isAPM(): + amf = self["amf_header"] + if amf["handle"].value != 0: + return "Invalid handle" + if amf["reserved"].value != "\0\0\0\0": + return "Invalid reserved" + + # Check common header + if self["file_type"].value not in (0, 1): + return "Invalid file type" + if self["header_size"].value != 9: + return "Invalid header size" + if 
self["nb_params"].value != 0: + return "Invalid number of parameters" + + # Check first functions + for index in xrange(5): + try: + func = self["func[%u]" % index] + except MissingField: + if self.done: + return True + return "Unable to get function #%u" % index + except ParserError: + return "Unable to create function #%u" % index + + # Check first frame values + if not func.isValid(): + return "Function #%u is invalid" % index + return True + + def createFields(self): + if self.isEMF(): + yield EMF_Header(self, "emf_header") + else: + if self.isAPM(): + yield PlaceableHeader(self, "amf_header") + yield Enum(UInt16(self, "file_type"), self.FILE_TYPE) + yield UInt16(self, "header_size", "Size of header in 16-bit words (always 9)") + yield UInt8(self, "win_ver_min", "Minor version of Microsoft Windows") + yield UInt8(self, "win_ver_maj", "Major version of Microsoft Windows") + yield UInt32(self, "file_size", "Total size of the metafile in 16-bit words") + yield UInt16(self, "nb_obj", "Number of objects in the file") + yield UInt32(self, "max_record_size", "The size of largest record in 16-bit words") + yield UInt16(self, "nb_params", "Not Used (always 0)") + + while not(self.eof): + yield Function(self, "func[]") + + def isEMF(self): + """File is in EMF format?""" + if 1 <= self.current_length: + return self[0].name == "emf_header" + if self.size < 44*8: + return False + magic = EMF_Header.MAGIC + return self.stream.readBytes(40*8, len(magic)) == magic + + def isAPM(self): + """File is in Aldus Placeable Metafiles format?""" + if 1 <= self.current_length: + return self[0].name == "amf_header" + else: + magic = PlaceableHeader.MAGIC + return (self.stream.readBytes(0, len(magic)) == magic) + + def createDescription(self): + if self.isEMF(): + return u"Microsoft Enhanced Metafile (EMF) picture" + elif self.isAPM(): + return u"Aldus Placeable Metafile (APM) picture" + else: + return u"Microsoft Windows Metafile (WMF) picture" + + def createMimeType(self): + if 
self.isEMF(): + return u"image/x-emf" + else: + return u"image/wmf" + + def createContentSize(self): + if self.isEMF(): + return None + start = self["func[0]"].absolute_address + end = self.stream.searchBytes("\3\0\0\0\0\0", start, MAX_FILESIZE * 8) + if end is not None: + return end + 6*8 + return None + diff --git a/libs/hachoir_parser/image/xcf.py b/libs/hachoir_parser/image/xcf.py new file mode 100644 index 0000000..f0bfa30 --- /dev/null +++ b/libs/hachoir_parser/image/xcf.py @@ -0,0 +1,331 @@ +""" +Gimp image parser (XCF file, ".xcf" extension). + +You can find informations about XCF file in Gimp source code. URL to read +CVS online: + http://cvs.gnome.org/viewcvs/gimp/app/xcf/ + \--> files xcf-read.c and xcf-load.c + +Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (StaticFieldSet, FieldSet, ParserError, + UInt8, UInt32, Enum, Float32, String, PascalString32, RawBytes) +from hachoir_parser.image.common import RGBA +from hachoir_core.endian import NETWORK_ENDIAN + +class XcfCompression(FieldSet): + static_size = 8 + COMPRESSION_NAME = { + 0: u"None", + 1: u"RLE", + 2: u"Zlib", + 3: u"Fractal" + } + + def createFields(self): + yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME) + +class XcfResolution(StaticFieldSet): + format = ( + (Float32, "xres", "X resolution in DPI"), + (Float32, "yres", "Y resolution in DPI") + ) + +class XcfTattoo(StaticFieldSet): + format = ((UInt32, "tattoo", "Tattoo"),) + +class LayerOffsets(StaticFieldSet): + format = ( + (UInt32, "ofst_x", "Offset X"), + (UInt32, "ofst_y", "Offset Y") + ) + +class LayerMode(FieldSet): + static_size = 32 + MODE_NAME = { + 0: u"Normal", + 1: u"Dissolve", + 2: u"Behind", + 3: u"Multiply", + 4: u"Screen", + 5: u"Overlay", + 6: u"Difference", + 7: u"Addition", + 8: u"Subtract", + 9: u"Darken only", + 10: u"Lighten only", + 11: u"Hue", + 12: u"Saturation", + 13: u"Color", + 14: u"Value", + 15: u"Divide", + 16: u"Dodge", + 17: 
class GimpBoolean(UInt32):
    """32-bit integer interpreted as a boolean: True only when the value equals 1."""
    def __init__(self, parent, name):
        UInt32.__init__(self, parent, name)

    def createValue(self):
        return 1 == UInt32.createValue(self)

class XcfUnit(StaticFieldSet):
    """A single 32-bit unit identifier."""
    format = ((UInt32, "unit", "Unit"),)

class XcfParasiteEntry(FieldSet):
    """One parasite: a named, flagged blob of UTF-8 data."""
    def createFields(self):
        yield PascalString32(self, "name", "Name", strip="\0", charset="UTF-8")
        yield UInt32(self, "flags", "Flags")
        yield PascalString32(self, "data", "Data", strip=" \0", charset="UTF-8")

class XcfLevel(FieldSet):
    """One hierarchy level: a table of data offsets followed by the data chunks."""
    def createFields(self):
        yield UInt32(self, "width", "Width in pixel")
        yield UInt32(self, "height", "Height in pixel")
        yield UInt32(self, "offset", "Offset")
        offset = self["offset"].value
        if offset == 0:
            # No data attached to this level.
            return
        entries = []
        # Read data offsets until the stream position reaches "offset".
        while (self.absolute_address + self.current_size)/8 < offset:
            entry = UInt32(self, "data_offset[]", "Data offset")
            yield entry
            if entry.value == 0:
                # A null offset terminates the table early.
                break
            entries.append(entry)
        if (self.absolute_address + self.current_size)/8 != offset:
            raise ParserError("Problem with level offset.")
        prev = offset
        for entry in entries:
            chunk_end = entry.value
            # Each chunk runs from the previous boundary up to its own offset.
            yield RawBytes(self, "data[]", chunk_end - prev, "Data content of %s" % entry.name)
            prev = chunk_end

class XcfHierarchy(FieldSet):
    """Pixel hierarchy: dimensions plus a null-terminated list of level offsets."""
    def createFields(self):
        yield UInt32(self, "width", "Width")
        yield UInt32(self, "height", "Height")
        yield UInt32(self, "bpp", "Bits/pixel")

        level_offsets = []
        while True:
            off = UInt32(self, "offset[]", "Level offset")
            yield off
            if off.value == 0:
                break
            level_offsets.append(off.value)
        for level_offset in level_offsets:
            pad = self.seekByte(level_offset, relative=False)
            if pad is not None:
                yield pad
            yield XcfLevel(self, "level[]", "Level")
#            yield XcfChannel(self, "channel[]", "Channel"))

class XcfChannel(FieldSet):
    """A channel: dimensions, name, properties and its pixel hierarchy."""
    def createFields(self):
        yield UInt32(self, "width", "Channel width")
        yield UInt32(self, "height", "Channel height")
        yield PascalString32(self, "name", "Channel name", strip="\0", charset="UTF-8")
        for prop in readProperties(self):
            yield prop
        yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
        yield XcfHierarchy(self, "hierarchy", "Hierarchy")

    def createDescription(self):
        return 'Channel "%s"' % self["name"].value

class XcfLayer(FieldSet):
    """A layer: dimensions, type, name, properties and its pixel hierarchy."""
    def createFields(self):
        yield UInt32(self, "width", "Layer width in pixels")
        yield UInt32(self, "height", "Layer height in pixels")
        yield Enum(UInt32(self, "type", "Layer type"), XcfFile.IMAGE_TYPE_NAME)
        yield PascalString32(self, "name", "Layer name", strip="\0", charset="UTF-8")
        for prop in readProperties(self):
            yield prop

        # --
        # TODO: Hack for Gimp 1.2 files
        # --

        yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
        yield UInt32(self, "mask_ofs", "Layer mask offset")
        pad = self.seekByte(self["hierarchy_ofs"].value, relative=False)
        if pad is not None:
            yield pad
        yield XcfHierarchy(self, "hierarchy", "Hierarchy")
        # TODO: Read layer mask if needed: self["mask_ofs"].value != 0

    def createDescription(self):
        return 'Layer "%s"' % self["name"].value

class XcfParasites(FieldSet):
    """Sequence of parasite entries filling the parent property payload."""
    def createFields(self):
        total_bits = self["../size"].value * 8
        while self.current_size < total_bits:
            yield XcfParasiteEntry(self, "parasite[]", "Parasite")
u"Compression", + 18: u"Guides", + 19: u"Resolution", + 20: u"Tattoo", + 21: u"Parasites", + 22: u"Unit", + 23: u"Paths", + 24: u"User unit", + 25: u"Vectors", + 26: u"Text layer flags", + } + + handler = { + 6: RGBA, + 7: LayerMode, + 8: GimpBoolean, + 9: GimpBoolean, + 10: GimpBoolean, + 11: GimpBoolean, + 12: GimpBoolean, + 13: GimpBoolean, + 15: LayerOffsets, + 17: XcfCompression, + 19: XcfResolution, + 20: XcfTattoo, + 21: XcfParasites, + 22: XcfUnit + } + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = (8 + self["size"].value) * 8 + + def createFields(self): + yield Enum(UInt32(self, "type", "Property type"), self.TYPE_NAME) + yield UInt32(self, "size", "Property size") + + size = self["size"].value + if 0 < size: + cls = self.handler.get(self["type"].value, None) + if cls: + yield cls(self, "data", size=size*8) + else: + yield RawBytes(self, "data", size, "Data") + + def createDescription(self): + return "Property: %s" % self["type"].display + +def readProperties(parser): + while True: + prop = XcfProperty(parser, "property[]") + yield prop + if prop["type"].value == 0: + return + +class XcfFile(Parser): + PARSER_TAGS = { + "id": "xcf", + "category": "image", + "file_ext": ("xcf",), + "mime": (u"image/x-xcf", u"application/x-gimp-image"), + "min_size": (26 + 8 + 4 + 4)*8, # header+empty property+layer offset+channel offset + "magic": ( + ('gimp xcf file\0', 0), + ('gimp xcf v002\0', 0), + ), + "description": "Gimp (XCF) picture" + } + endian = NETWORK_ENDIAN + IMAGE_TYPE_NAME = { + 0: u"RGB", + 1: u"Gray", + 2: u"Indexed" + } + + def validate(self): + if self.stream.readBytes(0, 14) not in ('gimp xcf file\0', 'gimp xcf v002\0'): + return "Wrong signature" + return True + + def createFields(self): + # Read signature + yield String(self, "signature", 14, "Gimp picture signature (ends with nul byte)", charset="ASCII") + + # Read image general informations (width, height, type) + yield UInt32(self, "width", "Image width") 
+ yield UInt32(self, "height", "Image height") + yield Enum(UInt32(self, "type", "Image type"), self.IMAGE_TYPE_NAME) + for prop in readProperties(self): + yield prop + + # Read layer offsets + layer_offsets = [] + while True: + chunk = UInt32(self, "layer_offset[]", "Layer offset") + yield chunk + if chunk.value == 0: + break + layer_offsets.append(chunk.value) + + # Read channel offsets + channel_offsets = [] + while True: + chunk = UInt32(self, "channel_offset[]", "Channel offset") + yield chunk + if chunk.value == 0: + break + channel_offsets.append(chunk.value) + + # Read layers + for index, offset in enumerate(layer_offsets): + if index+1 < len(layer_offsets): + size = (layer_offsets[index+1] - offset) * 8 + else: + size = None + padding = self.seekByte(offset, relative=False) + if padding: + yield padding + yield XcfLayer(self, "layer[]", size=size) + + # Read channels + for index, offset in enumerate(channel_offsets): + if index+1 < len(channel_offsets): + size = (channel_offsets[index+1] - offset) * 8 + else: + size = None + padding = self.seekByte(offset, relative=False) + if padding is not None: + yield padding + yield XcfChannel(self, "channel[]", "Channel", size=size) + diff --git a/libs/hachoir_parser/misc/__init__.py b/libs/hachoir_parser/misc/__init__.py new file mode 100644 index 0000000..10e98bb --- /dev/null +++ b/libs/hachoir_parser/misc/__init__.py @@ -0,0 +1,14 @@ +from hachoir_parser.misc.file_3do import File3do +from hachoir_parser.misc.file_3ds import File3ds +from hachoir_parser.misc.torrent import TorrentFile +from hachoir_parser.misc.ttf import TrueTypeFontFile +from hachoir_parser.misc.chm import ChmFile +from hachoir_parser.misc.lnk import LnkFile +from hachoir_parser.misc.pcf import PcfFile +from hachoir_parser.misc.ole2 import OLE2_File +from hachoir_parser.misc.pdf import PDFDocument +from hachoir_parser.misc.pifv import PIFVFile +from hachoir_parser.misc.hlp import HlpFile +from hachoir_parser.misc.gnome_keyring import GnomeKeyring 
+from hachoir_parser.misc.bplist import BPList + diff --git a/libs/hachoir_parser/misc/bplist.py b/libs/hachoir_parser/misc/bplist.py new file mode 100644 index 0000000..5411b48 --- /dev/null +++ b/libs/hachoir_parser/misc/bplist.py @@ -0,0 +1,299 @@ +""" +Apple/NeXT Binary Property List (BPLIST) parser. + +Also includes a .createXML() function which produces an XML representation of the object. +Note that it will discard unknown objects, nulls and fill values, but should work for most files. + +Documents: +- CFBinaryPList.c + http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Parsing.subproj/CFBinaryPList.c +- ForFoundationOnly.h (for structure formats) + http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Base.subproj/ForFoundationOnly.h +- XML <-> BPList converter + http://scw.us/iPhone/plutil/plutil.pl +Author: Robert Xiao +Created: 2008-09-21 +""" + +from hachoir_parser import HachoirParser +from hachoir_core.field import (RootSeekableFieldSet, FieldSet, Enum, +Bits, GenericInteger, Float32, Float64, UInt8, UInt64, Bytes, NullBytes, RawBytes, String) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import displayHandler +from hachoir_core.tools import humanDatetime +from datetime import datetime, timedelta + +class BPListTrailer(FieldSet): + def createFields(self): + yield NullBytes(self, "unused", 6) + yield UInt8(self, "offsetIntSize", "Size (in bytes) of offsets in the offset table") + yield UInt8(self, "objectRefSize", "Size (in bytes) of object numbers in object references") + yield UInt64(self, "numObjects", "Number of objects in this file") + yield UInt64(self, "topObject", "Top-level object reference") + yield UInt64(self, "offsetTableOffset", "File offset to the offset table") + + def createDescription(self): + return "Binary PList trailer" + +class BPListOffsetTable(FieldSet): + def createFields(self): + size = self["../trailer/offsetIntSize"].value*8 + for i in 
range(self["../trailer/numObjects"].value): + yield Bits(self, "offset[]", size) + +class BPListSize(FieldSet): + def createFields(self): + yield Bits(self, "size", 4) + if self['size'].value == 0xF: + yield BPListObject(self, "fullsize") + + def createValue(self): + if 'fullsize' in self: + return self['fullsize'].value + else: + return self['size'].value + +class BPListObjectRef(GenericInteger): + def __init__(self, parent, name, description=None): + size = parent['/trailer/objectRefSize'].value*8 + GenericInteger.__init__(self, parent, name, False, size, description) + + def getRef(self): + return self.parent['/object[' + str(self.value) + ']'] + + def createDisplay(self): + return self.getRef().display + + def createXML(self, prefix=''): + return self.getRef().createXML(prefix) + +class BPListArray(FieldSet): + def __init__(self, parent, name, size, description=None): + FieldSet.__init__(self, parent, name, description=description) + self.numels = size + + def createFields(self): + for i in range(self.numels): + yield BPListObjectRef(self, "ref[]") + + def createValue(self): + return self.array('ref') + + def createDisplay(self): + return '[' + ', '.join([x.display for x in self.value]) + ']' + + def createXML(self,prefix=''): + return prefix + '\n' + ''.join([x.createXML(prefix + '\t' ) + '\n' for x in self.value]) + prefix + '' + +class BPListDict(FieldSet): + def __init__(self, parent, name, size, description=None): + FieldSet.__init__(self, parent, name, description=description) + self.numels = size + + def createFields(self): + for i in range(self.numels): + yield BPListObjectRef(self, "keyref[]") + for i in range(self.numels): + yield BPListObjectRef(self, "valref[]") + + def createValue(self): + return zip(self.array('keyref'),self.array('valref')) + + def createDisplay(self): + return '{' + ', '.join(['%s: %s'%(k.display,v.display) for k,v in self.value]) + '}' + + def createXML(self, prefix=''): + return prefix + '\n' + 
''.join(['%s\t%s\n%s\n'%(prefix,k.getRef().value.encode('utf-8'),v.createXML(prefix + '\t')) for k,v in self.value]) + prefix + '' + +class BPListObject(FieldSet): + def createFields(self): + yield Enum(Bits(self, "marker_type", 4), + {0: "Simple", + 1: "Int", + 2: "Real", + 3: "Date", + 4: "Data", + 5: "ASCII String", + 6: "UTF-16-BE String", + 8: "UID", + 10: "Array", + 13: "Dict",}) + markertype = self['marker_type'].value + if markertype == 0: + # Simple (Null) + yield Enum(Bits(self, "value", 4), + {0: "Null", + 8: "False", + 9: "True", + 15: "Fill Byte",}) + if self['value'].display == "False": + self.xml=lambda prefix:prefix + "" + elif self['value'].display == "True": + self.xml=lambda prefix:prefix + "" + else: + self.xml=lambda prefix:prefix + "" + + elif markertype == 1: + # Int + yield Bits(self, "size", 4, "log2 of number of bytes") + size=self['size'].value + # 8-bit (size=0), 16-bit (size=1) and 32-bit (size=2) numbers are unsigned + # 64-bit (size=3) numbers are signed + yield GenericInteger(self, "value", (size>=3), (2**size)*8) + self.xml=lambda prefix:prefix + "%s"%self['value'].value + + elif markertype == 2: + # Real + yield Bits(self, "size", 4, "log2 of number of bytes") + if self['size'].value == 2: # 2**2 = 4 byte float + yield Float32(self, "value") + elif self['size'].value == 3: # 2**3 = 8 byte float + yield Float64(self, "value") + else: + # FIXME: What is the format of the real? 
+ yield Bits(self, "value", (2**self['size'].value)*8) + self.xml=lambda prefix:prefix + "%s"%self['value'].value + + elif markertype == 3: + # Date + yield Bits(self, "extra", 4, "Extra value, should be 3") + # Use a heuristic to determine which epoch to use + def cvt_time(v): + v=timedelta(seconds=v) + epoch2001 = datetime(2001,1,1) + epoch1970 = datetime(1970,1,1) + if (epoch2001 + v - datetime.today()).days > 5*365: + return epoch1970 + v + return epoch2001 + v + yield displayHandler(Float64(self, "value"),lambda x:humanDatetime(cvt_time(x))) + self.xml=lambda prefix:prefix + "%sZ"%(cvt_time(self['value'].value).isoformat()) + + elif markertype == 4: + # Data + yield BPListSize(self, "size") + if self['size'].value: + yield Bytes(self, "value", self['size'].value) + self.xml=lambda prefix:prefix + "\n%s\n%s"%(self['value'].value.encode('base64').strip(),prefix) + else: + self.xml=lambda prefix:prefix + '' + + elif markertype == 5: + # ASCII String + yield BPListSize(self, "size") + if self['size'].value: + yield String(self, "value", self['size'].value, charset="ASCII") + self.xml=lambda prefix:prefix + "%s"%(self['value'].value.replace('&','&').encode('iso-8859-1')) + else: + self.xml=lambda prefix:prefix + '' + + elif markertype == 6: + # UTF-16-BE String + yield BPListSize(self, "size") + if self['size'].value: + yield String(self, "value", self['size'].value*2, charset="UTF-16-BE") + self.xml=lambda prefix:prefix + "%s"%(self['value'].value.replace('&','&').encode('utf-8')) + else: + self.xml=lambda prefix:prefix + '' + + elif markertype == 8: + # UID + yield Bits(self, "size", 4, "Number of bytes minus 1") + yield GenericInteger(self, "value", False, (self['size'].value + 1)*8) + self.xml=lambda prefix:prefix + "" # no equivalent? 
+ + elif markertype == 10: + # Array + yield BPListSize(self, "size") + size = self['size'].value + if size: + yield BPListArray(self, "value", size) + self.xml=lambda prefix:self['value'].createXML(prefix) + + elif markertype == 13: + # Dict + yield BPListSize(self, "size") + yield BPListDict(self, "value", self['size'].value) + self.xml=lambda prefix:self['value'].createXML(prefix) + + else: + yield Bits(self, "value", 4) + self.xml=lambda prefix:'' + + def createValue(self): + if 'value' in self: + return self['value'].value + elif self['marker_type'].value in [4,5,6]: + return u'' + else: + return None + + def createDisplay(self): + if 'value' in self: + return unicode(self['value'].display) + elif self['marker_type'].value in [4,5,6]: + return u'' + else: + return None + + def createXML(self, prefix=''): + if 'value' in self: + try: + return self.xml(prefix) + except AttributeError: + return '' + return '' + + def getFieldType(self): + return '%s<%s>'%(FieldSet.getFieldType(self), self['marker_type'].display) + +class BPList(HachoirParser, RootSeekableFieldSet): + endian = BIG_ENDIAN + MAGIC = "bplist00" + PARSER_TAGS = { + "id": "bplist", + "category": "misc", + "file_ext": ("plist",), + "magic": ((MAGIC, 0),), + "min_size": 8 + 32, # bplist00 + 32-byte trailer + "description": "Apple/NeXT Binary Property List", + } + + def __init__(self, stream, **args): + RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) + HachoirParser.__init__(self, stream, **args) + + def validate(self): + if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC: + return "Invalid magic" + return True + + def createFields(self): + yield Bytes(self, "magic", 8, "File magic (bplist00)") + if self.size: + self.seekByte(self.size//8-32, True) + else: + # FIXME: UNTESTED + while True: + try: + self.seekByte(1024) + except: + break + self.seekByte(self.size//8-32) + yield BPListTrailer(self, "trailer") + 
self.seekByte(self['trailer/offsetTableOffset'].value) + yield BPListOffsetTable(self, "offset_table") + for i in self.array("offset_table/offset"): + if self.current_size > i.value*8: + self.seekByte(i.value) + elif self.current_size < i.value*8: + # try to detect files with gaps or unparsed content + yield RawBytes(self, "padding[]", i.value-self.current_size//8) + yield BPListObject(self, "object[]") + + def createXML(self, prefix=''): + return ''' + + +''' + self['/object[' + str(self['/trailer/topObject'].value) + ']'].createXML(prefix) + ''' +''' + diff --git a/libs/hachoir_parser/misc/chm.py b/libs/hachoir_parser/misc/chm.py new file mode 100644 index 0000000..6bff555 --- /dev/null +++ b/libs/hachoir_parser/misc/chm.py @@ -0,0 +1,200 @@ +""" +InfoTech Storage Format (ITSF) parser, used by Microsoft's HTML Help (.chm) + +Document: +- Microsoft's HTML Help (.chm) format + http://www.wotsit.org (search "chm") +- chmlib library + http://www.jedrea.com/chmlib/ + +Author: Victor Stinner +Creation date: 2007-03-04 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (Field, FieldSet, ParserError, + Int32, UInt32, UInt64, + RawBytes, PaddingBytes, + Enum, String) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.common.win32 import GUID +from hachoir_parser.common.win32_lang_id import LANGUAGE_ID +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler + +class CWord(Field): + """ + Compressed double-word + """ + def __init__(self, parent, name, description=None): + Field.__init__(self, parent, name, 8, description) + + endian = self._parent.endian + stream = self._parent.stream + addr = self.absolute_address + + value = 0 + byte = stream.readBits(addr, 8, endian) + while byte & 0x80: + value <<= 7 + value += (byte & 0x7f) + self._size += 8 + if 64 < self._size: + raise ParserError("CHM: CWord is limited to 64 bits") + addr += 8 + byte = stream.readBits(addr, 8, endian) + value += byte + self.createValue 
= lambda: value + +class Filesize_Header(FieldSet): + def createFields(self): + yield textHandler(UInt32(self, "unknown[]", "0x01FE"), hexadecimal) + yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal) + yield filesizeHandler(UInt64(self, "file_size")) + yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal) + yield textHandler(UInt32(self, "unknown[]", "0x0"), hexadecimal) + +class ITSP(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["size"].value * 8 + + def createFields(self): + yield String(self, "magic", 4, "ITSP", charset="ASCII") + yield UInt32(self, "version", "Version (=1)") + yield filesizeHandler(UInt32(self, "size", "Length (in bytes) of the directory header (84)")) + yield UInt32(self, "unknown[]", "(=10)") + yield filesizeHandler(UInt32(self, "block_size", "Directory block size")) + yield UInt32(self, "density", "Density of quickref section, usually 2") + yield UInt32(self, "index_depth", "Depth of the index tree") + yield Int32(self, "nb_dir", "Chunk number of root index chunk") + yield UInt32(self, "first_pmgl", "Chunk number of first PMGL (listing) chunk") + yield UInt32(self, "last_pmgl", "Chunk number of last PMGL (listing) chunk") + yield Int32(self, "unknown[]", "-1") + yield UInt32(self, "nb_dir_chunk", "Number of directory chunks (total)") + yield Enum(UInt32(self, "lang_id", "Windows language ID"), LANGUAGE_ID) + yield GUID(self, "system_uuid", "{5D02926A-212E-11D0-9DF9-00A0C922E6EC}") + yield filesizeHandler(UInt32(self, "size2", "Same value than size")) + yield Int32(self, "unknown[]", "-1") + yield Int32(self, "unknown[]", "-1") + yield Int32(self, "unknown[]", "-1") + +class ITSF(FieldSet): + def createFields(self): + yield String(self, "magic", 4, "ITSF", charset="ASCII") + yield UInt32(self, "version") + yield UInt32(self, "header_size", "Total header length (in bytes)") + yield UInt32(self, "one") + yield UInt32(self, "last_modified") + yield Enum(UInt32(self, 
"lang_id", "Windows Language ID"), LANGUAGE_ID) + yield GUID(self, "dir_uuid", "{7C01FD10-7BAA-11D0-9E0C-00A0-C922-E6EC}") + yield GUID(self, "stream_uuid", "{7C01FD11-7BAA-11D0-9E0C-00A0-C922-E6EC}") + yield UInt64(self, "filesize_offset") + yield filesizeHandler(UInt64(self, "filesize_len")) + yield UInt64(self, "dir_offset") + yield filesizeHandler(UInt64(self, "dir_len")) + if 3 <= self["version"].value: + yield UInt64(self, "data_offset") + +class PMGL_Entry(FieldSet): + def createFields(self): + yield CWord(self, "name_len") + yield String(self, "name", self["name_len"].value, charset="UTF-8") + yield CWord(self, "space") + yield CWord(self, "start") + yield filesizeHandler(CWord(self, "length")) + + def createDescription(self): + return "%s (%s)" % (self["name"].value, self["length"].display) + +class PMGL(FieldSet): + def createFields(self): + # Header + yield String(self, "magic", 4, "PMGL", charset="ASCII") + yield filesizeHandler(Int32(self, "free_space", + "Length of free space and/or quickref area at end of directory chunk")) + yield Int32(self, "unknown") + yield Int32(self, "previous", "Chunk number of previous listing chunk") + yield Int32(self, "next", "Chunk number of previous listing chunk") + + # Entries + stop = self.size - self["free_space"].value * 8 + while self.current_size < stop: + yield PMGL_Entry(self, "entry[]") + + # Padding + padding = (self.size - self.current_size) // 8 + if padding: + yield PaddingBytes(self, "padding", padding) + +class PMGI_Entry(FieldSet): + def createFields(self): + yield CWord(self, "name_len") + yield String(self, "name", self["name_len"].value, charset="UTF-8") + yield CWord(self, "page") + + def createDescription(self): + return "%s (page #%u)" % (self["name"].value, self["page"].value) + +class PMGI(FieldSet): + def createFields(self): + yield String(self, "magic", 4, "PMGI", charset="ASCII") + yield filesizeHandler(UInt32(self, "free_space", + "Length of free space and/or quickref area at end of 
directory chunk")) + + stop = self.size - self["free_space"].value * 8 + while self.current_size < stop: + yield PMGI_Entry(self, "entry[]") + + padding = (self.size - self.current_size) // 8 + if padding: + yield PaddingBytes(self, "padding", padding) + +class Directory(FieldSet): + def createFields(self): + yield ITSP(self, "itsp") + block_size = self["itsp/block_size"].value * 8 + + nb_dir = self["itsp/nb_dir"].value + + if nb_dir < 0: + nb_dir = 1 + for index in xrange(nb_dir): + yield PMGL(self, "pmgl[]", size=block_size) + + if self.current_size < self.size: + yield PMGI(self, "pmgi", size=block_size) + +class ChmFile(Parser): + PARSER_TAGS = { + "id": "chm", + "category": "misc", + "file_ext": ("chm",), + "min_size": 4*8, + "magic": (("ITSF\3\0\0\0", 0),), + "description": "Microsoft's HTML Help (.chm)", + } + endian = LITTLE_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 4) != "ITSF": + return "Invalid magic" + if self["itsf/version"].value != 3: + return "Invalid version" + return True + + def createFields(self): + yield ITSF(self, "itsf") + yield Filesize_Header(self, "file_size", size=self["itsf/filesize_len"].value*8) + + padding = self.seekByte(self["itsf/dir_offset"].value) + if padding: + yield padding + yield Directory(self, "dir", size=self["itsf/dir_len"].value*8) + + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "raw_end", size) + + def createContentSize(self): + return self["file_size/file_size"].value * 8 + diff --git a/libs/hachoir_parser/misc/common.py b/libs/hachoir_parser/misc/common.py new file mode 100644 index 0000000..38d9f82 --- /dev/null +++ b/libs/hachoir_parser/misc/common.py @@ -0,0 +1,13 @@ +from hachoir_core.field import StaticFieldSet, Float32 + +class Vertex(StaticFieldSet): + format = ((Float32, "x"), (Float32, "y"), (Float32, "z")) + + def createValue(self): + return (self["x"].value, self["y"].value, self["z"].value) + +class MapUV(StaticFieldSet): + format = ((Float32, "u"), 
(Float32, "v")) + + def createValue(self): + return (self["u"].value, self["v"].value) diff --git a/libs/hachoir_parser/misc/file_3do.py b/libs/hachoir_parser/misc/file_3do.py new file mode 100644 index 0000000..3108d0a --- /dev/null +++ b/libs/hachoir_parser/misc/file_3do.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- + +""" +3do model parser. + +Author: Cyril Zorin +Creation date: 28 september 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt32, Int32, String, Float32, + RawBytes, PaddingBytes) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_parser.misc.common import Vertex, MapUV + +class Vector(FieldSet): + def __init__(self, parent, name, + count, type, ename, edesc=None, description=None): + FieldSet.__init__(self, parent, name, description) + self.count = count + self.type = type + self.ename = ename+"[]" + self.edesc = edesc + try: + item_size = self.type.static_size(self.ename, self.edesc) + except TypeError: + item_size = self.type.static_size + if item_size: + self._size = item_size * self.count + + def createFields(self): + for index in xrange(self.count): + yield self.type(self, self.ename, self.edesc) + +class Face(FieldSet): + def createFields(self): + yield UInt32(self, "id") + yield UInt32(self, "type") + yield UInt32(self, "geometry_mode") + yield UInt32(self, "lighting_mode") + yield UInt32(self, "texture_mode") + yield UInt32(self, "nvertices") + yield Float32(self, "unknown[]", "unknown") + yield UInt32(self, "has_texture", "Has texture?") + yield UInt32(self, "has_material", "Has material?") + yield Vertex(self, "unknown[]") + yield Float32(self, "extra_light") + yield Vertex(self, "unknown[]") + yield Vertex(self, "normal") + if self["nvertices"].value: + yield Vector(self, "vertex_indices", + self["nvertices"].value, UInt32, "vertex") + if self["has_texture"].value: + yield Vector(self, "texture_vertex_indices", + self["nvertices"].value, UInt32, "texture_vertex") + if 
self["has_material"].value: + yield UInt32(self, "material_index", "material index") + + def createDescription(self): + return "Face: id=%s" % self["id"].value + +class Mesh(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + + def createFields(self): + yield String(self, "name", 32, strip="\0") + yield UInt32(self, "id") + yield UInt32(self, "geometry_mode") + yield UInt32(self, "lighting_mode") + yield UInt32(self, "texture_mode") + yield UInt32(self, "nmesh_vertices") + yield UInt32(self, "ntexture_vertices") + yield UInt32(self, "nfaces") + + nb_vert = self["nmesh_vertices"].value + if nb_vert: + yield Vector(self, "vertices", + nb_vert, Vertex, "vertex") + if self["ntexture_vertices"].value: + yield Vector(self, "texture vertices", + self["ntexture_vertices"].value, MapUV, "texture_vertex") + if nb_vert: + yield Vector(self, "light vertices", + nb_vert, Float32, "extra_light") + yield Vector(self, "unknown[]", + nb_vert, Float32, "unknown") + if self["nfaces"].value: + yield Vector(self, "faces", self["nfaces"].value, Face, "face") + if nb_vert: + yield Vector(self, "vertex normals", + nb_vert, Vertex, "normal") + + yield UInt32(self, "has_shadow") + yield Float32(self, "unknown[]") + yield Float32(self, "radius") + yield Vertex(self, "unknown[]") + yield Vertex(self, "unknown[]") + + def createDescription(self): + return 'Mesh "%s" (id %s)' % (self["name"].value, self["id"].value) + +class Geoset(FieldSet): + def createFields(self): + yield UInt32(self, "count") + for index in xrange(self["count"].value): + yield Mesh(self, "mesh[]") + + def createDescription(self): + return "Set of %s meshes" % self["count"].value + +class Node(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + size = (188-4)*8 + if self["parent_offset"].value != 0: + size += 32 + if self["first_child_offset"].value != 0: + size += 32 + if self["next_sibling_offset"].value != 0: + size += 32 + self._size = size + + def createFields(self): + 
yield String(self, "name", 32, strip="\0") + yield PaddingBytes(self, "unknown[]", 32, pattern="\xCC") + yield UInt32(self, "flags") + yield UInt32(self, "id") + yield UInt32(self, "type") + yield Int32(self, "mesh_id") + yield UInt32(self, "depth") + yield Int32(self, "parent_offset") + yield UInt32(self, "nchildren") + yield UInt32(self, "first_child_offset") + yield UInt32(self, "next_sibling_offset") + yield Vertex(self, "pivot") + yield Vertex(self, "position") + yield Float32(self, "pitch") + yield Float32(self, "yaw") + yield Float32(self, "roll") + for index in xrange(4): + yield Vertex(self, "unknown_vertex[]") + if self["parent_offset"].value != 0: + yield UInt32(self, "parent_id") + if self["first_child_offset"].value != 0: + yield UInt32(self, "first_child_id") + if self["next_sibling_offset"].value != 0: + yield UInt32(self, "next_sibling_id") + + def createDescription(self): + return 'Node "%s"' % self["name"].value + +class Nodes(FieldSet): + def createFields(self): + yield UInt32(self, "count") + for index in xrange(self["count"].value): + yield Node(self, "node[]") + + def createDescription(self): + return 'Nodes (%s)' % self["count"].value + +class Materials(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + count = self["count"] + self._size = count.size + count.value * (32*8) + + def createFields(self): + yield UInt32(self, "count") + for index in xrange(self["count"].value): + yield String(self, "filename[]", 32, "Material file name", strip="\0") + + def createDescription(self): + return 'Material file names (%s)' % self["count"].value + +class File3do(Parser): + PARSER_TAGS = { + "id": "3do", + "category": "misc", + "file_ext": ("3do",), + "mime": (u"image/x-3do",), + "min_size": 8*4, + "description": "renderdroid 3d model." 
+ } + + endian = LITTLE_ENDIAN + + def validate(self): + signature = self.stream.readBytes(0, 4) + return signature in ('LDOM', 'MODL') # lazy endian-safe hack =D + + def createFields(self): + # Read file signature, and fix endian if needed + yield String(self, "file_sig", 4, "File signature", charset="ASCII") + if self["file_sig"].value == "MODL": + self.endian = BIG_ENDIAN + + # Read file content + yield Materials(self, "materials") + yield String(self, "model_name", 32, "model file name", strip="\0") + yield RawBytes(self, "unknown[]", 4) + yield UInt32(self, "ngeosets") + for index in xrange(self["ngeosets"].value): + yield Geoset(self, "geoset[]") + yield RawBytes(self, "unknown[]", 4) + yield Nodes(self, "nodes") + yield Float32(self, "model_radius") + yield Vertex(self, "insertion_offset") + + # Read the end of the file + if self.current_size < self._size: + yield self.seekBit(self._size, "end") + diff --git a/libs/hachoir_parser/misc/file_3ds.py b/libs/hachoir_parser/misc/file_3ds.py new file mode 100644 index 0000000..aaf4fbf --- /dev/null +++ b/libs/hachoir_parser/misc/file_3ds.py @@ -0,0 +1,177 @@ +""" +3D Studio Max file (.3ds) parser. 
+Author: Victor Stinner +""" + +from hachoir_parser import Parser +from hachoir_core.field import (StaticFieldSet, FieldSet, + UInt16, UInt32, RawBytes, Enum, CString) +from hachoir_parser.image.common import RGB +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_parser.misc.common import Vertex, MapUV + +def readObject(parent): + yield CString(parent, "name", "Object name") + size = parent["size"].value * 8 + while parent.current_size < size: + yield Chunk(parent, "chunk[]") + +def readTextureFilename(parent): + yield CString(parent, "filename", "Texture filename") + +def readVersion(parent): + yield UInt32(parent, "version", "3DS file format version") + +def readMaterialName(parent): + yield CString(parent, "name", "Material name") + +class Polygon(StaticFieldSet): + format = ( + (UInt16, "a", "Vertex A"), + (UInt16, "b", "Vertex B"), + (UInt16, "c", "Vertex C"), + (UInt16, "flags", "Flags")) + +def readMapList(parent): + yield UInt16(parent, "count", "Map count") + for index in xrange(parent["count"].value): + yield MapUV(parent, "map_uv[]", "Mapping UV") + +def readColor(parent): + yield RGB(parent, "color") + +def readVertexList(parent): + yield UInt16(parent, "count", "Vertex count") + for index in range(0, parent["count"].value): + yield Vertex(parent, "vertex[]", "Vertex") + +def readPolygonList(parent): + count = UInt16(parent, "count", "Vertex count") + yield count + for i in range(0, count.value): + yield Polygon(parent, "polygon[]") + size = parent["size"].value * 8 + while parent.current_size < size: + yield Chunk(parent, "chunk[]") + +class Chunk(FieldSet): + # List of chunk type name + type_name = { + 0x0011: "Color", + 0x4D4D: "Main chunk", + 0x0002: "File version", + 0x3D3D: "Materials and objects", + 0x4000: "Object", + 0x4100: "Mesh (triangular)", + 0x4110: "Vertices list", + 0x4120: "Polygon (faces) list", + 0x4140: "Map UV list", + 0x4130: "Object material", + 0xAFFF: 
"New material", + 0xA000: "Material name", + 0xA010: "Material ambient", + 0xA020: "Material diffuse", + 0xA030: "Texture specular", + 0xA200: "Texture", + 0xA300: "Texture filename", + + # Key frames + 0xB000: "Keyframes", + 0xB002: "Object node tag", + 0xB006: "Light target node tag", + 0xB007: "Spot light node tag", + 0xB00A: "Keyframes header", + 0xB009: "Keyframe current time", + 0xB030: "Node identifier", + 0xB010: "Node header", + 0x7001: "Viewport layout" + } + + chunk_id_by_type = { + 0x4d4d: "main", + 0x0002: "version", + 0x3d3d: "obj_mat", + 0xb000: "keyframes", + 0xafff: "material[]", + 0x4000: "object[]", + 0x4110: "vertices_list", + 0x4120: "polygon_list", + 0x4140: "mapuv_list", + 0x4100: "mesh" + } + + # List of chunks which contains other chunks + sub_chunks = \ + (0x4D4D, 0x4100, 0x3D3D, 0xAFFF, 0xA200, + 0xB002, 0xB006, 0xB007, + 0xA010, 0xA030, 0xA020, 0xB000) + + # List of chunk type handlers + handlers = { + 0xA000: readMaterialName, + 0x4000: readObject, + 0xA300: readTextureFilename, + 0x0011: readColor, + 0x0002: readVersion, + 0x4110: readVertexList, + 0x4120: readPolygonList, + 0x4140: readMapList + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + + # Set description + self._description = "Chunk: %s" % self["type"].display + + # Set name based on type field + type = self["type"].value + if type in Chunk.chunk_id_by_type: + self._name = Chunk.chunk_id_by_type[type] + else: + self._name = "chunk_%04x" % type + + # Guess chunk size + self._size = self["size"].value * 8 + + def createFields(self): + yield Enum(textHandler(UInt16(self, "type", "Chunk type"), hexadecimal), Chunk.type_name) + yield UInt32(self, "size", "Chunk size (in bytes)") + content_size = self["size"].value - 6 + if content_size == 0: + return + type = self["type"].value + if type in Chunk.sub_chunks: + while self.current_size < self.size: + yield Chunk(self, "chunk[]") + else: + if type in Chunk.handlers: + fields = Chunk.handlers[type] (self) + for 
field in fields: + yield field + else: + yield RawBytes(self, "data", content_size) + +class File3ds(Parser): + endian = LITTLE_ENDIAN + PARSER_TAGS = { + "id": "3ds", + "category": "misc", + "file_ext": ("3ds",), + "mime": (u"image/x-3ds",), + "min_size": 16*8, + "description": "3D Studio Max model" + } + + def validate(self): + if self.stream.readBytes(0, 2) != "MM": + return "Wrong signature" + if self["main/version/version"].value not in (2, 3): + return "Unknown format version" + return True + + def createFields(self): + while not self.eof: + yield Chunk(self, "chunk[]") + diff --git a/libs/hachoir_parser/misc/gnome_keyring.py b/libs/hachoir_parser/misc/gnome_keyring.py new file mode 100644 index 0000000..0bade36 --- /dev/null +++ b/libs/hachoir_parser/misc/gnome_keyring.py @@ -0,0 +1,200 @@ +""" +Gnome keyring parser. + +Sources: + - Gnome Keyring source code, + function generate_file() in keyrings/gkr-keyring.c, + +Author: Victor Stinner +Creation date: 2008-04-09 +""" + +from hachoir_core.tools import paddingSize +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + Bit, NullBits, NullBytes, + UInt8, UInt32, String, RawBytes, Enum, + TimestampUnix64, CompressedField, + SubFile) +from hachoir_core.endian import BIG_ENDIAN + +try: + import hashlib + def sha256(data): + hash = hashlib.new('sha256') + hash.update(data) + return hash.digest() +except ImportError: + def sha256(data): + raise ImportError("hashlib module is missing") + +try: + from Crypto.Cipher import AES + class DeflateStream: + def __init__(self, stream): + hash_iterations = 1234 + password = "x" * 8 + salt = "\0" * 8 + key, iv = generate_key(password, salt, hash_iterations) + self.cipher = AES.new(key, AES.MODE_CBC, iv) + + def __call__(self, size, data=None): + if data is None: + return '' + return self.cipher.decrypt(data) + + def Deflate(field): + CompressedField(field, DeflateStream) + return field +except ImportError: + def Deflate(field): + return field + +class 
KeyringString(FieldSet): + def createFields(self): + yield UInt32(self, "length") + length = self["length"].value + if length == 0xffffffff: + return + yield String(self, "text", length, charset="UTF-8") + + def createValue(self): + if "text" in self: + return self["text"].value + else: + return u'' + + def createDescription(self): + if "text" in self: + return self["text"].value + else: + return u"(empty string)" + +class Attribute(FieldSet): + def createFields(self): + yield KeyringString(self, "name") + yield UInt32(self, "type") + type = self["type"].value + if type == 0: + yield KeyringString(self, "value") + elif type == 1: + yield UInt32(self, "value") + else: + raise TypeError("Unknown attribute type (%s)" % type) + + def createDescription(self): + return 'Attribute "%s"' % self["name"].value + +class ACL(FieldSet): + def createFields(self): + yield UInt32(self, "types_allowed") + yield KeyringString(self, "display_name") + yield KeyringString(self, "pathname") + yield KeyringString(self, "reserved[]") + yield UInt32(self, "reserved[]") + +class Item(FieldSet): + def createFields(self): + yield UInt32(self, "id") + yield UInt32(self, "type") + yield UInt32(self, "attr_count") + for index in xrange(self["attr_count"].value): + yield Attribute(self, "attr[]") + + def createDescription(self): + return "Item #%s: %s attributes" % (self["id"].value, self["attr_count"].value) + +class Items(FieldSet): + def createFields(self): + yield UInt32(self, "count") + for index in xrange(self["count"].value): + yield Item(self, "item[]") + +class EncryptedItem(FieldSet): + def createFields(self): + yield KeyringString(self, "display_name") + yield KeyringString(self, "secret") + yield TimestampUnix64(self, "mtime") + yield TimestampUnix64(self, "ctime") + yield KeyringString(self, "reserved[]") + for index in xrange(4): + yield UInt32(self, "reserved[]") + yield UInt32(self, "attr_count") + for index in xrange(self["attr_count"].value): + yield Attribute(self, "attr[]") + 
yield UInt32(self, "acl_count") + for index in xrange(self["acl_count"].value): + yield ACL(self, "acl[]") +# size = 8 # paddingSize((self.stream.size - self.current_size) // 8, 16) +# if size: +# yield NullBytes(self, "hash_padding", size, "16 bytes alignment") + +class EncryptedData(Parser): + PARSER_TAGS = { + "id": "gnomeencryptedkeyring", + "min_size": 16*8, + "description": u"Gnome encrypted keyring", + } + endian = BIG_ENDIAN + def validate(self): + return True + + def createFields(self): + yield RawBytes(self, "md5", 16) + while True: + size = (self.size - self.current_size) // 8 + if size < 77: + break + yield EncryptedItem(self, "item[]") + size = paddingSize(self.current_size // 8, 16) + if size: + yield NullBytes(self, "padding_align", size) + +class GnomeKeyring(Parser): + MAGIC = "GnomeKeyring\n\r\0\n" + PARSER_TAGS = { + "id": "gnomekeyring", + "category": "misc", + "magic": ((MAGIC, 0),), + "min_size": 47*8, + "description": u"Gnome keyring", + } + CRYPTO_NAMES = { + 0: u"AEL", + } + HASH_NAMES = { + 0: u"MD5", + } + + endian = BIG_ENDIAN + + def validate(self): + if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC: + return u"Invalid magic string" + return True + + def createFields(self): + yield String(self, "magic", len(self.MAGIC), 'Magic string (%r)' % self.MAGIC, charset="ASCII") + yield UInt8(self, "major_version") + yield UInt8(self, "minor_version") + yield Enum(UInt8(self, "crypto"), self.CRYPTO_NAMES) + yield Enum(UInt8(self, "hash"), self.HASH_NAMES) + yield KeyringString(self, "keyring_name") + yield TimestampUnix64(self, "mtime") + yield TimestampUnix64(self, "ctime") + yield Bit(self, "lock_on_idle") + yield NullBits(self, "reserved[]", 31, "Reserved for future flags") + yield UInt32(self, "lock_timeout") + yield UInt32(self, "hash_iterations") + yield RawBytes(self, "salt", 8) + yield NullBytes(self, "reserved[]", 16) + yield Items(self, "items") + yield UInt32(self, "encrypted_size") + yield Deflate(SubFile(self, "encrypted", 
self["encrypted_size"].value, "AES128 CBC", parser_class=EncryptedData)) + +def generate_key(password, salt, hash_iterations): + sha = sha256(password+salt) + for index in xrange(hash_iterations-1): + sha = sha256(sha) + return sha[:16], sha[16:] + diff --git a/libs/hachoir_parser/misc/hlp.py b/libs/hachoir_parser/misc/hlp.py new file mode 100644 index 0000000..167dc7a --- /dev/null +++ b/libs/hachoir_parser/misc/hlp.py @@ -0,0 +1,76 @@ +""" +Microsoft Windows Help (HLP) parser for Hachoir project. + +Documents: +- Windows Help File Format / Annotation File Format / SHG and MRB File Format + written by M. Winterhoff (100326.2776@compuserve.com) + found on http://www.wotsit.org/ + +Author: Victor Stinner +Creation date: 2007-09-03 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + Bits, Int32, UInt16, UInt32, + NullBytes, RawBytes, PaddingBytes, String) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import (textHandler, hexadecimal, + displayHandler, humanFilesize) + +class FileEntry(FieldSet): + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = self["res_space"].value * 8 + + def createFields(self): + yield displayHandler(UInt32(self, "res_space", "Reserved space"), humanFilesize) + yield displayHandler(UInt32(self, "used_space", "Used space"), humanFilesize) + yield Bits(self, "file_flags", 8, "(=4)") + + yield textHandler(UInt16(self, "magic"), hexadecimal) + yield Bits(self, "flags", 16) + yield displayHandler(UInt16(self, "page_size", "Page size in bytes"), humanFilesize) + yield String(self, "structure", 16, strip="\0", charset="ASCII") + yield NullBytes(self, "zero", 2) + yield UInt16(self, "nb_page_splits", "Number of page splits B+ tree has suffered") + yield UInt16(self, "root_page", "Page number of B+ tree root page") + yield PaddingBytes(self, "one", 2, pattern="\xFF") + yield UInt16(self, "nb_page", "Number of B+ tree pages") + yield UInt16(self, 
"nb_level", "Number of levels of B+ tree") + yield UInt16(self, "nb_entry", "Number of entries in B+ tree") + + size = (self.size - self.current_size)//8 + if size: + yield PaddingBytes(self, "reserved_space", size) + +class HlpFile(Parser): + PARSER_TAGS = { + "id": "hlp", + "category": "misc", + "file_ext": ("hlp",), + "min_size": 32, + "description": "Microsoft Windows Help (HLP)", + } + endian = LITTLE_ENDIAN + + def validate(self): + if self["magic"].value != 0x00035F3F: + return "Invalid magic" + if self["filesize"].value != self.stream.size//8: + return "Invalid magic" + return True + + def createFields(self): + yield textHandler(UInt32(self, "magic"), hexadecimal) + yield UInt32(self, "dir_start", "Directory start") + yield Int32(self, "first_free_block", "First free block") + yield UInt32(self, "filesize", "File size in bytes") + + yield self.seekByte(self["dir_start"].value) + yield FileEntry(self, "file[]") + + size = (self.size - self.current_size)//8 + if size: + yield RawBytes(self, "end", size) + diff --git a/libs/hachoir_parser/misc/lnk.py b/libs/hachoir_parser/misc/lnk.py new file mode 100644 index 0000000..3844d37 --- /dev/null +++ b/libs/hachoir_parser/misc/lnk.py @@ -0,0 +1,613 @@ +""" +Windows Shortcut (.lnk) parser. 
+ +Documents: +- The Windows Shortcut File Format (document version 1.0) + Reverse-engineered by Jesse Hager + http://www.i2s-lab.com/Papers/The_Windows_Shortcut_File_Format.pdf +- Wine source code: + http://source.winehq.org/source/include/shlobj.h (SHELL_LINK_DATA_FLAGS enum) + http://source.winehq.org/source/dlls/shell32/pidl.h +- Microsoft: + http://msdn2.microsoft.com/en-us/library/ms538128.aspx + +Author: Robert Xiao, Victor Stinner + +Changes: + 2007-06-27 - Robert Xiao + * Fixes to FileLocationInfo to correctly handle Unicode paths + 2007-06-13 - Robert Xiao + * ItemID, FileLocationInfo and ExtraInfo structs, correct Unicode string handling + 2007-03-15 - Victor Stinner + * Creation of the parser +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + CString, String, + UInt32, UInt16, UInt8, + Bit, Bits, PaddingBits, + TimestampWin64, DateTimeMSDOS32, + NullBytes, PaddingBytes, RawBytes, Enum) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_parser.common.win32 import GUID +from hachoir_parser.common.msdos import MSDOSFileAttr16, MSDOSFileAttr32 +from hachoir_core.text_handler import filesizeHandler + +from hachoir_core.tools import paddingSize + +class ItemIdList(FieldSet): + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = (self["size"].value+2) * 8 + + def createFields(self): + yield UInt16(self, "size", "Size of item ID list") + while True: + item = ItemId(self, "itemid[]") + yield item + if not item["length"].value: + break + +class ItemId(FieldSet): + ITEM_TYPE = { + 0x1F: "GUID", + 0x23: "Drive", + 0x25: "Drive", + 0x29: "Drive", + 0x2E: "Shell Extension", + 0x2F: "Drive", + 0x30: "Dir/File", + 0x31: "Directory", + 0x32: "File", + 0x34: "File [Unicode Name]", + 0x41: "Workgroup", + 0x42: "Computer", + 0x46: "Net Provider", + 0x47: "Whole Network", + 0x4C: "Web Folder", + 0x61: "MSITStore", + 0x70: 
"Printer/RAS Connection", + 0xB1: "History/Favorite", + 0xC3: "Network Share", + } + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + if self["length"].value: + self._size = self["length"].value * 8 + else: + self._size = 16 + + def createFields(self): + yield UInt16(self, "length", "Length of Item ID Entry") + if not self["length"].value: + return + + yield Enum(UInt8(self, "type"),self.ITEM_TYPE) + entrytype=self["type"].value + if entrytype in (0x1F, 0x70): + # GUID + yield RawBytes(self, "dummy", 1, "should be 0x50") + yield GUID(self, "guid") + + elif entrytype == 0x2E: + # Shell extension + yield RawBytes(self, "dummy", 1, "should be 0x50") + if self["dummy"].value == '\0': + yield UInt16(self, "length_data", "Length of shell extension-specific data") + if self["length_data"].value: + yield RawBytes(self, "data", self["length_data"].value, "Shell extension-specific data") + yield GUID(self, "handler_guid") + yield GUID(self, "guid") + + elif entrytype in (0x23, 0x25, 0x29, 0x2F): + # Drive + yield String(self, "drive", self["length"].value-3, strip="\0") + + elif entrytype in (0x30, 0x31, 0x32, 0x61, 0xb1): + yield RawBytes(self, "dummy", 1, "should be 0x00") + yield UInt32(self, "size", "size of file; 0 for folders") + yield DateTimeMSDOS32(self, "date_time", "File/folder date and time") + yield MSDOSFileAttr16(self, "attribs", "File/folder attributes") + yield CString(self, "name", "File/folder name") + if self.root.hasUnicodeNames(): + # Align to 2-bytes + n = paddingSize(self.current_size//8, 2) + if n: + yield PaddingBytes(self, "pad", n) + + yield UInt16(self, "length_w", "Length of wide struct member") + yield RawBytes(self, "unknown[]", 6) + yield DateTimeMSDOS32(self, "creation_date_time", "File/folder creation date and time") + yield DateTimeMSDOS32(self, "access_date_time", "File/folder last access date and time") + yield RawBytes(self, "unknown[]", 2) + yield UInt16(self, "length_next", "Length of next two strings (if 
zero, ignore this field)") + yield CString(self, "unicode_name", "File/folder name", charset="UTF-16-LE") + if self["length_next"].value: + yield CString(self, "localized_name", "Localized name") + yield RawBytes(self, "unknown[]", 2) + else: + yield CString(self, "name_short", "File/folder short name") + + elif entrytype in (0x41, 0x42, 0x46): + yield RawBytes(self, "unknown[]", 2) + yield CString(self, "name") + yield CString(self, "protocol") + yield RawBytes(self, "unknown[]", 2) + + elif entrytype == 0x47: + # Whole Network + yield RawBytes(self, "unknown[]", 2) + yield CString(self, "name") + + elif entrytype == 0xC3: + # Network Share + yield RawBytes(self, "unknown[]", 2) + yield CString(self, "name") + yield CString(self, "protocol") + yield CString(self, "description") + yield RawBytes(self, "unknown[]", 2) + + elif entrytype == 0x4C: + # Web Folder + yield RawBytes(self, "unknown[]", 5) + yield TimestampWin64(self, "modification_time") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + yield LnkString(self, "name") + yield RawBytes(self, "padding[]", 2) + yield LnkString(self, "address") + if self["address/length"].value: + yield RawBytes(self, "padding[]", 2) + + else: + yield RawBytes(self, "raw", self["length"].value-3) + + def createDescription(self): + if self["length"].value: + return "Item ID Entry: "+self.ITEM_TYPE.get(self["type"].value,"Unknown") + else: + return "End of Item ID List" + +def formatVolumeSerial(field): + val = field.value + return '%04X-%04X'%(val>>16, val&0xFFFF) + +class LocalVolumeTable(FieldSet): + VOLUME_TYPE={ + 1: "No root directory", + 2: "Removable (Floppy, Zip, etc.)", + 3: "Fixed (Hard disk)", + 4: "Remote (Network drive)", + 5: "CD-ROM", + 6: "Ram drive", + } + + def createFields(self): + yield UInt32(self, "length", "Length of this structure") + yield Enum(UInt32(self, "volume_type", "Volume Type"),self.VOLUME_TYPE) + yield textHandler(UInt32(self, 
"volume_serial", "Volume Serial Number"), formatVolumeSerial) + + yield UInt32(self, "label_offset", "Offset to volume label") + padding = self.seekByte(self["label_offset"].value) + if padding: + yield padding + yield CString(self, "drive") + + def hasValue(self): + return bool(self["drive"].value) + + def createValue(self): + return self["drive"].value + +class NetworkVolumeTable(FieldSet): + def createFields(self): + yield UInt32(self, "length", "Length of this structure") + yield UInt32(self, "unknown[]") + yield UInt32(self, "share_name_offset", "Offset to share name") + yield UInt32(self, "unknown[]") + yield UInt32(self, "unknown[]") + padding = self.seekByte(self["share_name_offset"].value) + if padding: + yield padding + yield CString(self, "share_name") + + def createValue(self): + return self["share_name"].value + +class FileLocationInfo(FieldSet): + def createFields(self): + yield UInt32(self, "length", "Length of this structure") + if not self["length"].value: + return + + yield UInt32(self, "first_offset_pos", "Position of first offset") + has_unicode_paths = (self["first_offset_pos"].value == 0x24) + yield Bit(self, "on_local_volume") + yield Bit(self, "on_network_volume") + yield PaddingBits(self, "reserved[]", 30) + yield UInt32(self, "local_info_offset", "Offset to local volume table; only meaningful if on_local_volume = 1") + yield UInt32(self, "local_pathname_offset", "Offset to local base pathname; only meaningful if on_local_volume = 1") + yield UInt32(self, "remote_info_offset", "Offset to network volume table; only meaningful if on_network_volume = 1") + yield UInt32(self, "pathname_offset", "Offset of remaining pathname") + if has_unicode_paths: + yield UInt32(self, "local_pathname_unicode_offset", "Offset to Unicode version of local base pathname; only meaningful if on_local_volume = 1") + yield UInt32(self, "pathname_unicode_offset", "Offset to Unicode version of remaining pathname") + if self["on_local_volume"].value: + padding = 
self.seekByte(self["local_info_offset"].value) + if padding: + yield padding + yield LocalVolumeTable(self, "local_volume_table", "Local Volume Table") + + padding = self.seekByte(self["local_pathname_offset"].value) + if padding: + yield padding + yield CString(self, "local_base_pathname", "Local Base Pathname") + if has_unicode_paths: + padding = self.seekByte(self["local_pathname_unicode_offset"].value) + if padding: + yield padding + yield CString(self, "local_base_pathname_unicode", "Local Base Pathname in Unicode", charset="UTF-16-LE") + + if self["on_network_volume"].value: + padding = self.seekByte(self["remote_info_offset"].value) + if padding: + yield padding + yield NetworkVolumeTable(self, "network_volume_table") + + padding = self.seekByte(self["pathname_offset"].value) + if padding: + yield padding + yield CString(self, "final_pathname", "Final component of the pathname") + + if has_unicode_paths: + padding = self.seekByte(self["pathname_unicode_offset"].value) + if padding: + yield padding + yield CString(self, "final_pathname_unicode", "Final component of the pathname in Unicode", charset="UTF-16-LE") + + padding=self.seekByte(self["length"].value) + if padding: + yield padding + +class LnkString(FieldSet): + def createFields(self): + yield UInt16(self, "length", "Length of this string") + if self["length"].value: + if self.root.hasUnicodeNames(): + yield String(self, "data", self["length"].value*2, charset="UTF-16-LE") + else: + yield String(self, "data", self["length"].value, charset="ASCII") + + def createValue(self): + if self["length"].value: + return self["data"].value + else: + return "" + +class ColorRef(FieldSet): + ''' COLORREF struct, 0x00bbggrr ''' + static_size=32 + def createFields(self): + yield UInt8(self, "red", "Red") + yield UInt8(self, "green", "Green") + yield UInt8(self, "blue", "Blue") + yield PaddingBytes(self, "pad", 1, "Padding (must be 0)") + def createDescription(self): + rgb = self["red"].value, self["green"].value, 
self["blue"].value + return "RGB Color: #%02X%02X%02X" % rgb + +class ColorTableIndex(Bits): + def __init__(self, parent, name, size, description=None): + Bits.__init__(self, parent, name, size, None) + self.desc=description + def createDescription(self): + assert hasattr(self, 'parent') and hasattr(self, 'value') + return "%s: %s"%(self.desc, + self.parent["color[%i]"%self.value].description) + +class ExtraInfo(FieldSet): + INFO_TYPE={ + 0xA0000001: "Link Target Information", # EXP_SZ_LINK_SIG + 0xA0000002: "Console Window Properties", # NT_CONSOLE_PROPS_SIG + 0xA0000003: "Hostname and Other Stuff", + 0xA0000004: "Console Codepage Information", # NT_FE_CONSOLE_PROPS_SIG + 0xA0000005: "Special Folder Info", # EXP_SPECIAL_FOLDER_SIG + 0xA0000006: "DarwinID (Windows Installer ID) Information", # EXP_DARWIN_ID_SIG + 0xA0000007: "Custom Icon Details", # EXP_LOGO3_ID_SIG or EXP_SZ_ICON_SIG + } + SPECIAL_FOLDER = { + 0: "DESKTOP", + 1: "INTERNET", + 2: "PROGRAMS", + 3: "CONTROLS", + 4: "PRINTERS", + 5: "PERSONAL", + 6: "FAVORITES", + 7: "STARTUP", + 8: "RECENT", + 9: "SENDTO", + 10: "BITBUCKET", + 11: "STARTMENU", + 16: "DESKTOPDIRECTORY", + 17: "DRIVES", + 18: "NETWORK", + 19: "NETHOOD", + 20: "FONTS", + 21: "TEMPLATES", + 22: "COMMON_STARTMENU", + 23: "COMMON_PROGRAMS", + 24: "COMMON_STARTUP", + 25: "COMMON_DESKTOPDIRECTORY", + 26: "APPDATA", + 27: "PRINTHOOD", + 28: "LOCAL_APPDATA", + 29: "ALTSTARTUP", + 30: "COMMON_ALTSTARTUP", + 31: "COMMON_FAVORITES", + 32: "INTERNET_CACHE", + 33: "COOKIES", + 34: "HISTORY", + 35: "COMMON_APPDATA", + 36: "WINDOWS", + 37: "SYSTEM", + 38: "PROGRAM_FILES", + 39: "MYPICTURES", + 40: "PROFILE", + 41: "SYSTEMX86", + 42: "PROGRAM_FILESX86", + 43: "PROGRAM_FILES_COMMON", + 44: "PROGRAM_FILES_COMMONX86", + 45: "COMMON_TEMPLATES", + 46: "COMMON_DOCUMENTS", + 47: "COMMON_ADMINTOOLS", + 48: "ADMINTOOLS", + 49: "CONNECTIONS", + 53: "COMMON_MUSIC", + 54: "COMMON_PICTURES", + 55: "COMMON_VIDEO", + 56: "RESOURCES", + 57: "RESOURCES_LOCALIZED", + 
58: "COMMON_OEM_LINKS", + 59: "CDBURN_AREA", + 61: "COMPUTERSNEARME", + } + BOOL_ENUM = { + 0: "False", + 1: "True", + } + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + if self["length"].value: + self._size = self["length"].value * 8 + else: + self._size = 32 + + def createFields(self): + yield UInt32(self, "length", "Length of this structure") + if not self["length"].value: + return + + yield Enum(textHandler(UInt32(self, "signature", "Signature determining the function of this structure"),hexadecimal),self.INFO_TYPE) + + if self["signature"].value == 0xA0000003: + # Hostname and Other Stuff + yield UInt32(self, "remaining_length") + yield UInt32(self, "unknown[]") + yield String(self, "hostname", 16, "Computer hostname on which shortcut was last modified", strip="\0") + yield RawBytes(self, "unknown[]", 32) + yield RawBytes(self, "unknown[]", 32) + + elif self["signature"].value == 0xA0000005: + # Special Folder Info + yield Enum(UInt32(self, "special_folder_id", "ID of the special folder"),self.SPECIAL_FOLDER) + yield UInt32(self, "offset", "Offset to Item ID entry") + + elif self["signature"].value in (0xA0000001, 0xA0000006, 0xA0000007): + if self["signature"].value == 0xA0000001: # Link Target Information + object_name="target" + elif self["signature"].value == 0xA0000006: # DarwinID (Windows Installer ID) Information + object_name="darwinID" + else: # Custom Icon Details + object_name="icon_path" + yield CString(self, object_name, "Data (ASCII format)", charset="ASCII") + remaining = self["length"].value - self.current_size/8 - 260*2 # 260*2 = size of next part + if remaining: + yield RawBytes(self, "slack_space[]", remaining, "Data beyond end of string") + yield CString(self, object_name+'_unicode', "Data (Unicode format)", charset="UTF-16-LE", truncate="\0") + remaining = self["length"].value - self.current_size/8 + if remaining: + yield RawBytes(self, "slack_space[]", remaining, "Data beyond end of string") + + elif 
self["signature"].value == 0xA0000002: + # Console Window Properties + yield ColorTableIndex(self, "color_text", 4, "Screen text color index") + yield ColorTableIndex(self, "color_bg", 4, "Screen background color index") + yield NullBytes(self, "reserved[]", 1) + yield ColorTableIndex(self, "color_popup_text", 4, "Pop-up text color index") + yield ColorTableIndex(self, "color_popup_bg", 4, "Pop-up background color index") + yield NullBytes(self, "reserved[]", 1) + yield UInt16(self, "buffer_width", "Screen buffer width (character cells)") + yield UInt16(self, "buffer_height", "Screen buffer height (character cells)") + yield UInt16(self, "window_width", "Window width (character cells)") + yield UInt16(self, "window_height", "Window height (character cells)") + yield UInt16(self, "position_left", "Window distance from left edge (screen coords)") + yield UInt16(self, "position_top", "Window distance from top edge (screen coords)") + yield UInt32(self, "font_number") + yield UInt32(self, "input_buffer_size") + yield UInt16(self, "font_width", "Font width in pixels; 0 for a non-raster font") + yield UInt16(self, "font_height", "Font height in pixels; equal to the font size for non-raster fonts") + yield UInt32(self, "font_family") + yield UInt32(self, "font_weight") + yield String(self, "font_name_unicode", 64, "Font Name (Unicode format)", charset="UTF-16-LE", truncate="\0") + yield UInt32(self, "cursor_size", "Relative size of cursor (% of character size)") + yield Enum(UInt32(self, "full_screen", "Run console in full screen?"), self.BOOL_ENUM) + yield Enum(UInt32(self, "quick_edit", "Console uses quick-edit feature (using mouse to cut & paste)?"), self.BOOL_ENUM) + yield Enum(UInt32(self, "insert_mode", "Console uses insertion mode?"), self.BOOL_ENUM) + yield Enum(UInt32(self, "auto_position", "System automatically positions window?"), self.BOOL_ENUM) + yield UInt32(self, "history_size", "Size of the history buffer (in lines)") + yield UInt32(self, "history_count", 
"Number of history buffers (each process gets one up to this limit)") + yield Enum(UInt32(self, "history_no_dup", "Automatically eliminate duplicate lines in the history buffer?"), self.BOOL_ENUM) + for index in xrange(16): + yield ColorRef(self, "color[]") + + elif self["signature"].value == 0xA0000004: + # Console Codepage Information + yield UInt32(self, "codepage", "Console's code page") + + else: + yield RawBytes(self, "raw", self["length"].value-self.current_size/8) + + def createDescription(self): + if self["length"].value: + return "Extra Info Entry: "+self["signature"].display + else: + return "End of Extra Info" + +HOT_KEYS = { + 0x00: u'None', + 0x13: u'Pause', + 0x14: u'Caps Lock', + 0x21: u'Page Up', + 0x22: u'Page Down', + 0x23: u'End', + 0x24: u'Home', + 0x25: u'Left', + 0x26: u'Up', + 0x27: u'Right', + 0x28: u'Down', + 0x2d: u'Insert', + 0x2e: u'Delete', + 0x6a: u'Num *', + 0x6b: u'Num +', + 0x6d: u'Num -', + 0x6e: u'Num .', + 0x6f: u'Num /', + 0x90: u'Num Lock', + 0x91: u'Scroll Lock', + 0xba: u';', + 0xbb: u'=', + 0xbc: u',', + 0xbd: u'-', + 0xbe: u'.', + 0xbf: u'/', + 0xc0: u'`', + 0xdb: u'[', + 0xdc: u'\\', + 0xdd: u']', + 0xde: u"'", +} + +def text_hot_key(field): + assert hasattr(field, "value") + val=field.value + if 0x30 <= val <= 0x39: + return unichr(val) + elif 0x41 <= val <= 0x5A: + return unichr(val) + elif 0x60 <= val <= 0x69: + return u'Numpad %c' % unichr(val-0x30) + elif 0x70 <= val <= 0x87: + return 'F%i'%(val-0x6F) + elif val in HOT_KEYS: + return HOT_KEYS[val] + return str(val) + +class LnkFile(Parser): + MAGIC = "\x4C\0\0\0\x01\x14\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00\x46" + PARSER_TAGS = { + "id": "lnk", + "category": "misc", + "file_ext": ("lnk",), + "mime": (u"application/x-ms-shortcut",), + "magic": ((MAGIC, 0),), + "min_size": len(MAGIC)*8, # signature + guid = 20 bytes + "description": "Windows Shortcut (.lnk)", + } + endian = LITTLE_ENDIAN + + SHOW_WINDOW_STATE = { + 0: u"Hide", + 1: u"Show Normal", + 2: 
u"Show Minimized", + 3: u"Show Maximized", + 4: u"Show Normal, not activated", + 5: u"Show", + 6: u"Minimize", + 7: u"Show Minimized, not activated", + 8: u"Show, not activated", + 9: u"Restore", + 10: u"Show Default", + } + + def validate(self): + if self["signature"].value != 0x0000004C: + return "Invalid signature" + if self["guid"].value != "00021401-0000-0000-C000-000000000046": + return "Invalid GUID" + return True + + def hasUnicodeNames(self): + return self["has_unicode_names"].value + + def createFields(self): + yield UInt32(self, "signature", "Shortcut signature (0x0000004C)") + yield GUID(self, "guid", "Shortcut GUID (00021401-0000-0000-C000-000000000046)") + + yield Bit(self, "has_shell_id", "Is the Item ID List present?") + yield Bit(self, "target_is_file", "Is a file or a directory?") + yield Bit(self, "has_description", "Is the Description field present?") + yield Bit(self, "has_rel_path", "Is the relative path to the target available?") + yield Bit(self, "has_working_dir", "Is there a working directory?") + yield Bit(self, "has_cmd_line_args", "Are there any command line arguments?") + yield Bit(self, "has_custom_icon", "Is there a custom icon?") + yield Bit(self, "has_unicode_names", "Are Unicode names used?") + yield Bit(self, "force_no_linkinfo") + yield Bit(self, "has_exp_sz") + yield Bit(self, "run_in_separate") + yield Bit(self, "has_logo3id", "Is LOGO3 ID info present?") + yield Bit(self, "has_darwinid", "Is the DarwinID info present?") + yield Bit(self, "runas_user", "Is the target run as another user?") + yield Bit(self, "has_exp_icon_sz", "Is custom icon information available?") + yield Bit(self, "no_pidl_alias") + yield Bit(self, "force_unc_name") + yield Bit(self, "run_with_shim_layer") + yield PaddingBits(self, "reserved[]", 14, "Flag bits reserved for future use") + + yield MSDOSFileAttr32(self, "target_attr") + + yield TimestampWin64(self, "creation_time") + yield TimestampWin64(self, "modification_time") + yield TimestampWin64(self, 
"last_access_time") + yield filesizeHandler(UInt32(self, "target_filesize")) + yield UInt32(self, "icon_number") + yield Enum(UInt32(self, "show_window"), self.SHOW_WINDOW_STATE) + yield textHandler(UInt8(self, "hot_key", "Hot key used for quick access"),text_hot_key) + yield Bit(self, "hot_key_shift", "Hot key: is Shift used?") + yield Bit(self, "hot_key_ctrl", "Hot key: is Ctrl used?") + yield Bit(self, "hot_key_alt", "Hot key: is Alt used?") + yield PaddingBits(self, "hot_key_reserved", 21, "Hot key: (reserved)") + yield NullBytes(self, "reserved[]", 8) + + if self["has_shell_id"].value: + yield ItemIdList(self, "item_idlist", "Item ID List") + if self["target_is_file"].value: + yield FileLocationInfo(self, "file_location_info", "File Location Info") + if self["has_description"].value: + yield LnkString(self, "description") + if self["has_rel_path"].value: + yield LnkString(self, "relative_path", "Relative path to target") + if self["has_working_dir"].value: + yield LnkString(self, "working_dir", "Working directory (dir to start target in)") + if self["has_cmd_line_args"].value: + yield LnkString(self, "cmd_line_args", "Command Line Arguments") + if self["has_custom_icon"].value: + yield LnkString(self, "custom_icon", "Custom Icon Path") + + while not self.eof: + yield ExtraInfo(self, "extra_info[]") + diff --git a/libs/hachoir_parser/misc/msoffice.py b/libs/hachoir_parser/misc/msoffice.py new file mode 100644 index 0000000..90ca1ca --- /dev/null +++ b/libs/hachoir_parser/misc/msoffice.py @@ -0,0 +1,131 @@ +""" +Parsers for the different streams and fragments found in an OLE2 file. 
+ +Documents: + - goffice source code + +Author: Robert Xiao, Victor Stinner +Creation: 2006-04-23 +""" + +from hachoir_parser import HachoirParser +from hachoir_core.field import FieldSet, RootSeekableFieldSet, RawBytes +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.stream import StringInputStream +from hachoir_parser.misc.msoffice_summary import SummaryFieldSet, CompObj +from hachoir_parser.misc.word_doc import WordDocumentFieldSet + +PROPERTY_NAME = { + u"\5DocumentSummaryInformation": "doc_summary", + u"\5SummaryInformation": "summary", + u"WordDocument": "word_doc", +} + +class OfficeRootEntry(HachoirParser, RootSeekableFieldSet): + PARSER_TAGS = { + "description": "Microsoft Office document subfragments", + } + endian = LITTLE_ENDIAN + + def __init__(self, stream, **args): + RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) + HachoirParser.__init__(self, stream, **args) + + def validate(self): + return True + + def createFields(self): + for index, property in enumerate(self.ole2.properties): + if index == 0: + continue + try: + name = PROPERTY_NAME[property["name"].value] + except LookupError: + name = property.name+"content" + for field in self.parseProperty(index, property, name): + yield field + + def parseProperty(self, property_index, property, name_prefix): + ole2 = self.ole2 + if not property["size"].value: + return + if property["size"].value >= ole2["header/threshold"].value: + return + name = "%s[]" % name_prefix + first = None + previous = None + size = 0 + start = property["start"].value + chain = ole2.getChain(start, True) + blocksize = ole2.ss_size + desc_format = "Small blocks %s..%s (%s)" + while True: + try: + block = chain.next() + contiguous = False + if not first: + first = block + contiguous = True + if previous and block == (previous+1): + contiguous = True + if contiguous: + previous = block + size += blocksize + continue + except StopIteration: + block = None + 
self.seekSBlock(first) + desc = desc_format % (first, previous, previous-first+1) + size = min(size, property["size"].value*8) + if name_prefix in ("summary", "doc_summary"): + yield SummaryFieldSet(self, name, desc, size=size) + elif name_prefix == "word_doc": + yield WordDocumentFieldSet(self, name, desc, size=size) + elif property_index == 1: + yield CompObj(self, "comp_obj", desc, size=size) + else: + yield RawBytes(self, name, size//8, desc) + if block is None: + break + first = block + previous = block + size = ole2.sector_size + + def seekSBlock(self, block): + self.seekBit(block * self.ole2.ss_size) + +class FragmentGroup: + def __init__(self, parser): + self.items = [] + self.parser = parser + + def add(self, item): + self.items.append(item) + + def createInputStream(self): + # FIXME: Use lazy stream creation + data = [] + for item in self.items: + data.append( item["rawdata"].value ) + data = "".join(data) + + # FIXME: Use smarter code to send arguments + args = {"ole2": self.items[0].root} + tags = {"class": self.parser, "args": args} + tags = tags.iteritems() + return StringInputStream(data, "", tags=tags) + +class CustomFragment(FieldSet): + def __init__(self, parent, name, size, parser, description=None, group=None): + FieldSet.__init__(self, parent, name, description, size=size) + if not group: + group = FragmentGroup(parser) + self.group = group + self.group.add(self) + + def createFields(self): + yield RawBytes(self, "rawdata", self.size//8) + + def _createInputStream(self, **args): + return self.group.createInputStream() + diff --git a/libs/hachoir_parser/misc/msoffice_summary.py b/libs/hachoir_parser/misc/msoffice_summary.py new file mode 100644 index 0000000..dd3234a --- /dev/null +++ b/libs/hachoir_parser/misc/msoffice_summary.py @@ -0,0 +1,377 @@ +""" +Microsoft Document summaries structures. 
+ +Documents +--------- + + - Apache POI (HPSF Internals): + http://poi.apache.org/hpsf/internals.html +""" +from hachoir_parser import HachoirParser +from hachoir_core.field import (FieldSet, ParserError, + RootSeekableFieldSet, SeekableFieldSet, + Bit, Bits, NullBits, + UInt8, UInt16, UInt32, TimestampWin64, TimedeltaWin64, Enum, + Bytes, RawBytes, NullBytes, String, + Int8, Int32, Float32, Float64, PascalString32) +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler +from hachoir_core.tools import createDict +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_parser.common.win32 import GUID, PascalStringWin32, CODEPAGE_CHARSET +from hachoir_parser.image.bmp import BmpHeader, parseImageData + +MAX_SECTION_COUNT = 100 + +OS_MAC = 1 +OS_NAME = { + 0: "Windows 16-bit", + 1: "Macintosh", + 2: "Windows 32-bit", +} + +class OSConfig: + def __init__(self, big_endian): + if big_endian: + self.charset = "MacRoman" + self.utf16 = "UTF-16-BE" + else: + # FIXME: Don't guess the charset, use ISO-8859-1 or UTF-8 + #self.charset = "ISO-8859-1" + self.charset = None + self.utf16 = "UTF-16-LE" + +class PropertyIndex(FieldSet): + TAG_CODEPAGE = 1 + + COMMON_PROPERTY = { + 0: "Dictionary", + 1: "CodePage", + 0x80000000: "LOCALE_SYSTEM_DEFAULT", + 0x80000003: "CASE_SENSITIVE", + } + + DOCUMENT_PROPERTY = { + 2: "Category", + 3: "PresentationFormat", + 4: "NumBytes", + 5: "NumLines", + 6: "NumParagraphs", + 7: "NumSlides", + 8: "NumNotes", + 9: "NumHiddenSlides", + 10: "NumMMClips", + 11: "Scale", + 12: "HeadingPairs", + 13: "DocumentParts", + 14: "Manager", + 15: "Company", + 16: "LinksDirty", + 17: "DocSumInfo_17", + 18: "DocSumInfo_18", + 19: "DocSumInfo_19", + 20: "DocSumInfo_20", + 21: "DocSumInfo_21", + 22: "DocSumInfo_22", + 23: "DocSumInfo_23", + } + DOCUMENT_PROPERTY.update(COMMON_PROPERTY) + + COMPONENT_PROPERTY = { + 2: "Title", + 3: "Subject", + 4: "Author", + 5: "Keywords", + 6: "Comments", + 7: "Template", + 8: 
"LastSavedBy", + 9: "RevisionNumber", + 10: "TotalEditingTime", + 11: "LastPrinted", + 12: "CreateTime", + 13: "LastSavedTime", + 14: "NumPages", + 15: "NumWords", + 16: "NumCharacters", + 17: "Thumbnail", + 18: "AppName", + 19: "Security", + } + COMPONENT_PROPERTY.update(COMMON_PROPERTY) + + def createFields(self): + if self["../.."].name.startswith("doc_summary"): + enum = self.DOCUMENT_PROPERTY + else: + enum = self.COMPONENT_PROPERTY + yield Enum(UInt32(self, "id"), enum) + yield UInt32(self, "offset") + + def createDescription(self): + return "Property: %s" % self["id"].display + +class Bool(Int8): + def createValue(self): + value = Int8.createValue(self) + return (value == -1) + +class Thumbnail(FieldSet): + """ + Thumbnail. + + Documents: + - See Jakarta POI + http://jakarta.apache.org/poi/hpsf/thumbnails.html + http://www.penguin-soft.com/penguin/developer/poi/ + org/apache/poi/hpsf/Thumbnail.html#CF_BITMAP + - How To Extract Thumbnail Images + http://sparks.discreet.com/knowledgebase/public/ + solutions/ExtractThumbnailImg.htm + """ + FORMAT_CLIPBOARD = -1 + FORMAT_NAME = { + -1: "Windows clipboard", + -2: "Macintosh clipboard", + -3: "GUID that contains format identifier", + 0: "No data", + 2: "Bitmap", + 3: "Windows metafile format", + 8: "Device Independent Bitmap (DIB)", + 14: "Enhanced Windows metafile", + } + + DIB_BMP = 8 + DIB_FORMAT = { + 2: "Bitmap Obsolete (old BMP)", + 3: "Windows metafile format (WMF)", + 8: "Device Independent Bitmap (BMP)", + 14: "Enhanced Windows metafile (EMF)", + } + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["size"].value * 8 + + def createFields(self): + yield filesizeHandler(UInt32(self, "size")) + yield Enum(Int32(self, "format"), self.FORMAT_NAME) + if self["format"].value == self.FORMAT_CLIPBOARD: + yield Enum(UInt32(self, "dib_format"), self.DIB_FORMAT) + if self["dib_format"].value == self.DIB_BMP: + yield BmpHeader(self, "bmp_header") + size = (self.size - self.current_size) 
// 8 + yield parseImageData(self, "pixels", size, self["bmp_header"]) + return + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "data", size) + +class PropertyContent(FieldSet): + TYPE_LPSTR = 30 + TYPE_INFO = { + 0: ("EMPTY", None), + 1: ("NULL", None), + 2: ("UInt16", UInt16), + 3: ("UInt32", UInt32), + 4: ("Float32", Float32), + 5: ("Float64", Float64), + 6: ("CY", None), + 7: ("DATE", None), + 8: ("BSTR", None), + 9: ("DISPATCH", None), + 10: ("ERROR", None), + 11: ("BOOL", Bool), + 12: ("VARIANT", None), + 13: ("UNKNOWN", None), + 14: ("DECIMAL", None), + 16: ("I1", None), + 17: ("UI1", None), + 18: ("UI2", None), + 19: ("UI4", None), + 20: ("I8", None), + 21: ("UI8", None), + 22: ("INT", None), + 23: ("UINT", None), + 24: ("VOID", None), + 25: ("HRESULT", None), + 26: ("PTR", None), + 27: ("SAFEARRAY", None), + 28: ("CARRAY", None), + 29: ("USERDEFINED", None), + 30: ("LPSTR", PascalString32), + 31: ("LPWSTR", PascalString32), + 64: ("FILETIME", TimestampWin64), + 65: ("BLOB", None), + 66: ("STREAM", None), + 67: ("STORAGE", None), + 68: ("STREAMED_OBJECT", None), + 69: ("STORED_OBJECT", None), + 70: ("BLOB_OBJECT", None), + 71: ("THUMBNAIL", Thumbnail), + 72: ("CLSID", None), + 0x1000: ("Vector", None), + } + TYPE_NAME = createDict(TYPE_INFO, 0) + + def createFields(self): + self.osconfig = self.parent.osconfig + if True: + yield Enum(Bits(self, "type", 12), self.TYPE_NAME) + yield Bit(self, "is_vector") + yield NullBits(self, "padding", 32-12-1) + else: + yield Enum(Bits(self, "type", 32), self.TYPE_NAME) + tag = self["type"].value + kw = {} + try: + handler = self.TYPE_INFO[tag][1] + if handler == PascalString32: + osconfig = self.osconfig + if tag == self.TYPE_LPSTR: + kw["charset"] = osconfig.charset + else: + kw["charset"] = osconfig.utf16 + elif handler == TimestampWin64: + if self.description == "TotalEditingTime": + handler = TimedeltaWin64 + except LookupError: + handler = None + if not handler: + raise 
ParserError("OLE2: Unable to parse property of type %s" \ + % self["type"].display) + if self["is_vector"].value: + yield UInt32(self, "count") + for index in xrange(self["count"].value): + yield handler(self, "item[]", **kw) + else: + yield handler(self, "value", **kw) + self.createValue = lambda: self["value"].value +PropertyContent.TYPE_INFO[12] = ("VARIANT", PropertyContent) + +class SummarySection(SeekableFieldSet): + def __init__(self, *args): + SeekableFieldSet.__init__(self, *args) + self._size = self["size"].value * 8 + + def createFields(self): + self.osconfig = self.parent.osconfig + yield UInt32(self, "size") + yield UInt32(self, "property_count") + for index in xrange(self["property_count"].value): + yield PropertyIndex(self, "property_index[]") + for index in xrange(self["property_count"].value): + findex = self["property_index[%u]" % index] + self.seekByte(findex["offset"].value) + field = PropertyContent(self, "property[]", findex["id"].display) + yield field + if not self.osconfig.charset \ + and findex['id'].value == PropertyIndex.TAG_CODEPAGE: + codepage = field['value'].value + if codepage in CODEPAGE_CHARSET: + self.osconfig.charset = CODEPAGE_CHARSET[codepage] + else: + self.warning("Unknown codepage: %r" % codepage) + +class SummaryIndex(FieldSet): + static_size = 20*8 + def createFields(self): + yield String(self, "name", 16) + yield UInt32(self, "offset") + +class BaseSummary: + endian = LITTLE_ENDIAN + + def __init__(self): + if self["endian"].value == "\xFF\xFE": + self.endian = BIG_ENDIAN + elif self["endian"].value == "\xFE\xFF": + self.endian = LITTLE_ENDIAN + else: + raise ParserError("OLE2: Invalid endian value") + self.osconfig = OSConfig(self["os_type"].value == OS_MAC) + + def createFields(self): + yield Bytes(self, "endian", 2, "Endian (0xFF 0xFE for Intel)") + yield UInt16(self, "format", "Format (0)") + yield UInt8(self, "os_version") + yield UInt8(self, "os_revision") + yield Enum(UInt16(self, "os_type"), OS_NAME) + yield 
GUID(self, "format_id") + yield UInt32(self, "section_count") + if MAX_SECTION_COUNT < self["section_count"].value: + raise ParserError("OLE2: Too much sections (%s)" % self["section_count"].value) + + section_indexes = [] + for index in xrange(self["section_count"].value): + section_index = SummaryIndex(self, "section_index[]") + yield section_index + section_indexes.append(section_index) + + for section_index in section_indexes: + self.seekByte(section_index["offset"].value) + yield SummarySection(self, "section[]") + + size = (self.size - self.current_size) // 8 + if 0 < size: + yield NullBytes(self, "end_padding", size) + +class SummaryParser(BaseSummary, HachoirParser, RootSeekableFieldSet): + PARSER_TAGS = { + "description": "Microsoft Office summary", + } + + def __init__(self, stream, **kw): + RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) + HachoirParser.__init__(self, stream, **kw) + BaseSummary.__init__(self) + + def validate(self): + return True + +class SummaryFieldSet(BaseSummary, FieldSet): + def __init__(self, parent, name, description=None, size=None): + FieldSet.__init__(self, parent, name, description=description, size=size) + BaseSummary.__init__(self) + +class CompObj(FieldSet): + OS_VERSION = { + 0x0a03: "Windows 3.1", + } + def createFields(self): + # Header + yield UInt16(self, "version", "Version (=1)") + yield textHandler(UInt16(self, "endian", "Endian (0xFF 0xFE for Intel)"), hexadecimal) + yield UInt8(self, "os_version") + yield UInt8(self, "os_revision") + yield Enum(UInt16(self, "os_type"), OS_NAME) + yield Int32(self, "unused", "(=-1)") + yield GUID(self, "clsid") + + # User type + yield PascalString32(self, "user_type", strip="\0") + + # Clipboard format + if self["os_type"].value == OS_MAC: + yield Int32(self, "unused[]", "(=-2)") + yield String(self, "clipboard_format", 4) + else: + yield PascalString32(self, "clipboard_format", strip="\0") + if self.current_size == self.size: + return + + #-- 
OLE 2.01 --- + + # Program ID + yield PascalString32(self, "prog_id", strip="\0") + + if self["os_type"].value != OS_MAC: + # Magic number + yield textHandler(UInt32(self, "magic", "Magic number (0x71B239F4)"), hexadecimal) + + # Unicode version + yield PascalStringWin32(self, "user_type_unicode", strip="\0") + yield PascalStringWin32(self, "clipboard_format_unicode", strip="\0") + yield PascalStringWin32(self, "prog_id_unicode", strip="\0") + + size = (self.size - self.current_size) // 8 + if size: + yield NullBytes(self, "end_padding", size) + diff --git a/libs/hachoir_parser/misc/ole2.py b/libs/hachoir_parser/misc/ole2.py new file mode 100644 index 0000000..112b22b --- /dev/null +++ b/libs/hachoir_parser/misc/ole2.py @@ -0,0 +1,367 @@ +""" +Microsoft Office documents parser. + +Informations: +* wordole.c of AntiWord program (v0.35) + Copyright (C) 1998-2003 A.J. van Os + Released under GNU GPL + http://www.winfield.demon.nl/ +* File gsf-infile-msole.c of libgsf library (v1.14.0) + Copyright (C) 2002-2004 Jody Goldberg (jody@gnome.org) + Released under GNU LGPL 2.1 + http://freshmeat.net/projects/libgsf/ +* PDF from AAF Association + Copyright (C) 2004 AAF Association + Copyright (C) 1991-2003 Microsoft Corporation + http://www.aafassociation.org/html/specs/aafcontainerspec-v1.0.1.pdf + +Author: Victor Stinner +Creation: 2006-04-23 +""" + +from hachoir_parser import HachoirParser +from hachoir_core.field import ( + FieldSet, ParserError, SeekableFieldSet, RootSeekableFieldSet, + UInt8, UInt16, UInt32, UInt64, TimestampWin64, Enum, + Bytes, RawBytes, NullBytes, String) +from hachoir_core.text_handler import filesizeHandler +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_parser.common.win32 import GUID +from hachoir_parser.misc.msoffice import CustomFragment, OfficeRootEntry, PROPERTY_NAME +from hachoir_parser.misc.word_doc import WordDocumentParser +from hachoir_parser.misc.msoffice_summary import SummaryParser + +MIN_BIG_BLOCK_LOG2 = 6 # 512 bytes 
+MAX_BIG_BLOCK_LOG2 = 14 # 64 kB + +# Number of items in DIFAT +NB_DIFAT = 109 + +class SECT(UInt32): + UNUSED = 0xFFFFFFFF # -1 + END_OF_CHAIN = 0xFFFFFFFE # -2 + BFAT_SECTOR = 0xFFFFFFFD # -3 + DIFAT_SECTOR = 0xFFFFFFFC # -4 + SPECIALS = set((END_OF_CHAIN, UNUSED, BFAT_SECTOR, DIFAT_SECTOR)) + + special_value_name = { + UNUSED: "unused", + END_OF_CHAIN: "end of a chain", + BFAT_SECTOR: "BFAT sector (in a FAT)", + DIFAT_SECTOR: "DIFAT sector (in a FAT)", + } + + def __init__(self, parent, name, description=None): + UInt32.__init__(self, parent, name, description) + + def createDisplay(self): + val = self.value + return SECT.special_value_name.get(val, str(val)) + +class Property(FieldSet): + TYPE_ROOT = 5 + TYPE_NAME = { + 1: "storage", + 2: "stream", + 3: "ILockBytes", + 4: "IPropertyStorage", + 5: "root" + } + DECORATOR_NAME = { + 0: "red", + 1: "black", + } + static_size = 128 * 8 + + def createFields(self): + bytes = self.stream.readBytes(self.absolute_address, 4) + if bytes == "\0R\0\0": + charset = "UTF-16-BE" + else: + charset = "UTF-16-LE" + yield String(self, "name", 64, charset=charset, truncate="\0") + yield UInt16(self, "namelen", "Length of the name") + yield Enum(UInt8(self, "type", "Property type"), self.TYPE_NAME) + yield Enum(UInt8(self, "decorator", "Decorator"), self.DECORATOR_NAME) + yield SECT(self, "left") + yield SECT(self, "right") + yield SECT(self, "child", "Child node (valid for storage and root types)") + yield GUID(self, "clsid", "CLSID of this storage (valid for storage and root types)") + yield NullBytes(self, "flags", 4, "User flags") + yield TimestampWin64(self, "creation", "Creation timestamp(valid for storage and root types)") + yield TimestampWin64(self, "lastmod", "Modify timestamp (valid for storage and root types)") + yield SECT(self, "start", "Starting SECT of the stream (valid for stream and root types)") + if self["/header/bb_shift"].value == 9: + yield filesizeHandler(UInt32(self, "size", "Size in bytes (valid for stream 
and root types)")) + yield NullBytes(self, "padding", 4) + else: + yield filesizeHandler(UInt64(self, "size", "Size in bytes (valid for stream and root types)")) + + def createDescription(self): + name = self["name"].display + size = self["size"].display + return "Property: %s (%s)" % (name, size) + +class DIFat(SeekableFieldSet): + def __init__(self, parent, name, db_start, db_count, description=None): + SeekableFieldSet.__init__(self, parent, name, description) + self.start=db_start + self.count=db_count + + def createFields(self): + for index in xrange(NB_DIFAT): + yield SECT(self, "index[%u]" % index) + + for index in xrange(self.count): + # this is relative to real DIFAT start + self.seekBit(NB_DIFAT * SECT.static_size+self.parent.sector_size*(self.start+index)) + for sect_index in xrange(NB_DIFAT*(index+1),NB_DIFAT*(index+2)): + yield SECT(self, "index[%u]" % sect_index) + +class Header(FieldSet): + static_size = 68 * 8 + def createFields(self): + yield GUID(self, "clsid", "16 bytes GUID used by some apps") + yield UInt16(self, "ver_min", "Minor version") + yield UInt16(self, "ver_maj", "Minor version") + yield Bytes(self, "endian", 2, "Endian (0xFFFE for Intel)") + yield UInt16(self, "bb_shift", "Log, base 2, of the big block size") + yield UInt16(self, "sb_shift", "Log, base 2, of the small block size") + yield NullBytes(self, "reserved[]", 6, "(reserved)") + yield UInt32(self, "csectdir", "Number of SECTs in directory chain for 4 KB sectors (version 4)") + yield UInt32(self, "bb_count", "Number of Big Block Depot blocks") + yield SECT(self, "bb_start", "Root start block") + yield NullBytes(self, "transaction", 4, "Signature used for transactions (must be zero)") + yield UInt32(self, "threshold", "Maximum size for a mini stream (typically 4096 bytes)") + yield SECT(self, "sb_start", "Small Block Depot start block") + yield UInt32(self, "sb_count") + yield SECT(self, "db_start", "First block of DIFAT") + yield UInt32(self, "db_count", "Number of SECTs in 
DIFAT") + +# Header (ole_id, header, difat) size in bytes +HEADER_SIZE = 64 + Header.static_size + NB_DIFAT * SECT.static_size + +class SectFat(FieldSet): + def __init__(self, parent, name, start, count, description=None): + FieldSet.__init__(self, parent, name, description, size=count*32) + self.count = count + self.start = start + + def createFields(self): + for i in xrange(self.start, self.start + self.count): + yield SECT(self, "index[%u]" % i) + +class OLE2_File(HachoirParser, RootSeekableFieldSet): + PARSER_TAGS = { + "id": "ole2", + "category": "misc", + "file_ext": ( + "doc", "dot", # Microsoft Word + "ppt", "ppz", "pps", "pot", # Microsoft Powerpoint + "xls", "xla", # Microsoft Excel + "msi", # Windows installer + ), + "mime": ( + u"application/msword", + u"application/msexcel", + u"application/mspowerpoint", + ), + "min_size": 512*8, + "description": "Microsoft Office document", + "magic": (("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", 0),), + } + endian = LITTLE_ENDIAN + + def __init__(self, stream, **args): + RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) + HachoirParser.__init__(self, stream, **args) + + def validate(self): + if self["ole_id"].value != "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1": + return "Invalid magic" + if self["header/ver_maj"].value not in (3, 4): + return "Unknown major version (%s)" % self["header/ver_maj"].value + if self["header/endian"].value not in ("\xFF\xFE", "\xFE\xFF"): + return "Unknown endian (%s)" % self["header/endian"].raw_display + if not(MIN_BIG_BLOCK_LOG2 <= self["header/bb_shift"].value <= MAX_BIG_BLOCK_LOG2): + return "Invalid (log 2 of) big block size (%s)" % self["header/bb_shift"].value + if self["header/bb_shift"].value < self["header/sb_shift"].value: + return "Small block size (log2=%s) is bigger than big block size (log2=%s)!" 
\ + % (self["header/sb_shift"].value, self["header/bb_shift"].value) + return True + + def createFields(self): + # Signature + yield Bytes(self, "ole_id", 8, "OLE object signature") + + header = Header(self, "header") + yield header + + # Configure values + self.sector_size = (8 << header["bb_shift"].value) + self.fat_count = header["bb_count"].value + self.items_per_bbfat = self.sector_size / SECT.static_size + self.ss_size = (8 << header["sb_shift"].value) + self.items_per_ssfat = self.items_per_bbfat + + # Read DIFAT (one level of indirection) + yield DIFat(self, "difat", header["db_start"].value, header["db_count"].value, "Double Indirection FAT") + + # Read FAT (one level of indirection) + for field in self.readBFAT(): + yield field + + # Read SFAT + for field in self.readSFAT(): + yield field + + # Read properties + chain = self.getChain(self["header/bb_start"].value) + prop_per_sector = self.sector_size // Property.static_size + self.properties = [] + for block in chain: + self.seekBlock(block) + for index in xrange(prop_per_sector): + property = Property(self, "property[]") + yield property + self.properties.append(property) + + # Parse first property + for index, property in enumerate(self.properties): + if index == 0: + name = "root" + else: + try: + name = PROPERTY_NAME[property["name"].value] + except LookupError: + name = property.name+"content" + for field in self.parseProperty(property, name): + yield field + + def parseProperty(self, property, name_prefix): + if not property["size"].value: + return + if property.name != "property[0]" \ + and (property["size"].value < self["header/threshold"].value): + # Field is stored in the ministream, skip it + return + name = "%s[]" % name_prefix + first = None + previous = None + size = 0 + fragment_group = None + chain = self.getChain(property["start"].value) + while True: + try: + block = chain.next() + contiguous = False + if not first: + first = block + contiguous = True + if previous and block == 
(previous+1): + contiguous = True + if contiguous: + previous = block + size += self.sector_size + continue + except StopIteration: + block = None + if first is None: + break + self.seekBlock(first) + desc = "Big blocks %s..%s (%s)" % (first, previous, previous-first+1) + desc += " of %s bytes" % (self.sector_size // 8) + if name_prefix in set(("root", "summary", "doc_summary", "word_doc")): + if name_prefix == "root": + parser = OfficeRootEntry + elif name_prefix == "word_doc": + parser = WordDocumentParser + else: + parser = SummaryParser + field = CustomFragment(self, name, size, parser, desc, fragment_group) + yield field + if not fragment_group: + fragment_group = field.group + else: + yield RawBytes(self, name, size//8, desc) + if block is None: + break + first = block + previous = block + size = self.sector_size + + def getChain(self, start, use_sfat=False): + if use_sfat: + fat = self.ss_fat + items_per_fat = self.items_per_ssfat + err_prefix = "SFAT chain" + else: + fat = self.bb_fat + items_per_fat = self.items_per_bbfat + err_prefix = "BFAT chain" + block = start + block_set = set() + previous = block + while block != SECT.END_OF_CHAIN: + if block in SECT.SPECIALS: + raise ParserError("%s: Invalid block index (0x%08x), previous=%s" % (err_prefix, block, previous)) + if block in block_set: + raise ParserError("%s: Found a loop (%s=>%s)" % (err_prefix, previous, block)) + block_set.add(block) + yield block + previous = block + index = block // items_per_fat + try: + block = fat[index]["index[%u]" % block].value + except LookupError: + break + + def readBFAT(self): + self.bb_fat = [] + start = 0 + count = self.items_per_bbfat + for index, block in enumerate(self.array("difat/index")): + block = block.value + if block == SECT.UNUSED: + break + + desc = "FAT %u/%u at block %u" % \ + (1+index, self["header/bb_count"].value, block) + + self.seekBlock(block) + field = SectFat(self, "bbfat[]", start, count, desc) + yield field + self.bb_fat.append(field) + + 
start += count + + def readSFAT(self): + chain = self.getChain(self["header/sb_start"].value) + start = 0 + self.ss_fat = [] + count = self.items_per_ssfat + for index, block in enumerate(chain): + self.seekBlock(block) + field = SectFat(self, "sfat[]", \ + start, count, \ + "SFAT %u/%u at block %u" % \ + (1+index, self["header/sb_count"].value, block)) + yield field + self.ss_fat.append(field) + start += count + + def createContentSize(self): + max_block = 0 + for fat in self.array("bbfat"): + for entry in fat: + block = entry.value + if block not in SECT.SPECIALS: + max_block = max(block, max_block) + if max_block in SECT.SPECIALS: + return None + else: + return HEADER_SIZE + (max_block+1) * self.sector_size + + def seekBlock(self, block): + self.seekBit(HEADER_SIZE + block * self.sector_size) + diff --git a/libs/hachoir_parser/misc/pcf.py b/libs/hachoir_parser/misc/pcf.py new file mode 100644 index 0000000..8d116bb --- /dev/null +++ b/libs/hachoir_parser/misc/pcf.py @@ -0,0 +1,170 @@ +""" +X11 Portable Compiled Font (pcf) parser. 
+ +Documents: +- Format for X11 pcf bitmap font files + http://fontforge.sourceforge.net/pcf-format.html + (file is based on the X11 sources) + +Author: Victor Stinner +Creation date: 2007-03-20 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, Enum, + UInt8, UInt32, Bytes, RawBytes, NullBytes, + Bit, Bits, PaddingBits, CString) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler +from hachoir_core.tools import paddingSize + +class TOC(FieldSet): + TYPE_NAME = { + 0x00000001: "Properties", + 0x00000002: "Accelerators", + 0x00000004: "Metrics", + 0x00000008: "Bitmaps", + 0x00000010: "Ink metrics", + 0x00000020: "BDF encodings", + 0x00000040: "SWidths", + 0x00000080: "Glyph names", + 0x00000100: "BDF accelerators", + } + + FORMAT_NAME = { + 0x00000000: "Default", + 0x00000200: "Ink bounds", + 0x00000100: "Accelerator W ink bounds", +# 0x00000200: "Compressed metrics", + } + + def createFields(self): + yield Enum(UInt32(self, "type"), self.TYPE_NAME) + yield UInt32(self, "format") + yield filesizeHandler(UInt32(self, "size")) + yield UInt32(self, "offset") + + def createDescription(self): + return "%s at %s (%s)" % ( + self["type"].display, self["offset"].value, self["size"].display) + +class PropertiesFormat(FieldSet): + static_size = 32 + endian = LITTLE_ENDIAN + def createFields(self): + yield Bits(self, "reserved[]", 2) + yield Bit(self, "byte_big_endian") + yield Bit(self, "bit_big_endian") + yield Bits(self, "scan_unit", 2) + yield textHandler(PaddingBits(self, "reserved[]", 26), hexadecimal) + +class Property(FieldSet): + def createFields(self): + yield UInt32(self, "name_offset") + yield UInt8(self, "is_string") + yield UInt32(self, "value_offset") + + def createDescription(self): + # FIXME: Use link or any better way to read name value + name = self["../name[%s]" % (self.index-2)].value + return "Property %s" % name + +class 
GlyphNames(FieldSet): + def __init__(self, parent, name, toc, description, size=None): + FieldSet.__init__(self, parent, name, description, size=size) + self.toc = toc + if self["format/byte_big_endian"].value: + self.endian = BIG_ENDIAN + else: + self.endian = LITTLE_ENDIAN + + def createFields(self): + yield PropertiesFormat(self, "format") + yield UInt32(self, "count") + offsets = [] + for index in xrange(self["count"].value): + offset = UInt32(self, "offset[]") + yield offset + offsets.append(offset.value) + yield UInt32(self, "total_str_length") + offsets.sort() + offset0 = self.current_size // 8 + for offset in offsets: + padding = self.seekByte(offset0+offset) + if padding: + yield padding + yield CString(self, "name[]") + padding = (self.size - self.current_size) // 8 + if padding: + yield NullBytes(self, "end_padding", padding) + +class Properties(GlyphNames): + def createFields(self): + yield PropertiesFormat(self, "format") + yield UInt32(self, "nb_prop") + properties = [] + for index in xrange(self["nb_prop"].value): + property = Property(self, "property[]") + yield property + properties.append(property) + padding = paddingSize(self.current_size//8, 4) + if padding: + yield NullBytes(self, "padding", padding) + yield UInt32(self, "total_str_length") + properties.sort(key=lambda entry: entry["name_offset"].value) + offset0 = self.current_size // 8 + for property in properties: + padding = self.seekByte(offset0+property["name_offset"].value) + if padding: + yield padding + yield CString(self, "name[]", "Name of %s" % property.name) + if property["is_string"].value: + yield CString(self, "value[]", "Value of %s" % property.name) + padding = (self.size - self.current_size) // 8 + if padding: + yield NullBytes(self, "end_padding", padding) + +class PcfFile(Parser): + MAGIC = "\1fcp" + PARSER_TAGS = { + "id": "pcf", + "category": "misc", + "file_ext": ("pcf",), + "magic": ((MAGIC, 0),), + "min_size": 32, # FIXME + "description": "X11 Portable Compiled Font 
(pcf)", + } + endian = LITTLE_ENDIAN + + def validate(self): + if self["signature"].value != self.MAGIC: + return "Invalid signature" + return True + + def createFields(self): + yield Bytes(self, "signature", 4, r'File signature ("\1pcf")') + yield UInt32(self, "nb_toc") + entries = [] + for index in xrange(self["nb_toc"].value): + entry = TOC(self, "toc[]") + yield entry + entries.append(entry) + entries.sort(key=lambda entry: entry["offset"].value) + for entry in entries: + size = entry["size"].value + padding = self.seekByte(entry["offset"].value) + if padding: + yield padding + maxsize = (self.size-self.current_size)//8 + if maxsize < size: + self.warning("Truncate content of %s to %s bytes (was %s)" % (entry.path, maxsize, size)) + size = maxsize + if not size: + continue + if entry["type"].value == 1: + yield Properties(self, "properties", entry, "Properties", size=size*8) + elif entry["type"].value == 128: + yield GlyphNames(self, "glyph_names", entry, "Glyph names", size=size*8) + else: + yield RawBytes(self, "data[]", size, "Content of %s" % entry.path) + diff --git a/libs/hachoir_parser/misc/pdf.py b/libs/hachoir_parser/misc/pdf.py new file mode 100644 index 0000000..e69ba85 --- /dev/null +++ b/libs/hachoir_parser/misc/pdf.py @@ -0,0 +1,442 @@ +""" +Adobe Portable Document Format (PDF) parser. 
+ +Author: Christophe Gisquet +""" + +from hachoir_parser import Parser +from hachoir_core.field import ( + Field, FieldSet, + ParserError, + GenericVector, + UInt8, UInt16, UInt32, + String, + RawBytes) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal + +MAGIC = "%PDF-" +ENDMAGIC = "%%EOF" + +def getLineEnd(s, pos=None): + if pos == None: + pos = (s.absolute_address+s.current_size)//8 + end = s.stream.searchBytesLength("\x0D", False, 8*pos) + other_end = s.stream.searchBytesLength("\x0A", False, 8*pos) + if end == None or (other_end != None and other_end < end): + return other_end + return end + +# TODO: rewrite to account for all possible terminations: ' ', '/', '\0XD' +# But this probably requires changing *ALL* of the places they are used, +# as ' ' is swallowed but not the others +def getElementEnd(s, limit=' ', offset=0): + addr = s.absolute_address+s.current_size + addr += 8*offset + pos = s.stream.searchBytesLength(limit, True, addr) + if pos == None: + #s.info("Can't find '%s' starting at %u" % (limit, addr)) + return None + return pos + +class PDFNumber(Field): + LIMITS = ['[', '/', '\x0D', ']'] + """ + sprintf("%i") or sprinf("%.?f") + """ + def __init__(self, parent, name, desc=None): + Field.__init__(self, parent, name, description=desc) + # Get size + size = getElementEnd(parent) + for limit in self.LIMITS: + other_size = getElementEnd(parent, limit) + if other_size != None: + other_size -= 1 + if size == None or other_size < size: + size = other_size + + self._size = 8*size + + # Get value + val = parent.stream.readBytes(self.absolute_address, size) + self.info("Number: size=%u value='%s'" % (size, val)) + if val.find('.') != -1: + self.createValue = lambda: float(val) + else: + self.createValue = lambda: int(val) + +class PDFString(Field): + """ + A string of the shape: + ( This string \ + uses 3 lines \ + with the CR(LF) inhibited ) + """ + def __init__(self, parent, name, desc=None): + 
Field.__init__(self, parent, name, description=desc) + val = "" + count = 1 + off = 1 + while not parent.eof: + char = parent.stream.readBytes(self.absolute_address+8*off, 1) + # Non-ASCII + if not char.isalpha() or char == '\\': + off += 1 + continue + if char == '(': + count += 1 + if char == ')': + count -= 1 + # Parenthesis block = 0 => end of string + if count == 0: + off += 1 + break + + # Add it to the string + val += char + + self._size = 8*off + self.createValue = lambda: val + +class PDFName(Field): + LIMITS = ['[', '/', '<', ']'] + """ + String starting with '/', where characters may be written using their + ASCII code (exemple: '#20' would be ' ' + ' ', ']' and '\0' are supposed not to be part of the name + """ + def __init__(self, parent, name, desc=None): + Field.__init__(self, parent, name, description=desc) + if parent.stream.readBytes(self.absolute_address, 1) != '/': + raise ParserError("Unknown PDFName '%s'" % + parent.stream.readBytes(self.absolute_address, 10)) + size = getElementEnd(parent, offset=1) + #other_size = getElementEnd(parent, '[')-1 + #if size == None or (other_size != None and other_size < size): + # size = other_size + for limit in self.LIMITS: + other_size = getElementEnd(parent, limit, 1) + if other_size != None: + other_size -= 1 + if size == None or other_size < size: + #self.info("New size: %u" % other_size) + size = other_size + + self._size = 8*(size+1) + # Value should be without the initial '/' and final ' ' + self.createValue = lambda: parent.stream.readBytes(self.absolute_address+8, size).strip(' ') + +class PDFID(Field): + """ + Not described as an object, but let's do as it was. 
class NotABool(Exception):
    """Raised when the bytes at the current position are not a PDF boolean."""
    pass

class PDFBool(Field):
    """
    "true" or "false" string standing for the boolean value
    """
    def __init__(self, parent, name, desc=None):
        Field.__init__(self, parent, name, description=desc)
        if parent.stream.readBytes(self.absolute_address, 4) == "true":
            # NOTE(review): _size is in bits everywhere else in this file
            # (siblings use 8*size); 4/5 bytes, so 4*8 and 5*8 bits.
            self._size = 4 * 8
            self.createValue = lambda: True
        elif parent.stream.readBytes(self.absolute_address, 5) == "false":
            self._size = 5 * 8
            self.createValue = lambda: False
        else:
            # BUG FIX: the raise was unconditional, so even a valid
            # "true"/"false" token raised NotABool.  Only raise when
            # neither literal matches.
            raise NotABool
class Body(FieldSet):
    """PDF body: header comments/CRC fields followed by the object
    catalogs, ending right before the cross-reference table."""

    def __init__(self, parent, name, desc=None):
        FieldSet.__init__(self, parent, name, desc)
        # The body spans everything up to the "xref" marker.
        pos = self.stream.searchBytesLength(CrossReferenceTable.MAGIC, False)
        if pos is None:
            raise ParserError("Can't find xref starting at %u" %
                (self.absolute_address//8))
        self._size = 8*pos - self.absolute_address

    def createFields(self):
        # Leading '%' comment lines: 2/4-byte binary markers are CRCs,
        # alphabetic runs are plain comments, anything else is raw data.
        while self.stream.readBytes(self.absolute_address+self.current_size, 1) == '%':
            size = getLineEnd(self, 4)
            if size == 2:
                yield textHandler(UInt16(self, "crc32"), hexadecimal)
            elif size == 4:
                yield textHandler(UInt32(self, "crc32"), hexadecimal)
            elif self.stream.readBytes(self.absolute_address+self.current_size, size).isalpha():
                yield String(self, "comment[]", size)
            else:
                # BUG FIX: the RawBytes field was created but never
                # yielded, so unknown comment data was silently dropped
                # and the parser position was left unadvanced.
                yield RawBytes(self, "unknown_data[]", size)
            yield LineEnd(self, "line_end[]")

        #abs_offset = self.current_size//8
        # TODO: yield objects that read offsets and deduce size from
        # "/cross_ref_table/sub_section[]/entries/item[]"
        offsets = []
        for subsection in self.array("/cross_ref_table/sub_section"):
            for obj in subsection.array("entries/item"):
                if "byte_offset" in obj:
                    # Could be inserted already sorted
                    offsets.append(obj["byte_offset"].value)

        # The xref table itself bounds the last object.
        offsets.append(self["/cross_ref_table"].absolute_address//8)
        offsets.sort()
        for index in xrange(len(offsets)-1):
            yield Catalog(self, "object[]", size=offsets[index+1]-offsets[index])
class CrossReferenceTable(FieldSet):
    """PDF cross-reference table: "xref" marker followed by subsections,
    ending right before the trailer."""
    MAGIC = "xref"

    def __init__(self, parent, name, desc=None):
        FieldSet.__init__(self, parent, name, description=desc)
        pos = self.stream.searchBytesLength(Trailer.MAGIC, False)
        if pos is None:
            # BUG FIX: the '%' formatting operator was missing
            # ("..." (args) tried to *call* the string), so this error
            # path raised TypeError instead of the intended ParserError.
            raise ParserError("Can't find '%s' starting at %u" %
                (Trailer.MAGIC, self.absolute_address//8))
        self._size = 8*pos - self.absolute_address

    def createFields(self):
        yield RawBytes(self, "marker", len(self.MAGIC))
        yield LineEnd(self, "line_end[]")
        while not self.eof:
            yield SubSection(self, "sub_section[]")
+ yield String(self, "object", length, strip=' ') + if self.stream.readBytes(self.absolute_address+self.current_size, 2) == '<<': + yield PDFDictionary(self, "key_list") + # End of catalog: this one has "endobj" + if self["object"].value == "obj": + yield LineEnd(self, "line_end[]") + yield String(self, "end_object", len("endobj")) + yield LineEnd(self, "line_end[]") + +class Trailer(FieldSet): + MAGIC = "trailer" + def createFields(self): + yield RawBytes(self, "marker", len(self.MAGIC)) + yield LineEnd(self, "line_end[]") + yield String(self, "start_attribute_marker", 2) + addr = self.absolute_address + self.current_size + while self.stream.readBytes(addr, 2) != '>>': + t = PDFName(self, "type[]") + yield t + name = t.value + self.info("Parsing PDFName '%s'" % name) + if name == "Size": + yield PDFNumber(self, "size", "Entries in the file cross-reference section") + elif name == "Prev": + yield PDFNumber(self, "offset") + elif name == "Root": + yield Catalog(self, "object_catalog") + elif name == "Info": + yield Catalog(self, "info") + elif name == "ID": + yield PDFArray(self, "id") + elif name == "Encrypt": + yield PDFDictionary(self, "decrypt") + else: + raise ParserError("Don't know trailer type '%s'" % name) + addr = self.absolute_address + self.current_size + yield String(self, "end_attribute_marker", 2) + yield LineEnd(self, "line_end[]") + yield String(self, "start_xref", 9) + yield LineEnd(self, "line_end[]") + yield PDFNumber(self, "cross_ref_table_start_address") + yield LineEnd(self, "line_end[]") + yield String(self, "end_marker", len(ENDMAGIC)) + yield LineEnd(self, "line_end[]") + +class PDFDocument(Parser): + endian = LITTLE_ENDIAN + PARSER_TAGS = { + "id": "pdf", + "category": "misc", + "file_ext": ("pdf",), + "mime": (u"application/pdf",), + "min_size": (5+4)*8, + "magic": ((MAGIC, 5),), + "description": "Portable Document Format (PDF) document" + } + + def validate(self): + if self.stream.readBytes(0, len(MAGIC)) != MAGIC: + return "Invalid 
magic string" + return True + + # Size is not always determined by position of "%%EOF": + # - updated documents have several of those + # - PDF files should be parsed from *end* + # => TODO: find when a document has been updated + + def createFields(self): + yield Header(self, "header") + yield Body(self, "body") + yield CrossReferenceTable(self, "cross_ref_table") + yield Trailer(self, "trailer") + diff --git a/libs/hachoir_parser/misc/pifv.py b/libs/hachoir_parser/misc/pifv.py new file mode 100644 index 0000000..d947473 --- /dev/null +++ b/libs/hachoir_parser/misc/pifv.py @@ -0,0 +1,241 @@ +""" +EFI Platform Initialization Firmware Volume parser. + +Author: Alexandre Boeglin +Creation date: 08 jul 2007 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt8, UInt16, UInt24, UInt32, UInt64, Enum, + CString, String, PaddingBytes, RawBytes, NullBytes) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.tools import paddingSize, humanFilesize +from hachoir_parser.common.win32 import GUID + +EFI_SECTION_COMPRESSION = 0x1 +EFI_SECTION_GUID_DEFINED = 0x2 +EFI_SECTION_PE32 = 0x10 +EFI_SECTION_PIC = 0x11 +EFI_SECTION_TE = 0x12 +EFI_SECTION_DXE_DEPEX = 0x13 +EFI_SECTION_VERSION = 0x14 +EFI_SECTION_USER_INTERFACE = 0x15 +EFI_SECTION_COMPATIBILITY16 = 0x16 +EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17 +EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18 +EFI_SECTION_RAW = 0x19 +EFI_SECTION_PEI_DEPEX = 0x1b + +EFI_SECTION_TYPE = { + EFI_SECTION_COMPRESSION: "Encapsulation section where other sections" \ + + " are compressed", + EFI_SECTION_GUID_DEFINED: "Encapsulation section where other sections" \ + + " have format defined by a GUID", + EFI_SECTION_PE32: "PE32+ Executable image", + EFI_SECTION_PIC: "Position-Independent Code", + EFI_SECTION_TE: "Terse Executable image", + EFI_SECTION_DXE_DEPEX: "DXE Dependency Expression", + EFI_SECTION_VERSION: "Version, Text and Numeric", + EFI_SECTION_USER_INTERFACE: "User-Friendly name of the driver", 
+ EFI_SECTION_COMPATIBILITY16: "DOS-style 16-bit EXE", + EFI_SECTION_FIRMWARE_VOLUME_IMAGE: "PI Firmware Volume image", + EFI_SECTION_FREEFORM_SUBTYPE_GUID: "Raw data with GUID in header to" \ + + " define format", + EFI_SECTION_RAW: "Raw data", + EFI_SECTION_PEI_DEPEX: "PEI Dependency Expression", +} + +EFI_FV_FILETYPE_RAW = 0x1 +EFI_FV_FILETYPE_FREEFORM = 0x2 +EFI_FV_FILETYPE_SECURITY_CORE = 0x3 +EFI_FV_FILETYPE_PEI_CORE = 0x4 +EFI_FV_FILETYPE_DXE_CORE = 0x5 +EFI_FV_FILETYPE_PEIM = 0x6 +EFI_FV_FILETYPE_DRIVER = 0x7 +EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x8 +EFI_FV_FILETYPE_APPLICATION = 0x9 +EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0xb +EFI_FV_FILETYPE_FFS_PAD = 0xf0 + +EFI_FV_FILETYPE = { + EFI_FV_FILETYPE_RAW: "Binary data", + EFI_FV_FILETYPE_FREEFORM: "Sectioned data", + EFI_FV_FILETYPE_SECURITY_CORE: "Platform core code used during the SEC" \ + + " phase", + EFI_FV_FILETYPE_PEI_CORE: "PEI Foundation", + EFI_FV_FILETYPE_DXE_CORE: "DXE Foundation", + EFI_FV_FILETYPE_PEIM: "PEI module (PEIM)", + EFI_FV_FILETYPE_DRIVER: "DXE driver", + EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER: "Combined PEIM/DXE driver", + EFI_FV_FILETYPE_APPLICATION: "Application", + EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE: "Firmware volume image", + EFI_FV_FILETYPE_FFS_PAD: "Pad File For FFS", +} +for x in xrange(0xc0, 0xe0): + EFI_FV_FILETYPE[x] = "OEM File" +for x in xrange(0xe0, 0xf0): + EFI_FV_FILETYPE[x] = "Debug/Test File" +for x in xrange(0xf1, 0x100): + EFI_FV_FILETYPE[x] = "Firmware File System Specific File" + + +class BlockMap(FieldSet): + static_size = 8*8 + def createFields(self): + yield UInt32(self, "num_blocks") + yield UInt32(self, "len") + + def createDescription(self): + return "%d blocks of %s" % ( + self["num_blocks"].value, humanFilesize(self["len"].value)) + + +class FileSection(FieldSet): + COMPRESSION_TYPE = { + 0: 'Not Compressed', + 1: 'Standard Compression', + } + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = self["size"].value * 
8 + section_type = self["type"].value + if section_type in (EFI_SECTION_DXE_DEPEX, EFI_SECTION_PEI_DEPEX): + # These sections can sometimes be longer than what their size + # claims! It's so nice to have so detailled specs and not follow + # them ... + if self.stream.readBytes(self.absolute_address + + self._size, 1) == '\0': + self._size = self._size + 16 + + def createFields(self): + # Header + yield UInt24(self, "size") + yield Enum(UInt8(self, "type"), EFI_SECTION_TYPE) + section_type = self["type"].value + + if section_type == EFI_SECTION_COMPRESSION: + yield UInt32(self, "uncomp_len") + yield Enum(UInt8(self, "comp_type"), self.COMPRESSION_TYPE) + elif section_type == EFI_SECTION_FREEFORM_SUBTYPE_GUID: + yield GUID(self, "sub_type_guid") + elif section_type == EFI_SECTION_GUID_DEFINED: + yield GUID(self, "section_definition_guid") + yield UInt16(self, "data_offset") + yield UInt16(self, "attributes") + elif section_type == EFI_SECTION_USER_INTERFACE: + yield CString(self, "file_name", charset="UTF-16-LE") + elif section_type == EFI_SECTION_VERSION: + yield UInt16(self, "build_number") + yield CString(self, "version", charset="UTF-16-LE") + + # Content + content_size = (self.size - self.current_size) // 8 + if content_size == 0: + return + + if section_type == EFI_SECTION_COMPRESSION: + compression_type = self["comp_type"].value + if compression_type == 1: + while not self.eof: + yield RawBytes(self, "compressed_content", content_size) + else: + while not self.eof: + yield FileSection(self, "section[]") + elif section_type == EFI_SECTION_FIRMWARE_VOLUME_IMAGE: + yield FirmwareVolume(self, "firmware_volume") + else: + yield RawBytes(self, "content", content_size, + EFI_SECTION_TYPE.get(self["type"].value, + "Unknown Section Type")) + + def createDescription(self): + return EFI_SECTION_TYPE.get(self["type"].value, + "Unknown Section Type") + + +class File(FieldSet): + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = 
self["size"].value * 8 + + def createFields(self): + # Header + yield GUID(self, "name") + yield UInt16(self, "integrity_check") + yield Enum(UInt8(self, "type"), EFI_FV_FILETYPE) + yield UInt8(self, "attributes") + yield UInt24(self, "size") + yield UInt8(self, "state") + + # Content + while not self.eof: + yield FileSection(self, "section[]") + + def createDescription(self): + return "%s: %s containing %d section(s)" % ( + self["name"].value, + self["type"].display, + len(self.array("section"))) + + +class FirmwareVolume(FieldSet): + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + if not self._size: + self._size = self["volume_len"].value * 8 + + def createFields(self): + # Header + yield NullBytes(self, "zero_vector", 16) + yield GUID(self, "fs_guid") + yield UInt64(self, "volume_len") + yield String(self, "signature", 4) + yield UInt32(self, "attributes") + yield UInt16(self, "header_len") + yield UInt16(self, "checksum") + yield UInt16(self, "ext_header_offset") + yield UInt8(self, "reserved") + yield UInt8(self, "revision") + while True: + bm = BlockMap(self, "block_map[]") + yield bm + if bm['num_blocks'].value == 0 and bm['len'].value == 0: + break + # TODO must handle extended header + + # Content + while not self.eof: + padding = paddingSize(self.current_size // 8, 8) + if padding: + yield PaddingBytes(self, "padding[]", padding) + yield File(self, "file[]") + + def createDescription(self): + return "Firmware Volume containing %d file(s)" % len(self.array("file")) + + +class PIFVFile(Parser): + endian = LITTLE_ENDIAN + MAGIC = '_FVH' + PARSER_TAGS = { + "id": "pifv", + "category": "program", + "file_ext": ("bin", ""), + "min_size": 64*8, # smallest possible header + "magic_regex": (("\0{16}.{24}%s" % MAGIC, 0), ), + "description": "EFI Platform Initialization Firmware Volume", + } + + def validate(self): + if self.stream.readBytes(40*8, 4) != self.MAGIC: + return "Invalid magic number" + if self.stream.readBytes(0, 16) != 
class Integer(FieldSet):
    """Bencoded integer: 'i' <decimal digits> 'e'."""
    def createFields(self):
        yield String(self, "start", 1, "Integer start delimiter (i)", charset="ASCII")

        # Find the end delimiter.  Renamed local from 'len', which
        # shadowed the builtin.
        addr = self.absolute_address + self.current_size
        length = self.stream.searchBytesLength('e', False, addr, addr+(MAX_INTEGER_SIZE+1)*8)
        if length is None:
            raise ParserError("Torrent: Unable to find integer end delimiter (e)!")
        if not length:
            raise ParserError("Torrent: error, empty integer!")

        yield String(self, "value", length, "Integer value", charset="ASCII")
        yield String(self, "end", 1, "Integer end delimiter")

    def createValue(self):
        """Read integer value (may raise ValueError)"""
        return int(self["value"].value)
class Dictionary(FieldSet):
    """Bencoded dictionary: 'd' <key><value>... 'e'."""
    def createFields(self):
        yield String(self, "start", 1, "Dictionary start delimiter (d)", charset="ASCII")
        # Emit key/value items until the end delimiter byte is reached.
        while True:
            here = self.absolute_address + self.current_size
            if self.stream.readBytes(here, 1) == "e":
                break
            yield DictionaryItem(self, "item[]")
        yield String(self, "end", 1, "Dictionary end delimiter")
# Create an entry
def Entry(parent, name):
    """Factory: build the bencoded field (Dictionary, Integer, List or
    TorrentString) that starts at the parent's current read position."""
    addr = parent.absolute_address + parent.current_size
    tag = parent.stream.readBytes(addr, 1)
    if tag not in TAGS:
        # BUG FIX: the message interpolated the *builtin* 'type' instead
        # of the offending tag byte.
        raise ParserError("Torrent: Entry of type %r not handled" % tag)
    cls = TAGS[tag]
    return cls(parent, name)
+ +Documents: + - "An Introduction to TrueType Fonts: A look inside the TTF format" + written by "NRSI: Computers & Writing Systems" + http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&item_id=IWS-Chapter08 + +Author: Victor Stinner +Creation date: 2007-02-08 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + UInt16, UInt32, Bit, Bits, + PaddingBits, NullBytes, + String, RawBytes, Bytes, Enum, + TimestampMac32) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler + +MAX_NAME_COUNT = 300 +MIN_NB_TABLE = 3 +MAX_NB_TABLE = 30 + +DIRECTION_NAME = { + 0: u"Mixed directional", + 1: u"Left to right", + 2: u"Left to right + neutrals", + -1: u"Right to left", + -2: u"Right to left + neutrals", +} + +NAMEID_NAME = { + 0: u"Copyright notice", + 1: u"Font family name", + 2: u"Font subfamily name", + 3: u"Unique font identifier", + 4: u"Full font name", + 5: u"Version string", + 6: u"Postscript name", + 7: u"Trademark", + 8: u"Manufacturer name", + 9: u"Designer", + 10: u"Description", + 11: u"URL Vendor", + 12: u"URL Designer", + 13: u"License Description", + 14: u"License info URL", + 16: u"Preferred Family", + 17: u"Preferred Subfamily", + 18: u"Compatible Full", + 19: u"Sample text", + 20: u"PostScript CID findfont name", +} + +PLATFORM_NAME = { + 0: "Unicode", + 1: "Macintosh", + 2: "ISO", + 3: "Microsoft", + 4: "Custom", +} + +CHARSET_MAP = { + # (platform, encoding) => charset + 0: {3: "UTF-16-BE"}, + 1: {0: "MacRoman"}, + 3: {1: "UTF-16-BE"}, +} + +class TableHeader(FieldSet): + def createFields(self): + yield String(self, "tag", 4) + yield textHandler(UInt32(self, "checksum"), hexadecimal) + yield UInt32(self, "offset") + yield filesizeHandler(UInt32(self, "size")) + + def createDescription(self): + return "Table entry: %s (%s)" % (self["tag"].display, self["size"].display) + +class NameHeader(FieldSet): + def createFields(self): + yield 
Enum(UInt16(self, "platformID"), PLATFORM_NAME) + yield UInt16(self, "encodingID") + yield UInt16(self, "languageID") + yield Enum(UInt16(self, "nameID"), NAMEID_NAME) + yield UInt16(self, "length") + yield UInt16(self, "offset") + + def getCharset(self): + platform = self["platformID"].value + encoding = self["encodingID"].value + try: + return CHARSET_MAP[platform][encoding] + except KeyError: + self.warning("TTF: Unknown charset (%s,%s)" % (platform, encoding)) + return "ISO-8859-1" + + def createDescription(self): + platform = self["platformID"].display + name = self["nameID"].display + return "Name record: %s (%s)" % (name, platform) + +def parseFontHeader(self): + yield UInt16(self, "maj_ver", "Major version") + yield UInt16(self, "min_ver", "Minor version") + yield UInt16(self, "font_maj_ver", "Font major version") + yield UInt16(self, "font_min_ver", "Font minor version") + yield textHandler(UInt32(self, "checksum"), hexadecimal) + yield Bytes(self, "magic", 4, r"Magic string (\x5F\x0F\x3C\xF5)") + if self["magic"].value != "\x5F\x0F\x3C\xF5": + raise ParserError("TTF: invalid magic of font header") + + # Flags + yield Bit(self, "y0", "Baseline at y=0") + yield Bit(self, "x0", "Left sidebearing point at x=0") + yield Bit(self, "instr_point", "Instructions may depend on point size") + yield Bit(self, "ppem", "Force PPEM to integer values for all") + yield Bit(self, "instr_width", "Instructions may alter advance width") + yield Bit(self, "vertical", "e laid out vertically?") + yield PaddingBits(self, "reserved[]", 1) + yield Bit(self, "linguistic", "Requires layout for correct linguistic rendering?") + yield Bit(self, "gx", "Metamorphosis effects?") + yield Bit(self, "strong", "Contains strong right-to-left glyphs?") + yield Bit(self, "indic", "contains Indic-style rearrangement effects?") + yield Bit(self, "lossless", "Data is lossless (Agfa MicroType compression)") + yield Bit(self, "converted", "Font converted (produce compatible metrics)") + yield 
    yield Bit(self, "cleartype", "Optimised for ClearType")
    yield Bits(self, "adobe", 2, "(used by Adobe)")

    yield UInt16(self, "unit_per_em", "Units per em")
    if not(16 <= self["unit_per_em"].value <= 16384):
        raise ParserError("TTF: Invalid unit/em value")
    yield UInt32(self, "created_high")
    yield TimestampMac32(self, "created")
    yield UInt32(self, "modified_high")
    yield TimestampMac32(self, "modified")
    # NOTE(review): the TrueType 'head' table declares the bounding box as
    # signed FWords; UInt16 here follows the vendored original -- confirm
    # against upstream hachoir before changing.
    yield UInt16(self, "xmin")
    yield UInt16(self, "ymin")
    yield UInt16(self, "xmax")
    yield UInt16(self, "ymax")

    # Mac style (bit flags)
    yield Bit(self, "bold")
    yield Bit(self, "italic")
    yield Bit(self, "underline")
    yield Bit(self, "outline")
    yield Bit(self, "shadow")
    yield Bit(self, "condensed", "(narrow)")
    yield Bit(self, "expanded")
    yield PaddingBits(self, "reserved[]", 9)

    yield UInt16(self, "lowest", "Smallest readable size in pixels")
    yield Enum(UInt16(self, "font_dir", "Font direction hint"), DIRECTION_NAME)
    yield Enum(UInt16(self, "ofst_format"), {0: "short offsets", 1: "long"})
    yield UInt16(self, "glyph_format", "(=0)")

def parseNames(self):
    """Parse a TrueType 'name' table: a format/count/offset header, an
    index of NameHeader records, then the string values they point at."""
    # Read header
    yield UInt16(self, "format")
    if self["format"].value != 0:
        raise ParserError("TTF (names): Invalid format (%u)" % self["format"].value)
    yield UInt16(self, "count")
    yield UInt16(self, "offset")
    if MAX_NAME_COUNT < self["count"].value:
        raise ParserError("Invalid number of names (%s)"
            % self["count"].value)

    # Read name index
    entries = []
    for index in xrange(self["count"].value):
        entry = NameHeader(self, "header[]")
        yield entry
        entries.append(entry)

    # Sort names by their offset so the values can be read sequentially
    entries.sort(key=lambda field: field["offset"].value)

    # Read name value
    last = None
    for entry in entries:
        # Skip duplicates values (records sharing the same offset+length)
        new = (entry["offset"].value, entry["length"].value)
        if last and last == new:
            self.warning("Skip duplicate %s %s" % (entry.name, new))
            continue
        last = (entry["offset"].value, entry["length"].value)

        # Skip negative offset: a value starting before the current read
        # position cannot be reached by this forward-only parser
        offset = entry["offset"].value + self["offset"].value
        if offset < self.current_size//8:
            self.warning("Skip value %s (negative offset)" % entry.name)
            continue

        # Add padding if any
        padding = self.seekByte(offset, relative=True, null=True)
        if padding:
            yield padding

        # Read value
        size = entry["length"].value
        if size:
            yield String(self, "value[]", size, entry.description, charset=entry.getCharset())

    # Pad out to the declared end of the table
    padding = (self.size - self.current_size) // 8
    if padding:
        yield NullBytes(self, "padding_end", padding)

class Table(FieldSet):
    """One TrueType table; content is parsed when the tag is known,
    otherwise kept as raw bytes."""
    # Map table tag -> (field name, description, parser generator)
    TAG_INFO = {
        "head": ("header", "Font header", parseFontHeader),
        "name": ("names", "Names", parseNames),
    }

    def __init__(self, parent, name, table, **kw):
        # table: the TableHeader field holding this table's tag/offset/size
        FieldSet.__init__(self, parent, name, **kw)
        self.table = table
        tag = table["tag"].value
        if tag in self.TAG_INFO:
            self._name, self._description, self.parser = self.TAG_INFO[tag]
        else:
            self.parser = None

    def createFields(self):
        if self.parser:
            for field in self.parser(self):
                yield field
        else:
            # Unknown tag: expose the table content unparsed
            yield RawBytes(self, "content", self.size//8)

    def createDescription(self):
        return "Table %s (%s)" % (self.table["tag"].value, self.table.path)

class TrueTypeFontFile(Parser):
    """TrueType font parser: offset table, table directory, then tables."""
    endian = BIG_ENDIAN
    PARSER_TAGS = {
        "id": "ttf",
        "category": "misc",
        "file_ext": ("ttf",),
        "min_size": 10*8, # FIXME
        "description": "TrueType font",
    }

    def validate(self):
        if self["maj_ver"].value != 1:
            return "Invalid major version (%u)" % self["maj_ver"].value
        if self["min_ver"].value != 0:
            return "Invalid minor version (%u)" % self["min_ver"].value
        if not (MIN_NB_TABLE <= self["nb_table"].value <= MAX_NB_TABLE):
            return "Invalid number of table (%u)" % self["nb_table"].value
        return True

    def createFields(self):
        yield UInt16(self, "maj_ver", "Major version")
        yield UInt16(self, "min_ver", "Minor version")
        yield UInt16(self, "nb_table")
        yield UInt16(self, "search_range")
        yield UInt16(self, "entry_selector")
        yield UInt16(self, "range_shift")
        # Table directory: one header per table
        tables = []
        for index in xrange(self["nb_table"].value):
            table = TableHeader(self, "table_hdr[]")
            yield table
            tables.append(table)
        # Read table contents in file order
        tables.sort(key=lambda field: field["offset"].value)
        for table in tables:
            padding = self.seekByte(table["offset"].value, null=True)
            if padding:
                yield padding
            size = table["size"].value
            if size:
                yield Table(self, "table[]", table, size=size*8)
        padding = self.seekBit(self.size, null=True)
        if padding:
            yield padding
+""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + Bit, Bits, + UInt8, Int16, UInt16, UInt32, Int32, + NullBytes, RawBytes, PascalString16, + DateTimeMSDOS32) +from hachoir_core.endian import LITTLE_ENDIAN + +TIMESTAMP = DateTimeMSDOS32 + +class BaseWordDocument: + def createFields(self): + yield UInt16(self, "wIdent", 2) + yield UInt16(self, "nFib") + yield UInt16(self, "nProduct") + yield UInt16(self, "lid") + yield Int16(self, "pnNext") + + yield Bit(self, "fDot") + yield Bit(self, "fGlsy") + yield Bit(self, "fComplex") + yield Bit(self, "fHasPic") + yield Bits(self, "cQuickSaves", 4) + yield Bit(self, "fEncrypted") + yield Bit(self, "fWhichTblStm") + yield Bit(self, "fReadOnlyRecommanded") + yield Bit(self, "fWriteReservation") + yield Bit(self, "fExtChar") + yield Bit(self, "fLoadOverride") + yield Bit(self, "fFarEeast") + yield Bit(self, "fCrypto") + + yield UInt16(self, "nFibBack") + yield UInt32(self, "lKey") + yield UInt8(self, "envr") + + yield Bit(self, "fMac") + yield Bit(self, "fEmptySpecial") + yield Bit(self, "fLoadOverridePage") + yield Bit(self, "fFutureSavedUndo") + yield Bit(self, "fWord97Save") + yield Bits(self, "fSpare0", 3) + + yield UInt16(self, "chse") + yield UInt16(self, "chsTables") + yield UInt32(self, "fcMin") + yield UInt32(self, "fcMac") + + yield PascalString16(self, "file_creator", strip="\0") + + yield NullBytes(self, "reserved[]", 12) + + yield Int16(self, "lidFE") + yield UInt16(self, "clw") + yield Int32(self, "cbMac") + yield UInt32(self, "lProductCreated") + yield TIMESTAMP(self, "lProductRevised") + + yield UInt32(self, "ccpText") + yield Int32(self, "ccpFtn") + yield Int32(self, "ccpHdr") + yield Int32(self, "ccpMcr") + yield Int32(self, "ccpAtn") + yield Int32(self, "ccpEdn") + yield Int32(self, "ccpTxbx") + yield Int32(self, "ccpHdrTxbx") + yield Int32(self, "pnFbpChpFirst") + yield Int32(self, "pnChpFirst") + yield Int32(self, "cpnBteChp") + yield Int32(self, "pnFbpPapFirst") + yield 
Int32(self, "pnPapFirst") + yield Int32(self, "cpnBtePap") + yield Int32(self, "pnFbpLvcFirst") + yield Int32(self, "pnLvcFirst") + yield Int32(self, "cpnBteLvc") + yield Int32(self, "fcIslandFirst") + yield Int32(self, "fcIslandLim") + yield UInt16(self, "cfclcb") + yield Int32(self, "fcStshfOrig") + yield UInt32(self, "lcbStshfOrig") + yield Int32(self, "fcStshf") + yield UInt32(self, "lcbStshf") + + yield Int32(self, "fcPlcffndRef") + yield UInt32(self, "lcbPlcffndRef") + yield Int32(self, "fcPlcffndTxt") + yield UInt32(self, "lcbPlcffndTxt") + yield Int32(self, "fcPlcfandRef") + yield UInt32(self, "lcbPlcfandRef") + yield Int32(self, "fcPlcfandTxt") + yield UInt32(self, "lcbPlcfandTxt") + yield Int32(self, "fcPlcfsed") + yield UInt32(self, "lcbPlcfsed") + yield Int32(self, "fcPlcpad") + yield UInt32(self, "lcbPlcpad") + yield Int32(self, "fcPlcfphe") + yield UInt32(self, "lcbPlcfphe") + yield Int32(self, "fcSttbfglsy") + yield UInt32(self, "lcbSttbfglsy") + yield Int32(self, "fcPlcfglsy") + yield UInt32(self, "lcbPlcfglsy") + yield Int32(self, "fcPlcfhdd") + yield UInt32(self, "lcbPlcfhdd") + yield Int32(self, "fcPlcfbteChpx") + yield UInt32(self, "lcbPlcfbteChpx") + yield Int32(self, "fcPlcfbtePapx") + yield UInt32(self, "lcbPlcfbtePapx") + yield Int32(self, "fcPlcfsea") + yield UInt32(self, "lcbPlcfsea") + yield Int32(self, "fcSttbfffn") + yield UInt32(self, "lcbSttbfffn") + yield Int32(self, "fcPlcffldMom") + yield UInt32(self, "lcbPlcffldMom") + yield Int32(self, "fcPlcffldHdr") + yield UInt32(self, "lcbPlcffldHdr") + yield Int32(self, "fcPlcffldFtn") + yield UInt32(self, "lcbPlcffldFtn") + yield Int32(self, "fcPlcffldAtn") + yield UInt32(self, "lcbPlcffldAtn") + yield Int32(self, "fcPlcffldMcr") + yield UInt32(self, "lcbPlcffldMcr") + yield Int32(self, "fcSttbfbkmk") + yield UInt32(self, "lcbSttbfbkmk") + yield Int32(self, "fcPlcfbkf") + yield UInt32(self, "lcbPlcfbkf") + yield Int32(self, "fcPlcfbkl") + yield UInt32(self, "lcbPlcfbkl") + yield Int32(self, 
"fcCmds") + yield UInt32(self, "lcbCmds") + yield Int32(self, "fcPlcmcr") + yield UInt32(self, "lcbPlcmcr") + yield Int32(self, "fcSttbfmcr") + yield UInt32(self, "lcbSttbfmcr") + yield Int32(self, "fcPrDrvr") + yield UInt32(self, "lcbPrDrvr") + yield Int32(self, "fcPrEnvPort") + yield UInt32(self, "lcbPrEnvPort") + yield Int32(self, "fcPrEnvLand") + yield UInt32(self, "lcbPrEnvLand") + yield Int32(self, "fcWss") + yield UInt32(self, "lcbWss") + yield Int32(self, "fcDop") + yield UInt32(self, "lcbDop") + yield Int32(self, "fcSttbfAssoc") + yield UInt32(self, "lcbSttbfAssoc") + yield Int32(self, "fcClx") + yield UInt32(self, "lcbClx") + yield Int32(self, "fcPlcfpgdFtn") + yield UInt32(self, "lcbPlcfpgdFtn") + yield Int32(self, "fcAutosaveSource") + yield UInt32(self, "lcbAutosaveSource") + yield Int32(self, "fcGrpXstAtnOwners") + yield UInt32(self, "lcbGrpXstAtnOwners") + yield Int32(self, "fcSttbfAtnbkmk") + yield UInt32(self, "lcbSttbfAtnbkmk") + yield Int32(self, "fcPlcdoaMom") + yield UInt32(self, "lcbPlcdoaMom") + yield Int32(self, "fcPlcdoaHdr") + yield UInt32(self, "lcbPlcdoaHdr") + yield Int32(self, "fcPlcspaMom") + yield UInt32(self, "lcbPlcspaMom") + yield Int32(self, "fcPlcspaHdr") + yield UInt32(self, "lcbPlcspaHdr") + yield Int32(self, "fcPlcfAtnbkf") + yield UInt32(self, "lcbPlcfAtnbkf") + yield Int32(self, "fcPlcfAtnbkl") + yield UInt32(self, "lcbPlcfAtnbkl") + yield Int32(self, "fcPms") + yield UInt32(self, "lcbPms") + yield Int32(self, "fcFormFldSttbs") + yield UInt32(self, "lcbFormFldSttbs") + yield Int32(self, "fcPlcfendRef") + yield UInt32(self, "lcbPlcfendRef") + yield Int32(self, "fcPlcfendTxt") + yield UInt32(self, "lcbPlcfendTxt") + yield Int32(self, "fcPlcffldEdn") + yield UInt32(self, "lcbPlcffldEdn") + yield Int32(self, "fcPlcfpgdEdn") + yield UInt32(self, "lcbPlcfpgdEdn") + yield Int32(self, "fcDggInfo") + yield UInt32(self, "lcbDggInfo") + yield Int32(self, "fcSttbfRMark") + yield UInt32(self, "lcbSttbfRMark") + yield Int32(self, 
"fcSttbCaption") + yield UInt32(self, "lcbSttbCaption") + yield Int32(self, "fcSttbAutoCaption") + yield UInt32(self, "lcbSttbAutoCaption") + yield Int32(self, "fcPlcfwkb") + yield UInt32(self, "lcbPlcfwkb") + yield Int32(self, "fcPlcfspl") + yield UInt32(self, "lcbPlcfspl") + yield Int32(self, "fcPlcftxbxTxt") + yield UInt32(self, "lcbPlcftxbxTxt") + yield Int32(self, "fcPlcffldTxbx") + yield UInt32(self, "lcbPlcffldTxbx") + yield Int32(self, "fcPlcfhdrtxbxTxt") + yield UInt32(self, "lcbPlcfhdrtxbxTxt") + yield Int32(self, "fcPlcffldHdrTxbx") + yield UInt32(self, "lcbPlcffldHdrTxbx") + yield Int32(self, "fcStwUser") + yield UInt32(self, "lcbStwUser") + yield Int32(self, "fcSttbttmbd") + yield UInt32(self, "cbSttbttmbd") + yield Int32(self, "fcUnused") + yield UInt32(self, "lcbUnused") + yield Int32(self, "fcPgdMother") + yield UInt32(self, "lcbPgdMother") + yield Int32(self, "fcBkdMother") + yield UInt32(self, "lcbBkdMother") + yield Int32(self, "fcPgdFtn") + yield UInt32(self, "lcbPgdFtn") + yield Int32(self, "fcBkdFtn") + yield UInt32(self, "lcbBkdFtn") + yield Int32(self, "fcPgdEdn") + yield UInt32(self, "lcbPgdEdn") + yield Int32(self, "fcBkdEdn") + yield UInt32(self, "lcbBkdEdn") + yield Int32(self, "fcSttbfIntlFld") + yield UInt32(self, "lcbSttbfIntlFld") + yield Int32(self, "fcRouteSlip") + yield UInt32(self, "lcbRouteSlip") + yield Int32(self, "fcSttbSavedBy") + yield UInt32(self, "lcbSttbSavedBy") + yield Int32(self, "fcSttbFnm") + yield UInt32(self, "lcbSttbFnm") + yield Int32(self, "fcPlcfLst") + yield UInt32(self, "lcbPlcfLst") + yield Int32(self, "fcPlfLfo") + yield UInt32(self, "lcbPlfLfo") + yield Int32(self, "fcPlcftxbxBkd") + yield UInt32(self, "lcbPlcftxbxBkd") + yield Int32(self, "fcPlcftxbxHdrBkd") + yield UInt32(self, "lcbPlcftxbxHdrBkd") + yield Int32(self, "fcDocUndo") + yield UInt32(self, "lcbDocUndo") + yield Int32(self, "fcRgbuse") + yield UInt32(self, "lcbRgbuse") + yield Int32(self, "fcUsp") + yield UInt32(self, "lcbUsp") + yield 
Int32(self, "fcUskf") + yield UInt32(self, "lcbUskf") + yield Int32(self, "fcPlcupcRgbuse") + yield UInt32(self, "lcbPlcupcRgbuse") + yield Int32(self, "fcPlcupcUsp") + yield UInt32(self, "lcbPlcupcUsp") + yield Int32(self, "fcSttbGlsyStyle") + yield UInt32(self, "lcbSttbGlsyStyle") + yield Int32(self, "fcPlgosl") + yield UInt32(self, "lcbPlgosl") + yield Int32(self, "fcPlcocx") + yield UInt32(self, "lcbPlcocx") + yield Int32(self, "fcPlcfbteLvc") + yield UInt32(self, "lcbPlcfbteLvc") + yield TIMESTAMP(self, "ftModified") + yield Int32(self, "fcPlcflvc") + yield UInt32(self, "lcbPlcflvc") + yield Int32(self, "fcPlcasumy") + yield UInt32(self, "lcbPlcasumy") + yield Int32(self, "fcPlcfgram") + yield UInt32(self, "lcbPlcfgram") + yield Int32(self, "fcSttbListNames") + yield UInt32(self, "lcbSttbListNames") + yield Int32(self, "fcSttbfUssr") + yield UInt32(self, "lcbSttbfUssr") + + tail = (self.size - self.current_size) // 8 + if tail: + yield RawBytes(self, "tail", tail) + +class WordDocumentFieldSet(BaseWordDocument, FieldSet): + pass + +class WordDocumentParser(BaseWordDocument, Parser): + PARSER_TAGS = { + "id": "word_document", + "min_size": 8, + "description": "Microsoft Office Word document", + } + endian = LITTLE_ENDIAN + + def __init__(self, stream, **kw): + Parser.__init__(self, stream, **kw) + + def validate(self): + return True + diff --git a/libs/hachoir_parser/network/__init__.py b/libs/hachoir_parser/network/__init__.py new file mode 100644 index 0000000..a7fe247 --- /dev/null +++ b/libs/hachoir_parser/network/__init__.py @@ -0,0 +1,2 @@ +from hachoir_parser.network.tcpdump import TcpdumpFile + diff --git a/libs/hachoir_parser/network/common.py b/libs/hachoir_parser/network/common.py new file mode 100644 index 0000000..d6e9fea --- /dev/null +++ b/libs/hachoir_parser/network/common.py @@ -0,0 +1,118 @@ +from hachoir_core.field import FieldSet, Field, Bits +from hachoir_core.bits import str2hex +from hachoir_parser.network.ouid import REGISTERED_OUID 
from hachoir_core.endian import BIG_ENDIAN
from socket import gethostbyaddr, herror as socket_host_error

def ip2name(addr):
    """Resolve an IPv4 address string to a host name, with caching.

    Results are memoized in ip2name.cache. After the first hard resolver
    failure, ip2name.resolve is cleared and all later calls return the
    address unchanged.
    """
    if not ip2name.resolve:
        return addr
    try:
        if addr in ip2name.cache:
            return ip2name.cache[addr]
        # FIXME: Workaround Python bug
        # Need double try/except to catch the bug
        try:
            name = gethostbyaddr(addr)[0]
        except KeyboardInterrupt:
            raise
        except (socket_host_error, ValueError):
            name = addr
    except (socket_host_error, KeyboardInterrupt, ValueError):
        # Resolver unusable: disable look-ups for the rest of the run
        ip2name.resolve = False
        name = addr
    ip2name.cache[addr] = name
    return name
ip2name.cache = {}
ip2name.resolve = True

class IPv4_Address(Field):
    """32-bit IPv4 address; value is dotted-quad, display is resolved name."""
    def __init__(self, parent, name, description=None):
        Field.__init__(self, parent, name, 32, description)

    def createValue(self):
        value = self._parent.stream.readBytes(self.absolute_address, 4)
        return ".".join(( "%u" % ord(byte) for byte in value ))

    def createDisplay(self):
        return ip2name(self.value)

class IPv6_Address(Field):
    """128-bit IPv6 address; value is eight colon-separated hex groups."""
    def __init__(self, parent, name, description=None):
        Field.__init__(self, parent, name, 128, description)

    def createValue(self):
        value = self._parent.stream.readBits(self.absolute_address, 128, self.parent.endian)
        parts = []
        for index in xrange(8):
            part = "%04x" % (value & 0xffff)
            value >>= 16
            parts.append(part)
        return ':'.join(reversed(parts))

    def createDisplay(self):
        return self.value

class OrganizationallyUniqueIdentifier(Bits):
    """
    IEEE 24-bit Organizationally unique identifier
    """
    static_size = 24

    def __init__(self, parent, name, description=None):
        # BUGFIX: the original passed description=None, silently discarding
        # any description supplied by the caller.
        Bits.__init__(self, parent, name, 24, description=description)

    def createDisplay(self, human=True):
        # Human display: registered vendor name when known
        if human:
            key = self.value
            if key in REGISTERED_OUID:
                return REGISTERED_OUID[key]
            else:
                return self.raw_display
        else:
            return self.raw_display

    def createRawDisplay(self):
        value = self.value
        a = value >> 16
        b = (value >> 8) & 0xFF
        c = value & 0xFF
        return "%02X-%02X-%02X" % (a, b, c)

class NIC24(Bits):
    """24-bit NIC-specific part of a MAC address."""
    static_size = 24

    def __init__(self, parent, name, description=None):
        # BUGFIX: same as OrganizationallyUniqueIdentifier -- forward the
        # caller's description instead of a literal None.
        Bits.__init__(self, parent, name, 24, description=description)

    def createDisplay(self):
        value = self.value
        a = value >> 16
        b = (value >> 8) & 0xFF
        c = value & 0xFF
        return "%02x:%02x:%02x" % (a, b, c)

    def createRawDisplay(self):
        return "0x%06X" % self.value

class MAC48_Address(FieldSet):
    """
    IEEE 802 48-bit MAC address
    """
    static_size = 48
    endian = BIG_ENDIAN

    def createFields(self):
        yield OrganizationallyUniqueIdentifier(self, "organization")
        yield NIC24(self, "nic")

    def hasValue(self):
        return True

    def createValue(self):
        # Six raw bytes formatted as colon-separated hex (trailing ':' cut)
        raw = self.stream.readBytes(self.absolute_address, 6)
        return str2hex(raw, format="%02x:")[:-1]

    def createDisplay(self):
        return "%s [%s]" % (self["organization"].display, self["nic"].display)
+ +Original data file: +http://standards.ieee.org/regauth/oui/oui.txt +""" + +REGISTERED_OUID = { + 0x000000: u'XEROX CORPORATION', + 0x000001: u'XEROX CORPORATION', + 0x000002: u'XEROX CORPORATION', + 0x000003: u'XEROX CORPORATION', + 0x000004: u'XEROX CORPORATION', + 0x000005: u'XEROX CORPORATION', + 0x000006: u'XEROX CORPORATION', + 0x000007: u'XEROX CORPORATION', + 0x000008: u'XEROX CORPORATION', + 0x000009: u'XEROX CORPORATION', + 0x00000A: u'OMRON TATEISI ELECTRONICS CO.', + 0x00000B: u'MATRIX CORPORATION', + 0x00000C: u'CISCO SYSTEMS, INC.', + 0x00000D: u'FIBRONICS LTD.', + 0x00000E: u'FUJITSU LIMITED', + 0x00000F: u'NEXT, INC.', + 0x000010: u'SYTEK INC.', + 0x000011: u'NORMEREL SYSTEMES', + 0x000012: u'INFORMATION TECHNOLOGY LIMITED', + 0x000013: u'CAMEX', + 0x000014: u'NETRONIX', + 0x000015: u'DATAPOINT CORPORATION', + 0x000016: u'DU PONT PIXEL SYSTEMS.', + 0x000017: u'TEKELEC', + 0x000018: u'WEBSTER COMPUTER CORPORATION', + 0x000019: u'APPLIED DYNAMICS INTERNATIONAL', + 0x00001A: u'ADVANCED MICRO DEVICES', + 0x00001B: u'NOVELL INC.', + 0x00001C: u'BELL TECHNOLOGIES', + 0x00001D: u'CABLETRON SYSTEMS, INC.', + 0x00001E: u'TELSIST INDUSTRIA ELECTRONICA', + 0x00001F: u'Telco Systems, Inc.', + 0x000020: u'DATAINDUSTRIER DIAB AB', + 0x000021: u'SUREMAN COMP. & COMMUN. 
CORP.', + 0x000022: u'VISUAL TECHNOLOGY INC.', + 0x000023: u'ABB INDUSTRIAL SYSTEMS AB', + 0x000024: u'CONNECT AS', + 0x000025: u'RAMTEK CORP.', + 0x000026: u'SHA-KEN CO., LTD.', + 0x000027: u'JAPAN RADIO COMPANY', + 0x000028: u'PRODIGY SYSTEMS CORPORATION', + 0x000029: u'IMC NETWORKS CORP.', + 0x00002A: u'TRW - SEDD/INP', + 0x00002B: u'CRISP AUTOMATION, INC', + 0x00002C: u'AUTOTOTE LIMITED', + 0x00002D: u'CHROMATICS INC', + 0x00002E: u'SOCIETE EVIRA', + 0x00002F: u'TIMEPLEX INC.', + 0x000030: u'VG LABORATORY SYSTEMS LTD', + 0x000031: u'QPSX COMMUNICATIONS PTY LTD', + 0x000032: u'Marconi plc', + 0x000033: u'EGAN MACHINERY COMPANY', + 0x000034: u'NETWORK RESOURCES CORPORATION', + 0x000035: u'SPECTRAGRAPHICS CORPORATION', + 0x000036: u'ATARI CORPORATION', + 0x000037: u'OXFORD METRICS LIMITED', + 0x000038: u'CSS LABS', + 0x000039: u'TOSHIBA CORPORATION', + 0x00003A: u'CHYRON CORPORATION', + 0x00003B: u'i Controls, Inc.', + 0x00003C: u'AUSPEX SYSTEMS INC.', + 0x00003D: u'UNISYS', + 0x00003E: u'SIMPACT', + 0x00003F: u'SYNTREX, INC.', + 0x000040: u'APPLICON, INC.', + 0x000041: u'ICE CORPORATION', + 0x000042: u'METIER MANAGEMENT SYSTEMS LTD.', + 0x000043: u'MICRO TECHNOLOGY', + 0x000044: u'CASTELLE CORPORATION', + 0x000045: u'FORD AEROSPACE & COMM. CORP.', + 0x000046: u'OLIVETTI NORTH AMERICA', + 0x000047: u'NICOLET INSTRUMENTS CORP.', + 0x000048: u'SEIKO EPSON CORPORATION', + 0x000049: u'APRICOT COMPUTERS, LTD', + 0x00004A: u'ADC CODENOLL TECHNOLOGY CORP.', + 0x00004B: u'ICL DATA OY', + 0x00004C: u'NEC CORPORATION', + 0x00004D: u'DCI CORPORATION', + 0x00004E: u'AMPEX CORPORATION', + 0x00004F: u'LOGICRAFT, INC.', + 0x000050: u'RADISYS CORPORATION', + 0x000051: u'HOB ELECTRONIC GMBH & CO. KG', + 0x000052: u'Intrusion.com, Inc.', + 0x000053: u'COMPUCORP', + 0x000054: u'MODICON, INC.', + 0x000055: u'COMMISSARIAT A L`ENERGIE ATOM.', + 0x000056: u'DR. B. 
STRUCK', + 0x000057: u'SCITEX CORPORATION LTD.', + 0x000058: u'RACORE COMPUTER PRODUCTS INC.', + 0x000059: u'HELLIGE GMBH', + 0x00005A: u'SysKonnect GmbH', + 0x00005B: u'ELTEC ELEKTRONIK AG', + 0x00005C: u'TELEMATICS INTERNATIONAL INC.', + 0x00005D: u'CS TELECOM', + 0x00005E: u'USC INFORMATION SCIENCES INST', + 0x00005F: u'SUMITOMO ELECTRIC IND., LTD.', + 0x000060: u'KONTRON ELEKTRONIK GMBH', + 0x000061: u'GATEWAY COMMUNICATIONS', + 0x000062: u'BULL HN INFORMATION SYSTEMS', + 0x000063: u'BARCO CONTROL ROOMS GMBH', + 0x000064: u'YOKOGAWA DIGITAL COMPUTER CORP', + 0x000065: u'Network General Corporation', + 0x000066: u'TALARIS SYSTEMS, INC.', + 0x000067: u'SOFT * RITE, INC.', + 0x000068: u'ROSEMOUNT CONTROLS', + 0x000069: u'CONCORD COMMUNICATIONS INC', + 0x00006A: u'COMPUTER CONSOLES INC.', + 0x00006B: u'SILICON GRAPHICS INC./MIPS', + 0x00006C: u'PRIVATE', + 0x00006D: u'CRAY COMMUNICATIONS, LTD.', + 0x00006E: u'ARTISOFT, INC.', + 0x00006F: u'Madge Ltd.', + 0x000070: u'HCL LIMITED', + 0x000071: u'ADRA SYSTEMS INC.', + 0x000072: u'MINIWARE TECHNOLOGY', + 0x000073: u'SIECOR CORPORATION', + 0x000074: u'RICOH COMPANY LTD.', + 0x000075: u'Nortel Networks', + 0x000076: u'ABEKAS VIDEO SYSTEM', + 0x000077: u'INTERPHASE CORPORATION', + 0x000078: u'LABTAM LIMITED', + 0x000079: u'NETWORTH INCORPORATED', + 0x00007A: u'DANA COMPUTER INC.', + 0x00007B: u'RESEARCH MACHINES', + 0x00007C: u'AMPERE INCORPORATED', + 0x00007D: u'SUN MICROSYSTEMS, INC.', + 0x00007E: u'CLUSTRIX CORPORATION', + 0x00007F: u'LINOTYPE-HELL AG', + 0x000080: u'CRAY COMMUNICATIONS A/S', + 0x000081: u'BAY NETWORKS', + 0x000082: u'LECTRA SYSTEMES SA', + 0x000083: u'TADPOLE TECHNOLOGY PLC', + 0x000084: u'SUPERNET', + 0x000085: u'CANON INC.', + 0x000086: u'MEGAHERTZ CORPORATION', + 0x000087: u'HITACHI, LTD.', + 0x000088: u'COMPUTER NETWORK TECH. 
CORP.', + 0x000089: u'CAYMAN SYSTEMS INC.', + 0x00008A: u'DATAHOUSE INFORMATION SYSTEMS', + 0x00008B: u'INFOTRON', + 0x00008C: u'Alloy Computer Products (Australia) Pty Ltd', + 0x00008D: u'VERDIX CORPORATION', + 0x00008E: u'SOLBOURNE COMPUTER, INC.', + 0x00008F: u'RAYTHEON COMPANY', + 0x000090: u'MICROCOM', + 0x000091: u'ANRITSU CORPORATION', + 0x000092: u'COGENT DATA TECHNOLOGIES', + 0x000093: u'PROTEON INC.', + 0x000094: u'ASANTE TECHNOLOGIES', + 0x000095: u'SONY TEKTRONIX CORP.', + 0x000096: u'MARCONI ELECTRONICS LTD.', + 0x000097: u'EPOCH SYSTEMS', + 0x000098: u'CROSSCOMM CORPORATION', + 0x000099: u'MTX, INC.', + 0x00009A: u'RC COMPUTER A/S', + 0x00009B: u'INFORMATION INTERNATIONAL, INC', + 0x00009C: u'ROLM MIL-SPEC COMPUTERS', + 0x00009D: u'LOCUS COMPUTING CORPORATION', + 0x00009E: u'MARLI S.A.', + 0x00009F: u'AMERISTAR TECHNOLOGIES INC.', + 0x0000A0: u'SANYO Electric Co., Ltd.', + 0x0000A1: u'MARQUETTE ELECTRIC CO.', + 0x0000A2: u'BAY NETWORKS', + 0x0000A3: u'NETWORK APPLICATION TECHNOLOGY', + 0x0000A4: u'ACORN COMPUTERS LIMITED', + 0x0000A5: u'COMPATIBLE SYSTEMS CORP.', + 0x0000A6: u'NETWORK GENERAL CORPORATION', + 0x0000A7: u'NETWORK COMPUTING DEVICES INC.', + 0x0000A8: u'STRATUS COMPUTER INC.', + 0x0000A9: u'NETWORK SYSTEMS CORP.', + 0x0000AA: u'XEROX CORPORATION', + 0x0000AB: u'LOGIC MODELING CORPORATION', + 0x0000AC: u'CONWARE COMPUTER CONSULTING', + 0x0000AD: u'BRUKER INSTRUMENTS INC.', + 0x0000AE: u'DASSAULT ELECTRONIQUE', + 0x0000AF: u'NUCLEAR DATA INSTRUMENTATION', + 0x0000B0: u'RND-RAD NETWORK DEVICES', + 0x0000B1: u'ALPHA MICROSYSTEMS INC.', + 0x0000B2: u'TELEVIDEO SYSTEMS, INC.', + 0x0000B3: u'CIMLINC INCORPORATED', + 0x0000B4: u'EDIMAX COMPUTER COMPANY', + 0x0000B5: u'DATABILITY SOFTWARE SYS. INC.', + 0x0000B6: u'MICRO-MATIC RESEARCH', + 0x0000B7: u'DOVE COMPUTER CORPORATION', + 0x0000B8: u'SEIKOSHA CO., LTD.', + 0x0000B9: u'MCDONNELL DOUGLAS COMPUTER SYS', + 0x0000BA: u'SIIG, INC.', + 0x0000BB: u'TRI-DATA', + 0x0000BC: u'ALLEN-BRADLEY CO. 
INC.', + 0x0000BD: u'MITSUBISHI CABLE COMPANY', + 0x0000BE: u'THE NTI GROUP', + 0x0000BF: u'SYMMETRIC COMPUTER SYSTEMS', + 0x0000C0: u'WESTERN DIGITAL CORPORATION', + 0x0000C1: u'Madge Ltd.', + 0x0000C2: u'INFORMATION PRESENTATION TECH.', + 0x0000C3: u'HARRIS CORP COMPUTER SYS DIV', + 0x0000C4: u'WATERS DIV. OF MILLIPORE', + 0x0000C5: u'FARALLON COMPUTING/NETOPIA', + 0x0000C6: u'EON SYSTEMS', + 0x0000C7: u'ARIX CORPORATION', + 0x0000C8: u'ALTOS COMPUTER SYSTEMS', + 0x0000C9: u'EMULEX CORPORATION', + 0x0000CA: u'ARRIS International', + 0x0000CB: u'COMPU-SHACK ELECTRONIC GMBH', + 0x0000CC: u'DENSAN CO., LTD.', + 0x0000CD: u'Allied Telesyn Research Ltd.', + 0x0000CE: u'MEGADATA CORP.', + 0x0000CF: u'HAYES MICROCOMPUTER PRODUCTS', + 0x0000D0: u'DEVELCON ELECTRONICS LTD.', + 0x0000D1: u'ADAPTEC INCORPORATED', + 0x0000D2: u'SBE, INC.', + 0x0000D3: u'WANG LABORATORIES INC.', + 0x0000D4: u'PURE DATA LTD.', + 0x0000D5: u'MICROGNOSIS INTERNATIONAL', + 0x0000D6: u'PUNCH LINE HOLDING', + 0x0000D7: u'DARTMOUTH COLLEGE', + 0x0000D8: u'NOVELL, INC.', + 0x0000D9: u'NIPPON TELEGRAPH & TELEPHONE', + 0x0000DA: u'ATEX', + 0x0000DB: u'BRITISH TELECOMMUNICATIONS PLC', + 0x0000DC: u'HAYES MICROCOMPUTER PRODUCTS', + 0x0000DD: u'TCL INCORPORATED', + 0x0000DE: u'CETIA', + 0x0000DF: u'BELL & HOWELL PUB SYS DIV', + 0x0000E0: u'QUADRAM CORP.', + 0x0000E1: u'GRID SYSTEMS', + 0x0000E2: u'ACER TECHNOLOGIES CORP.', + 0x0000E3: u'INTEGRATED MICRO PRODUCTS LTD', + 0x0000E4: u'IN2 GROUPE INTERTECHNIQUE', + 0x0000E5: u'SIGMEX LTD.', + 0x0000E6: u'APTOR PRODUITS DE COMM INDUST', + 0x0000E7: u'STAR GATE TECHNOLOGIES', + 0x0000E8: u'ACCTON TECHNOLOGY CORP.', + 0x0000E9: u'ISICAD, INC.', + 0x0000EA: u'UPNOD AB', + 0x0000EB: u'MATSUSHITA COMM. IND. CO. 
LTD.', + 0x0000EC: u'MICROPROCESS', + 0x0000ED: u'APRIL', + 0x0000EE: u'NETWORK DESIGNERS, LTD.', + 0x0000EF: u'KTI', + 0x0000F0: u'SAMSUNG ELECTRONICS CO., LTD.', + 0x0000F1: u'MAGNA COMPUTER CORPORATION', + 0x0000F2: u'SPIDER COMMUNICATIONS', + 0x0000F3: u'GANDALF DATA LIMITED', + 0x0000F4: u'ALLIED TELESYN INTERNATIONAL', + 0x0000F5: u'DIAMOND SALES LIMITED', + 0x0000F6: u'APPLIED MICROSYSTEMS CORP.', + 0x0000F7: u'YOUTH KEEP ENTERPRISE CO LTD', + 0x0000F8: u'DIGITAL EQUIPMENT CORPORATION', + 0x0000F9: u'QUOTRON SYSTEMS INC.', + 0x0000FA: u'MICROSAGE COMPUTER SYSTEMS INC', + 0x0000FB: u'RECHNER ZUR KOMMUNIKATION', + 0x0000FC: u'MEIKO', + 0x0000FD: u'HIGH LEVEL HARDWARE', + 0x0000FE: u'ANNAPOLIS MICRO SYSTEMS', + 0x0000FF: u'CAMTEC ELECTRONICS LTD.', + 0x000100: u'EQUIP\'TRANS', + 0x000101: u'PRIVATE', + 0x000102: u'3COM CORPORATION', + 0x000103: u'3COM CORPORATION', + 0x000104: u'DVICO Co., Ltd.', + 0x000105: u'BECKHOFF GmbH', + 0x000106: u'Tews Datentechnik GmbH', + 0x000107: u'Leiser GmbH', + 0x000108: u'AVLAB Technology, Inc.', + 0x000109: u'Nagano Japan Radio Co., Ltd.', + 0x00010A: u'CIS TECHNOLOGY INC.', + 0x00010B: u'Space CyberLink, Inc.', + 0x00010C: u'System Talks Inc.', + 0x00010D: u'CORECO, INC.', + 0x00010E: u'Bri-Link Technologies Co., Ltd', + 0x00010F: u'McDATA Corporation', + 0x000110: u'Gotham Networks', + 0x000111: u'iDigm Inc.', + 0x000112: u'Shark Multimedia Inc.', + 0x000113: u'OLYMPUS CORPORATION', + 0x000114: u'KANDA TSUSHIN KOGYO CO., LTD.', + 0x000115: u'EXTRATECH CORPORATION', + 0x000116: u'Netspect Technologies, Inc.', + 0x000117: u'CANAL +', + 0x000118: u'EZ Digital Co., Ltd.', + 0x000119: u'RTUnet (Australia)', + 0x00011A: u'EEH DataLink GmbH', + 0x00011B: u'Unizone Technologies, Inc.', + 0x00011C: u'Universal Talkware Corporation', + 0x00011D: u'Centillium Communications', + 0x00011E: u'Precidia Technologies, Inc.', + 0x00011F: u'RC Networks, Inc.', + 0x000120: u'OSCILLOQUARTZ S.A.', + 0x000121: u'Watchguard Technologies, Inc.', + 
0x000122: u'Trend Communications, Ltd.', + 0x000123: u'DIGITAL ELECTRONICS CORP.', + 0x000124: u'Acer Incorporated', + 0x000125: u'YAESU MUSEN CO., LTD.', + 0x000126: u'PAC Labs', + 0x000127: u'OPEN Networks Pty Ltd', + 0x000128: u'EnjoyWeb, Inc.', + 0x000129: u'DFI Inc.', + 0x00012A: u'Telematica Sistems Inteligente', + 0x00012B: u'TELENET Co., Ltd.', + 0x00012C: u'Aravox Technologies, Inc.', + 0x00012D: u'Komodo Technology', + 0x00012E: u'PC Partner Ltd.', + 0x00012F: u'Twinhead International Corp', + 0x000130: u'Extreme Networks', + 0x000131: u'Detection Systems, Inc.', + 0x000132: u'Dranetz - BMI', + 0x000133: u'KYOWA Electronic Instruments C', + 0x000134: u'SIG Positec Systems AG', + 0x000135: u'KDC Corp.', + 0x000136: u'CyberTAN Technology, Inc.', + 0x000137: u'IT Farm Corporation', + 0x000138: u'XAVi Technologies Corp.', + 0x000139: u'Point Multimedia Systems', + 0x00013A: u'SHELCAD COMMUNICATIONS, LTD.', + 0x00013B: u'BNA SYSTEMS', + 0x00013C: u'TIW SYSTEMS', + 0x00013D: u'RiscStation Ltd.', + 0x00013E: u'Ascom Tateco AB', + 0x00013F: u'Neighbor World Co., Ltd.', + 0x000140: u'Sendtek Corporation', + 0x000141: u'CABLE PRINT', + 0x000142: u'Cisco Systems, Inc.', + 0x000143: u'Cisco Systems, Inc.', + 0x000144: u'EMC Corporation', + 0x000145: u'WINSYSTEMS, INC.', + 0x000146: u'Tesco Controls, Inc.', + 0x000147: u'Zhone Technologies', + 0x000148: u'X-traWeb Inc.', + 0x000149: u'T.D.T. 
Transfer Data Test GmbH', + 0x00014A: u'Sony Corporation', + 0x00014B: u'Ennovate Networks, Inc.', + 0x00014C: u'Berkeley Process Control', + 0x00014D: u'Shin Kin Enterprises Co., Ltd', + 0x00014E: u'WIN Enterprises, Inc.', + 0x00014F: u'ADTRAN INC', + 0x000150: u'GILAT COMMUNICATIONS, LTD.', + 0x000151: u'Ensemble Communications', + 0x000152: u'CHROMATEK INC.', + 0x000153: u'ARCHTEK TELECOM CORPORATION', + 0x000154: u'G3M Corporation', + 0x000155: u'Promise Technology, Inc.', + 0x000156: u'FIREWIREDIRECT.COM, INC.', + 0x000157: u'SYSWAVE CO., LTD', + 0x000158: u'Electro Industries/Gauge Tech', + 0x000159: u'S1 Corporation', + 0x00015A: u'Digital Video Broadcasting', + 0x00015B: u'ITALTEL S.p.A/RF-UP-I', + 0x00015C: u'CADANT INC.', + 0x00015D: u'Sun Microsystems, Inc', + 0x00015E: u'BEST TECHNOLOGY CO., LTD.', + 0x00015F: u'DIGITAL DESIGN GmbH', + 0x000160: u'ELMEX Co., LTD.', + 0x000161: u'Meta Machine Technology', + 0x000162: u'Cygnet Technologies, Inc.', + 0x000163: u'Cisco Systems, Inc.', + 0x000164: u'Cisco Systems, Inc.', + 0x000165: u'AirSwitch Corporation', + 0x000166: u'TC GROUP A/S', + 0x000167: u'HIOKI E.E. 
CORPORATION', + 0x000168: u'VITANA CORPORATION', + 0x000169: u'Celestix Networks Pte Ltd.', + 0x00016A: u'ALITEC', + 0x00016B: u'LightChip, Inc.', + 0x00016C: u'FOXCONN', + 0x00016D: u'CarrierComm Inc.', + 0x00016E: u'Conklin Corporation', + 0x00016F: u'HAITAI ELECTRONICS CO., LTD.', + 0x000170: u'ESE Embedded System Engineer\'g', + 0x000171: u'Allied Data Technologies', + 0x000172: u'TechnoLand Co., LTD.', + 0x000173: u'AMCC', + 0x000174: u'CyberOptics Corporation', + 0x000175: u'Radiant Communications Corp.', + 0x000176: u'Orient Silver Enterprises', + 0x000177: u'EDSL', + 0x000178: u'MARGI Systems, Inc.', + 0x000179: u'WIRELESS TECHNOLOGY, INC.', + 0x00017A: u'Chengdu Maipu Electric Industrial Co., Ltd.', + 0x00017B: u'Heidelberger Druckmaschinen AG', + 0x00017C: u'AG-E GmbH', + 0x00017D: u'ThermoQuest', + 0x00017E: u'ADTEK System Science Co., Ltd.', + 0x00017F: u'Experience Music Project', + 0x000180: u'AOpen, Inc.', + 0x000181: u'Nortel Networks', + 0x000182: u'DICA TECHNOLOGIES AG', + 0x000183: u'ANITE TELECOMS', + 0x000184: u'SIEB & MEYER AG', + 0x000185: u'Aloka Co., Ltd.', + 0x000186: u'Uwe Disch', + 0x000187: u'i2SE GmbH', + 0x000188: u'LXCO Technologies ag', + 0x000189: u'Refraction Technology, Inc.', + 0x00018A: u'ROI COMPUTER AG', + 0x00018B: u'NetLinks Co., Ltd.', + 0x00018C: u'Mega Vision', + 0x00018D: u'AudeSi Technologies', + 0x00018E: u'Logitec Corporation', + 0x00018F: u'Kenetec, Inc.', + 0x000190: u'SMK-M', + 0x000191: u'SYRED Data Systems', + 0x000192: u'Texas Digital Systems', + 0x000193: u'Hanbyul Telecom Co., Ltd.', + 0x000194: u'Capital Equipment Corporation', + 0x000195: u'Sena Technologies, Inc.', + 0x000196: u'Cisco Systems, Inc.', + 0x000197: u'Cisco Systems, Inc.', + 0x000198: u'Darim Vision', + 0x000199: u'HeiSei Electronics', + 0x00019A: u'LEUNIG GmbH', + 0x00019B: u'Kyoto Microcomputer Co., Ltd.', + 0x00019C: u'JDS Uniphase Inc.', + 0x00019D: u'E-Control Systems, Inc.', + 0x00019E: u'ESS Technology, Inc.', + 0x00019F: u'Phonex 
Broadband', + 0x0001A0: u'Infinilink Corporation', + 0x0001A1: u'Mag-Tek, Inc.', + 0x0001A2: u'Logical Co., Ltd.', + 0x0001A3: u'GENESYS LOGIC, INC.', + 0x0001A4: u'Microlink Corporation', + 0x0001A5: u'Nextcomm, Inc.', + 0x0001A6: u'Scientific-Atlanta Arcodan A/S', + 0x0001A7: u'UNEX TECHNOLOGY CORPORATION', + 0x0001A8: u'Welltech Computer Co., Ltd.', + 0x0001A9: u'BMW AG', + 0x0001AA: u'Airspan Communications, Ltd.', + 0x0001AB: u'Main Street Networks', + 0x0001AC: u'Sitara Networks, Inc.', + 0x0001AD: u'Coach Master International d.b.a. CMI Worldwide, Inc.', + 0x0001AE: u'Trex Enterprises', + 0x0001AF: u'Motorola Computer Group', + 0x0001B0: u'Fulltek Technology Co., Ltd.', + 0x0001B1: u'General Bandwidth', + 0x0001B2: u'Digital Processing Systems, Inc.', + 0x0001B3: u'Precision Electronic Manufacturing', + 0x0001B4: u'Wayport, Inc.', + 0x0001B5: u'Turin Networks, Inc.', + 0x0001B6: u'SAEJIN T&M Co., Ltd.', + 0x0001B7: u'Centos, Inc.', + 0x0001B8: u'Netsensity, Inc.', + 0x0001B9: u'SKF Condition Monitoring', + 0x0001BA: u'IC-Net, Inc.', + 0x0001BB: u'Frequentis', + 0x0001BC: u'Brains Corporation', + 0x0001BD: u'Peterson Electro-Musical Products, Inc.', + 0x0001BE: u'Gigalink Co., Ltd.', + 0x0001BF: u'Teleforce Co., Ltd.', + 0x0001C0: u'CompuLab, Ltd.', + 0x0001C1: u'Vitesse Semiconductor Corporation', + 0x0001C2: u'ARK Research Corp.', + 0x0001C3: u'Acromag, Inc.', + 0x0001C4: u'NeoWave, Inc.', + 0x0001C5: u'Simpler Networks', + 0x0001C6: u'Quarry Technologies', + 0x0001C7: u'Cisco Systems, Inc.', + 0x0001C8: u'THOMAS CONRAD CORP.', + 0x0001C8: u'CONRAD CORP.', + 0x0001C9: u'Cisco Systems, Inc.', + 0x0001CA: u'Geocast Network Systems, Inc.', + 0x0001CB: u'EVR', + 0x0001CC: u'Japan Total Design Communication Co., Ltd.', + 0x0001CD: u'ARtem', + 0x0001CE: u'Custom Micro Products, Ltd.', + 0x0001CF: u'Alpha Data Parallel Systems, Ltd.', + 0x0001D0: u'VitalPoint, Inc.', + 0x0001D1: u'CoNet Communications, Inc.', + 0x0001D2: u'MacPower Peripherals, Ltd.', + 0x0001D3: 
u'PAXCOMM, Inc.', + 0x0001D4: u'Leisure Time, Inc.', + 0x0001D5: u'HAEDONG INFO & COMM CO., LTD', + 0x0001D6: u'MAN Roland Druckmaschinen AG', + 0x0001D7: u'F5 Networks, Inc.', + 0x0001D8: u'Teltronics, Inc.', + 0x0001D9: u'Sigma, Inc.', + 0x0001DA: u'WINCOMM Corporation', + 0x0001DB: u'Freecom Technologies GmbH', + 0x0001DC: u'Activetelco', + 0x0001DD: u'Avail Networks', + 0x0001DE: u'Trango Systems, Inc.', + 0x0001DF: u'ISDN Communications, Ltd.', + 0x0001E0: u'Fast Systems, Inc.', + 0x0001E1: u'Kinpo Electronics, Inc.', + 0x0001E2: u'Ando Electric Corporation', + 0x0001E3: u'Siemens AG', + 0x0001E4: u'Sitera, Inc.', + 0x0001E5: u'Supernet, Inc.', + 0x0001E6: u'Hewlett-Packard Company', + 0x0001E7: u'Hewlett-Packard Company', + 0x0001E8: u'Force10 Networks, Inc.', + 0x0001E9: u'Litton Marine Systems B.V.', + 0x0001EA: u'Cirilium Corp.', + 0x0001EB: u'C-COM Corporation', + 0x0001EC: u'Ericsson Group', + 0x0001ED: u'SETA Corp.', + 0x0001EE: u'Comtrol Europe, Ltd.', + 0x0001EF: u'Camtel Technology Corp.', + 0x0001F0: u'Tridium, Inc.', + 0x0001F1: u'Innovative Concepts, Inc.', + 0x0001F2: u'Mark of the Unicorn, Inc.', + 0x0001F3: u'QPS, Inc.', + 0x0001F4: u'Enterasys Networks', + 0x0001F5: u'ERIM S.A.', + 0x0001F6: u'Association of Musical Electronics Industry', + 0x0001F7: u'Image Display Systems, Inc.', + 0x0001F8: u'Adherent Systems, Ltd.', + 0x0001F9: u'TeraGlobal Communications Corp.', + 0x0001FA: u'HOROSCAS', + 0x0001FB: u'DoTop Technology, Inc.', + 0x0001FC: u'Keyence Corporation', + 0x0001FD: u'Digital Voice Systems, Inc.', + 0x0001FE: u'DIGITAL EQUIPMENT CORPORATION', + 0x0001FF: u'Data Direct Networks, Inc.', + 0x000200: u'Net & Sys Co., Ltd.', + 0x000201: u'IFM Electronic gmbh', + 0x000202: u'Amino Communications, Ltd.', + 0x000203: u'Woonsang Telecom, Inc.', + 0x000204: u'Bodmann Industries Elektronik GmbH', + 0x000205: u'Hitachi Denshi, Ltd.', + 0x000206: u'Telital R&D Denmark A/S', + 0x000207: u'VisionGlobal Network Corp.', + 0x000208: u'Unify Networks, 
Inc.', + 0x000209: u'Shenzhen SED Information Technology Co., Ltd.', + 0x00020A: u'Gefran Spa', + 0x00020B: u'Native Networks, Inc.', + 0x00020C: u'Metro-Optix', + 0x00020D: u'Micronpc.com', + 0x00020E: u'Laurel Networks, Inc.', + 0x00020F: u'AATR', + 0x000210: u'Fenecom', + 0x000211: u'Nature Worldwide Technology Corp.', + 0x000212: u'SierraCom', + 0x000213: u'S.D.E.L.', + 0x000214: u'DTVRO', + 0x000215: u'Cotas Computer Technology A/B', + 0x000216: u'Cisco Systems, Inc.', + 0x000217: u'Cisco Systems, Inc.', + 0x000218: u'Advanced Scientific Corp', + 0x000219: u'Paralon Technologies', + 0x00021A: u'Zuma Networks', + 0x00021B: u'Kollmorgen-Servotronix', + 0x00021C: u'Network Elements, Inc.', + 0x00021D: u'Data General Communication Ltd.', + 0x00021E: u'SIMTEL S.R.L.', + 0x00021F: u'Aculab PLC', + 0x000220: u'Canon Aptex, Inc.', + 0x000221: u'DSP Application, Ltd.', + 0x000222: u'Chromisys, Inc.', + 0x000223: u'ClickTV', + 0x000224: u'C-COR', + 0x000225: u'Certus Technology, Inc.', + 0x000226: u'XESystems, Inc.', + 0x000227: u'ESD GmbH', + 0x000228: u'Necsom, Ltd.', + 0x000229: u'Adtec Corporation', + 0x00022A: u'Asound Electronic', + 0x00022B: u'SAXA, Inc.', + 0x00022C: u'ABB Bomem, Inc.', + 0x00022D: u'Agere Systems', + 0x00022E: u'TEAC Corp. 
R& D', + 0x00022F: u'P-Cube, Ltd.', + 0x000230: u'Intersoft Electronics', + 0x000231: u'Ingersoll-Rand', + 0x000232: u'Avision, Inc.', + 0x000233: u'Mantra Communications, Inc.', + 0x000234: u'Imperial Technology, Inc.', + 0x000235: u'Paragon Networks International', + 0x000236: u'INIT GmbH', + 0x000237: u'Cosmo Research Corp.', + 0x000238: u'Serome Technology, Inc.', + 0x000239: u'Visicom', + 0x00023A: u'ZSK Stickmaschinen GmbH', + 0x00023B: u'Redback Networks', + 0x00023C: u'Creative Technology, Ltd.', + 0x00023D: u'NuSpeed, Inc.', + 0x00023E: u'Selta Telematica S.p.a', + 0x00023F: u'Compal Electronics, Inc.', + 0x000240: u'Seedek Co., Ltd.', + 0x000241: u'Amer.com', + 0x000242: u'Videoframe Systems', + 0x000243: u'Raysis Co., Ltd.', + 0x000244: u'SURECOM Technology Co.', + 0x000245: u'Lampus Co, Ltd.', + 0x000246: u'All-Win Tech Co., Ltd.', + 0x000247: u'Great Dragon Information Technology (Group) Co., Ltd.', + 0x000248: u'Pilz GmbH & Co.', + 0x000249: u'Aviv Infocom Co, Ltd.', + 0x00024A: u'Cisco Systems, Inc.', + 0x00024B: u'Cisco Systems, Inc.', + 0x00024C: u'SiByte, Inc.', + 0x00024D: u'Mannesman Dematic Colby Pty. Ltd.', + 0x00024E: u'Datacard Group', + 0x00024F: u'IPM Datacom S.R.L.', + 0x000250: u'Geyser Networks, Inc.', + 0x000251: u'Soma Networks, Inc.', + 0x000252: u'Carrier Corporation', + 0x000253: u'Televideo, Inc.', + 0x000254: u'WorldGate', + 0x000255: u'IBM Corporation', + 0x000256: u'Alpha Processor, Inc.', + 0x000257: u'Microcom Corp.', + 0x000258: u'Flying Packets Communications', + 0x000259: u'Tsann Kuen China (Shanghai)Enterprise Co., Ltd. 
IT Group', + 0x00025A: u'Catena Networks', + 0x00025B: u'Cambridge Silicon Radio', + 0x00025C: u'SCI Systems (Kunshan) Co., Ltd.', + 0x00025D: u'Calix Networks', + 0x00025E: u'High Technology Ltd', + 0x00025F: u'Nortel Networks', + 0x000260: u'Accordion Networks, Inc.', + 0x000261: u'Tilgin AB', + 0x000262: u'Soyo Group Soyo Com Tech Co., Ltd', + 0x000263: u'UPS Manufacturing SRL', + 0x000264: u'AudioRamp.com', + 0x000265: u'Virditech Co. Ltd.', + 0x000266: u'Thermalogic Corporation', + 0x000267: u'NODE RUNNER, INC.', + 0x000268: u'Harris Government Communications', + 0x000269: u'Nadatel Co., Ltd', + 0x00026A: u'Cocess Telecom Co., Ltd.', + 0x00026B: u'BCM Computers Co., Ltd.', + 0x00026C: u'Philips CFT', + 0x00026D: u'Adept Telecom', + 0x00026E: u'NeGeN Access, Inc.', + 0x00026F: u'Senao International Co., Ltd.', + 0x000270: u'Crewave Co., Ltd.', + 0x000271: u'Vpacket Communications', + 0x000272: u'CC&C Technologies, Inc.', + 0x000273: u'Coriolis Networks', + 0x000274: u'Tommy Technologies Corp.', + 0x000275: u'SMART Technologies, Inc.', + 0x000276: u'Primax Electronics Ltd.', + 0x000277: u'Cash Systemes Industrie', + 0x000278: u'Samsung Electro-Mechanics Co., Ltd.', + 0x000279: u'Control Applications, Ltd.', + 0x00027A: u'IOI Technology Corporation', + 0x00027B: u'Amplify Net, Inc.', + 0x00027C: u'Trilithic, Inc.', + 0x00027D: u'Cisco Systems, Inc.', + 0x00027E: u'Cisco Systems, Inc.', + 0x00027F: u'ask-technologies.com', + 0x000280: u'Mu Net, Inc.', + 0x000281: u'Madge Ltd.', + 0x000282: u'ViaClix, Inc.', + 0x000283: u'Spectrum Controls, Inc.', + 0x000284: u'AREVA T&D', + 0x000285: u'Riverstone Networks', + 0x000286: u'Occam Networks', + 0x000287: u'Adapcom', + 0x000288: u'GLOBAL VILLAGE COMMUNICATION', + 0x000289: u'DNE Technologies', + 0x00028A: u'Ambit Microsystems Corporation', + 0x00028B: u'VDSL Systems OY', + 0x00028C: u'Micrel-Synergy Semiconductor', + 0x00028D: u'Movita Technologies, Inc.', + 0x00028E: u'Rapid 5 Networks, Inc.', + 0x00028F: u'Globetek, 
Inc.', + 0x000290: u'Woorigisool, Inc.', + 0x000291: u'Open Network Co., Ltd.', + 0x000292: u'Logic Innovations, Inc.', + 0x000293: u'Solid Data Systems', + 0x000294: u'Tokyo Sokushin Co., Ltd.', + 0x000295: u'IP.Access Limited', + 0x000296: u'Lectron Co,. Ltd.', + 0x000297: u'C-COR.net', + 0x000298: u'Broadframe Corporation', + 0x000299: u'Apex, Inc.', + 0x00029A: u'Storage Apps', + 0x00029B: u'Kreatel Communications AB', + 0x00029C: u'3COM', + 0x00029D: u'Merix Corp.', + 0x00029E: u'Information Equipment Co., Ltd.', + 0x00029F: u'L-3 Communication Aviation Recorders', + 0x0002A0: u'Flatstack Ltd.', + 0x0002A1: u'World Wide Packets', + 0x0002A2: u'Hilscher GmbH', + 0x0002A3: u'ABB Power Automation', + 0x0002A4: u'AddPac Technology Co., Ltd.', + 0x0002A5: u'Compaq Computer Corporation', + 0x0002A6: u'Effinet Systems Co., Ltd.', + 0x0002A7: u'Vivace Networks', + 0x0002A8: u'Air Link Technology', + 0x0002A9: u'RACOM, s.r.o.', + 0x0002AA: u'PLcom Co., Ltd.', + 0x0002AB: u'CTC Union Technologies Co., Ltd.', + 0x0002AC: u'3PAR data', + 0x0002AD: u'Pentax Corpotation', + 0x0002AE: u'Scannex Electronics Ltd.', + 0x0002AF: u'TeleCruz Technology, Inc.', + 0x0002B0: u'Hokubu Communication & Industrial Co., Ltd.', + 0x0002B1: u'Anritsu, Ltd.', + 0x0002B2: u'Cablevision', + 0x0002B3: u'Intel Corporation', + 0x0002B4: u'DAPHNE', + 0x0002B5: u'Avnet, Inc.', + 0x0002B6: u'Acrosser Technology Co., Ltd.', + 0x0002B7: u'Watanabe Electric Industry Co., Ltd.', + 0x0002B8: u'WHI KONSULT AB', + 0x0002B9: u'Cisco Systems, Inc.', + 0x0002BA: u'Cisco Systems, Inc.', + 0x0002BB: u'Continuous Computing', + 0x0002BC: u'LVL 7 Systems, Inc.', + 0x0002BD: u'Bionet Co., Ltd.', + 0x0002BE: u'Totsu Engineering, Inc.', + 0x0002BF: u'dotRocket, Inc.', + 0x0002C0: u'Bencent Tzeng Industry Co., Ltd.', + 0x0002C1: u'Innovative Electronic Designs, Inc.', + 0x0002C2: u'Net Vision Telecom', + 0x0002C3: u'Arelnet Ltd.', + 0x0002C4: u'Vector International BUBA', + 0x0002C5: u'Evertz Microsystems Ltd.', + 
0x0002C6: u'Data Track Technology PLC', + 0x0002C7: u'ALPS ELECTRIC Co., Ltd.', + 0x0002C8: u'Technocom Communications Technology (pte) Ltd', + 0x0002C9: u'Mellanox Technologies', + 0x0002CA: u'EndPoints, Inc.', + 0x0002CB: u'TriState Ltd.', + 0x0002CC: u'M.C.C.I', + 0x0002CD: u'TeleDream, Inc.', + 0x0002CE: u'FoxJet, Inc.', + 0x0002CF: u'ZyGate Communications, Inc.', + 0x0002D0: u'Comdial Corporation', + 0x0002D1: u'Vivotek, Inc.', + 0x0002D2: u'Workstation AG', + 0x0002D3: u'NetBotz, Inc.', + 0x0002D4: u'PDA Peripherals, Inc.', + 0x0002D5: u'ACR', + 0x0002D6: u'NICE Systems', + 0x0002D7: u'EMPEG Ltd', + 0x0002D8: u'BRECIS Communications Corporation', + 0x0002D9: u'Reliable Controls', + 0x0002DA: u'ExiO Communications, Inc.', + 0x0002DB: u'NETSEC', + 0x0002DC: u'Fujitsu General Limited', + 0x0002DD: u'Bromax Communications, Ltd.', + 0x0002DE: u'Astrodesign, Inc.', + 0x0002DF: u'Net Com Systems, Inc.', + 0x0002E0: u'ETAS GmbH', + 0x0002E1: u'Integrated Network Corporation', + 0x0002E2: u'NDC Infared Engineering', + 0x0002E3: u'LITE-ON Communications, Inc.', + 0x0002E4: u'JC HYUN Systems, Inc.', + 0x0002E5: u'Timeware Ltd.', + 0x0002E6: u'Gould Instrument Systems, Inc.', + 0x0002E7: u'CAB GmbH & Co KG', + 0x0002E8: u'E.D.&A.', + 0x0002E9: u'CS Systemes De Securite - C3S', + 0x0002EA: u'Focus Enhancements', + 0x0002EB: u'Pico Communications', + 0x0002EC: u'Maschoff Design Engineering', + 0x0002ED: u'DXO Telecom Co., Ltd.', + 0x0002EE: u'Nokia Danmark A/S', + 0x0002EF: u'CCC Network Systems Group Ltd.', + 0x0002F0: u'AME Optimedia Technology Co., Ltd.', + 0x0002F1: u'Pinetron Co., Ltd.', + 0x0002F2: u'eDevice, Inc.', + 0x0002F3: u'Media Serve Co., Ltd.', + 0x0002F4: u'PCTEL, Inc.', + 0x0002F5: u'VIVE Synergies, Inc.', + 0x0002F6: u'Equipe Communications', + 0x0002F7: u'ARM', + 0x0002F8: u'SEAKR Engineering, Inc.', + 0x0002F9: u'Mimos Semiconductor SDN BHD', + 0x0002FA: u'DX Antenna Co., Ltd.', + 0x0002FB: u'Baumuller Aulugen-Systemtechnik GmbH', + 0x0002FC: u'Cisco 
Systems, Inc.', + 0x0002FD: u'Cisco Systems, Inc.', + 0x0002FE: u'Viditec, Inc.', + 0x0002FF: u'Handan BroadInfoCom', + 0x000300: u'NetContinuum, Inc.', + 0x000301: u'Avantas Networks Corporation', + 0x000302: u'Charles Industries, Ltd.', + 0x000303: u'JAMA Electronics Co., Ltd.', + 0x000304: u'Pacific Broadband Communications', + 0x000305: u'Smart Network Devices GmbH', + 0x000306: u'Fusion In Tech Co., Ltd.', + 0x000307: u'Secure Works, Inc.', + 0x000308: u'AM Communications, Inc.', + 0x000309: u'Texcel Technology PLC', + 0x00030A: u'Argus Technologies', + 0x00030B: u'Hunter Technology, Inc.', + 0x00030C: u'Telesoft Technologies Ltd.', + 0x00030D: u'Uniwill Computer Corp.', + 0x00030E: u'Core Communications Co., Ltd.', + 0x00030F: u'Digital China (Shanghai) Networks Ltd.', + 0x000310: u'Link Evolution Corp.', + 0x000311: u'Micro Technology Co., Ltd.', + 0x000312: u'TR-Systemtechnik GmbH', + 0x000313: u'Access Media SPA', + 0x000314: u'Teleware Network Systems', + 0x000315: u'Cidco Incorporated', + 0x000316: u'Nobell Communications, Inc.', + 0x000317: u'Merlin Systems, Inc.', + 0x000318: u'Cyras Systems, Inc.', + 0x000319: u'Infineon AG', + 0x00031A: u'Beijing Broad Telecom Ltd., China', + 0x00031B: u'Cellvision Systems, Inc.', + 0x00031C: u'Svenska Hardvarufabriken AB', + 0x00031D: u'Taiwan Commate Computer, Inc.', + 0x00031E: u'Optranet, Inc.', + 0x00031F: u'Condev Ltd.', + 0x000320: u'Xpeed, Inc.', + 0x000321: u'Reco Research Co., Ltd.', + 0x000322: u'IDIS Co., Ltd.', + 0x000323: u'Cornet Technology, Inc.', + 0x000324: u'SANYO Multimedia Tottori Co., Ltd.', + 0x000325: u'Arima Computer Corp.', + 0x000326: u'Iwasaki Information Systems Co., Ltd.', + 0x000327: u'ACT\'L', + 0x000328: u'Mace Group, Inc.', + 0x000329: u'F3, Inc.', + 0x00032A: u'UniData Communication Systems, Inc.', + 0x00032B: u'GAI Datenfunksysteme GmbH', + 0x00032C: u'ABB Industrie AG', + 0x00032D: u'IBASE Technology, Inc.', + 0x00032E: u'Scope Information Management, Ltd.', + 0x00032F: u'Global 
Sun Technology, Inc.', + 0x000330: u'Imagenics, Co., Ltd.', + 0x000331: u'Cisco Systems, Inc.', + 0x000332: u'Cisco Systems, Inc.', + 0x000333: u'Digitel Co., Ltd.', + 0x000334: u'Newport Electronics', + 0x000335: u'Mirae Technology', + 0x000336: u'Zetes Technologies', + 0x000337: u'Vaone, Inc.', + 0x000338: u'Oak Technology', + 0x000339: u'Eurologic Systems, Ltd.', + 0x00033A: u'Silicon Wave, Inc.', + 0x00033B: u'TAMI Tech Co., Ltd.', + 0x00033C: u'Daiden Co., Ltd.', + 0x00033D: u'ILSHin Lab', + 0x00033E: u'Tateyama System Laboratory Co., Ltd.', + 0x00033F: u'BigBand Networks, Ltd.', + 0x000340: u'Floware Wireless Systems, Ltd.', + 0x000341: u'Axon Digital Design', + 0x000342: u'Nortel Networks', + 0x000343: u'Martin Professional A/S', + 0x000344: u'Tietech.Co., Ltd.', + 0x000345: u'Routrek Networks Corporation', + 0x000346: u'Hitachi Kokusai Electric, Inc.', + 0x000347: u'Intel Corporation', + 0x000348: u'Norscan Instruments, Ltd.', + 0x000349: u'Vidicode Datacommunicatie B.V.', + 0x00034A: u'RIAS Corporation', + 0x00034B: u'Nortel Networks', + 0x00034C: u'Shanghai DigiVision Technology Co., Ltd.', + 0x00034D: u'Chiaro Networks, Ltd.', + 0x00034E: u'Pos Data Company, Ltd.', + 0x00034F: u'Sur-Gard Security', + 0x000350: u'BTICINO SPA', + 0x000351: u'Diebold, Inc.', + 0x000352: u'Colubris Networks', + 0x000353: u'Mitac, Inc.', + 0x000354: u'Fiber Logic Communications', + 0x000355: u'TeraBeam Internet Systems', + 0x000356: u'Wincor Nixdorf GmbH & Co KG', + 0x000357: u'Intervoice-Brite, Inc.', + 0x000358: u'Hanyang Digitech Co., Ltd.', + 0x000359: u'DigitalSis', + 0x00035A: u'Photron Limited', + 0x00035B: u'BridgeWave Communications', + 0x00035C: u'Saint Song Corp.', + 0x00035D: u'Bosung Hi-Net Co., Ltd.', + 0x00035E: u'Metropolitan Area Networks, Inc.', + 0x00035F: u'Prueftechnik Condition Monitoring GmbH & Co. 
KG', + 0x000360: u'PAC Interactive Technology, Inc.', + 0x000361: u'Widcomm, Inc.', + 0x000362: u'Vodtel Communications, Inc.', + 0x000363: u'Miraesys Co., Ltd.', + 0x000364: u'Scenix Semiconductor, Inc.', + 0x000365: u'Kira Information & Communications, Ltd.', + 0x000366: u'ASM Pacific Technology', + 0x000367: u'Jasmine Networks, Inc.', + 0x000368: u'Embedone Co., Ltd.', + 0x000369: u'Nippon Antenna Co., Ltd.', + 0x00036A: u'Mainnet, Ltd.', + 0x00036B: u'Cisco Systems, Inc.', + 0x00036C: u'Cisco Systems, Inc.', + 0x00036D: u'Runtop, Inc.', + 0x00036E: u'Nicon Systems (Pty) Limited', + 0x00036F: u'Telsey SPA', + 0x000370: u'NXTV, Inc.', + 0x000371: u'Acomz Networks Corp.', + 0x000372: u'ULAN', + 0x000373: u'Aselsan A.S', + 0x000374: u'Hunter Watertech', + 0x000375: u'NetMedia, Inc.', + 0x000376: u'Graphtec Technology, Inc.', + 0x000377: u'Gigabit Wireless', + 0x000378: u'HUMAX Co., Ltd.', + 0x000379: u'Proscend Communications, Inc.', + 0x00037A: u'Taiyo Yuden Co., Ltd.', + 0x00037B: u'IDEC IZUMI Corporation', + 0x00037C: u'Coax Media', + 0x00037D: u'Stellcom', + 0x00037E: u'PORTech Communications, Inc.', + 0x00037F: u'Atheros Communications, Inc.', + 0x000380: u'SSH Communications Security Corp.', + 0x000381: u'Ingenico International', + 0x000382: u'A-One Co., Ltd.', + 0x000383: u'Metera Networks, Inc.', + 0x000384: u'AETA', + 0x000385: u'Actelis Networks, Inc.', + 0x000386: u'Ho Net, Inc.', + 0x000387: u'Blaze Network Products', + 0x000388: u'Fastfame Technology Co., Ltd.', + 0x000389: u'Plantronics', + 0x00038A: u'America Online, Inc.', + 0x00038B: u'PLUS-ONE I&T, Inc.', + 0x00038C: u'Total Impact', + 0x00038D: u'PCS Revenue Control Systems, Inc.', + 0x00038E: u'Atoga Systems, Inc.', + 0x00038F: u'Weinschel Corporation', + 0x000390: u'Digital Video Communications, Inc.', + 0x000391: u'Advanced Digital Broadcast, Ltd.', + 0x000392: u'Hyundai Teletek Co., Ltd.', + 0x000393: u'Apple Computer, Inc.', + 0x000394: u'Connect One', + 0x000395: u'California Amplifier', + 
0x000396: u'EZ Cast Co., Ltd.', + 0x000397: u'Watchfront Electronics', + 0x000398: u'WISI', + 0x000399: u'Dongju Informations & Communications Co., Ltd.', + 0x00039A: u'SiConnect', + 0x00039B: u'NetChip Technology, Inc.', + 0x00039C: u'OptiMight Communications, Inc.', + 0x00039D: u'BENQ CORPORATION', + 0x00039E: u'Tera System Co., Ltd.', + 0x00039F: u'Cisco Systems, Inc.', + 0x0003A0: u'Cisco Systems, Inc.', + 0x0003A1: u'HIPER Information & Communication, Inc.', + 0x0003A2: u'Catapult Communications', + 0x0003A3: u'MAVIX, Ltd.', + 0x0003A4: u'Data Storage and Information Management', + 0x0003A5: u'Medea Corporation', + 0x0003A6: u'Traxit Technology, Inc.', + 0x0003A7: u'Unixtar Technology, Inc.', + 0x0003A8: u'IDOT Computers, Inc.', + 0x0003A9: u'AXCENT Media AG', + 0x0003AA: u'Watlow', + 0x0003AB: u'Bridge Information Systems', + 0x0003AC: u'Fronius Schweissmaschinen', + 0x0003AD: u'Emerson Energy Systems AB', + 0x0003AE: u'Allied Advanced Manufacturing Pte, Ltd.', + 0x0003AF: u'Paragea Communications', + 0x0003B0: u'Xsense Technology Corp.', + 0x0003B1: u'Hospira Inc.', + 0x0003B2: u'Radware', + 0x0003B3: u'IA Link Systems Co., Ltd.', + 0x0003B4: u'Macrotek International Corp.', + 0x0003B5: u'Entra Technology Co.', + 0x0003B6: u'QSI Corporation', + 0x0003B7: u'ZACCESS Systems', + 0x0003B8: u'NetKit Solutions, LLC', + 0x0003B9: u'Hualong Telecom Co., Ltd.', + 0x0003BA: u'Sun Microsystems', + 0x0003BB: u'Signal Communications Limited', + 0x0003BC: u'COT GmbH', + 0x0003BD: u'OmniCluster Technologies, Inc.', + 0x0003BE: u'Netility', + 0x0003BF: u'Centerpoint Broadband Technologies, Inc.', + 0x0003C0: u'RFTNC Co., Ltd.', + 0x0003C1: u'Packet Dynamics Ltd', + 0x0003C2: u'Solphone K.K.', + 0x0003C3: u'Micronik Multimedia', + 0x0003C4: u'Tomra Systems ASA', + 0x0003C5: u'Mobotix AG', + 0x0003C6: u'ICUE Systems, Inc.', + 0x0003C7: u'hopf Elektronik GmbH', + 0x0003C8: u'CML Emergency Services', + 0x0003C9: u'TECOM Co., Ltd.', + 0x0003CA: u'MTS Systems Corp.', + 0x0003CB: 
u'Nippon Systems Development Co., Ltd.', + 0x0003CC: u'Momentum Computer, Inc.', + 0x0003CD: u'Clovertech, Inc.', + 0x0003CE: u'ETEN Technologies, Inc.', + 0x0003CF: u'Muxcom, Inc.', + 0x0003D0: u'KOANKEISO Co., Ltd.', + 0x0003D1: u'Takaya Corporation', + 0x0003D2: u'Crossbeam Systems, Inc.', + 0x0003D3: u'Internet Energy Systems, Inc.', + 0x0003D4: u'Alloptic, Inc.', + 0x0003D5: u'Advanced Communications Co., Ltd.', + 0x0003D6: u'RADVision, Ltd.', + 0x0003D7: u'NextNet Wireless, Inc.', + 0x0003D8: u'iMPath Networks, Inc.', + 0x0003D9: u'Secheron SA', + 0x0003DA: u'Takamisawa Cybernetics Co., Ltd.', + 0x0003DB: u'Apogee Electronics Corp.', + 0x0003DC: u'Lexar Media, Inc.', + 0x0003DD: u'Comark Corp.', + 0x0003DE: u'OTC Wireless', + 0x0003DF: u'Desana Systems', + 0x0003E0: u'RadioFrame Networks, Inc.', + 0x0003E1: u'Winmate Communication, Inc.', + 0x0003E2: u'Comspace Corporation', + 0x0003E3: u'Cisco Systems, Inc.', + 0x0003E4: u'Cisco Systems, Inc.', + 0x0003E5: u'Hermstedt SG', + 0x0003E6: u'Entone Technologies, Inc.', + 0x0003E7: u'Logostek Co. 
Ltd.', + 0x0003E8: u'Wavelength Digital Limited', + 0x0003E9: u'Akara Canada, Inc.', + 0x0003EA: u'Mega System Technologies, Inc.', + 0x0003EB: u'Atrica', + 0x0003EC: u'ICG Research, Inc.', + 0x0003ED: u'Shinkawa Electric Co., Ltd.', + 0x0003EE: u'MKNet Corporation', + 0x0003EF: u'Oneline AG', + 0x0003F0: u'Redfern Broadband Networks', + 0x0003F1: u'Cicada Semiconductor, Inc.', + 0x0003F2: u'Seneca Networks', + 0x0003F3: u'Dazzle Multimedia, Inc.', + 0x0003F4: u'NetBurner', + 0x0003F5: u'Chip2Chip', + 0x0003F6: u'Allegro Networks, Inc.', + 0x0003F7: u'Plast-Control GmbH', + 0x0003F8: u'SanCastle Technologies, Inc.', + 0x0003F9: u'Pleiades Communications, Inc.', + 0x0003FA: u'TiMetra Networks', + 0x0003FB: u'Toko Seiki Company, Ltd.', + 0x0003FC: u'Intertex Data AB', + 0x0003FD: u'Cisco Systems, Inc.', + 0x0003FE: u'Cisco Systems, Inc.', + 0x0003FF: u'Microsoft Corporation', + 0x000400: u'LEXMARK INTERNATIONAL, INC.', + 0x000401: u'Osaki Electric Co., Ltd.', + 0x000402: u'Nexsan Technologies, Ltd.', + 0x000403: u'Nexsi Corporation', + 0x000404: u'Makino Milling Machine Co., Ltd.', + 0x000405: u'ACN Technologies', + 0x000406: u'Fa. 
Metabox AG', + 0x000407: u'Topcon Positioning Systems, Inc.', + 0x000408: u'Sanko Electronics Co., Ltd.', + 0x000409: u'Cratos Networks', + 0x00040A: u'Sage Systems', + 0x00040B: u'3com Europe Ltd.', + 0x00040C: u'KANNO Work\'s Ltd.', + 0x00040D: u'Avaya, Inc.', + 0x00040E: u'AVM GmbH', + 0x00040F: u'Asus Network Technologies, Inc.', + 0x000410: u'Spinnaker Networks, Inc.', + 0x000411: u'Inkra Networks, Inc.', + 0x000412: u'WaveSmith Networks, Inc.', + 0x000413: u'SNOM Technology AG', + 0x000414: u'Umezawa Musen Denki Co., Ltd.', + 0x000415: u'Rasteme Systems Co., Ltd.', + 0x000416: u'Parks S/A Comunicacoes Digitais', + 0x000417: u'ELAU AG', + 0x000418: u'Teltronic S.A.U.', + 0x000419: u'Fibercycle Networks, Inc.', + 0x00041A: u'ines GmbH', + 0x00041B: u'Digital Interfaces Ltd.', + 0x00041C: u'ipDialog, Inc.', + 0x00041D: u'Corega of America', + 0x00041E: u'Shikoku Instrumentation Co., Ltd.', + 0x00041F: u'Sony Computer Entertainment, Inc.', + 0x000420: u'Slim Devices, Inc.', + 0x000421: u'Ocular Networks', + 0x000422: u'Gordon Kapes, Inc.', + 0x000423: u'Intel Corporation', + 0x000424: u'TMC s.r.l.', + 0x000425: u'Atmel Corporation', + 0x000426: u'Autosys', + 0x000427: u'Cisco Systems, Inc.', + 0x000428: u'Cisco Systems, Inc.', + 0x000429: u'Pixord Corporation', + 0x00042A: u'Wireless Networks, Inc.', + 0x00042B: u'IT Access Co., Ltd.', + 0x00042C: u'Minet, Inc.', + 0x00042D: u'Sarian Systems, Ltd.', + 0x00042E: u'Netous Technologies, Ltd.', + 0x00042F: u'International Communications Products, Inc.', + 0x000430: u'Netgem', + 0x000431: u'GlobalStreams, Inc.', + 0x000432: u'Voyetra Turtle Beach, Inc.', + 0x000433: u'Cyberboard A/S', + 0x000434: u'Accelent Systems, Inc.', + 0x000435: u'Comptek International, Inc.', + 0x000436: u'ELANsat Technologies, Inc.', + 0x000437: u'Powin Information Technology, Inc.', + 0x000438: u'Nortel Networks', + 0x000439: u'Rosco Entertainment Technology, Inc.', + 0x00043A: u'Intelligent Telecommunications, Inc.', + 0x00043B: u'Lava 
Computer Mfg., Inc.', + 0x00043C: u'SONOS Co., Ltd.', + 0x00043D: u'INDEL AG', + 0x00043E: u'Telencomm', + 0x00043F: u'Electronic Systems Technology, Inc.', + 0x000440: u'cyberPIXIE, Inc.', + 0x000441: u'Half Dome Systems, Inc.', + 0x000442: u'NACT', + 0x000443: u'Agilent Technologies, Inc.', + 0x000444: u'Western Multiplex Corporation', + 0x000445: u'LMS Skalar Instruments GmbH', + 0x000446: u'CYZENTECH Co., Ltd.', + 0x000447: u'Acrowave Systems Co., Ltd.', + 0x000448: u'Polaroid Professional Imaging', + 0x000449: u'Mapletree Networks', + 0x00044A: u'iPolicy Networks, Inc.', + 0x00044B: u'NVIDIA', + 0x00044C: u'JENOPTIK', + 0x00044D: u'Cisco Systems, Inc.', + 0x00044E: u'Cisco Systems, Inc.', + 0x00044F: u'Leukhardt Systemelektronik GmbH', + 0x000450: u'DMD Computers SRL', + 0x000451: u'Medrad, Inc.', + 0x000452: u'RocketLogix, Inc.', + 0x000453: u'YottaYotta, Inc.', + 0x000454: u'Quadriga UK', + 0x000455: u'ANTARA.net', + 0x000456: u'PipingHot Networks', + 0x000457: u'Universal Access Technology, Inc.', + 0x000458: u'Fusion X Co., Ltd.', + 0x000459: u'Veristar Corporation', + 0x00045A: u'The Linksys Group, Inc.', + 0x00045B: u'Techsan Electronics Co., Ltd.', + 0x00045C: u'Mobiwave Pte Ltd', + 0x00045D: u'BEKA Elektronik', + 0x00045E: u'PolyTrax Information Technology AG', + 0x00045F: u'Evalue Technology, Inc.', + 0x000460: u'Knilink Technology, Inc.', + 0x000461: u'EPOX Computer Co., Ltd.', + 0x000462: u'DAKOS Data & Communication Co., Ltd.', + 0x000463: u'Bosch Security Systems', + 0x000464: u'Fantasma Networks, Inc.', + 0x000465: u'i.s.t isdn-support technik GmbH', + 0x000466: u'ARMITEL Co.', + 0x000467: u'Wuhan Research Institute of MII', + 0x000468: u'Vivity, Inc.', + 0x000469: u'Innocom, Inc.', + 0x00046A: u'Navini Networks', + 0x00046B: u'Palm Wireless, Inc.', + 0x00046C: u'Cyber Technology Co., Ltd.', + 0x00046D: u'Cisco Systems, Inc.', + 0x00046E: u'Cisco Systems, Inc.', + 0x00046F: u'Digitel S/A Industria Eletronica', + 0x000470: u'ipUnplugged AB', + 
0x000471: u'IPrad', + 0x000472: u'Telelynx, Inc.', + 0x000473: u'Photonex Corporation', + 0x000474: u'LEGRAND', + 0x000475: u'3 Com Corporation', + 0x000476: u'3 Com Corporation', + 0x000477: u'Scalant Systems, Inc.', + 0x000478: u'G. Star Technology Corporation', + 0x000479: u'Radius Co., Ltd.', + 0x00047A: u'AXXESSIT ASA', + 0x00047B: u'Schlumberger', + 0x00047C: u'Skidata AG', + 0x00047D: u'Pelco', + 0x00047E: u'Optelecom=NKF', + 0x00047F: u'Chr. Mayr GmbH & Co. KG', + 0x000480: u'Foundry Networks, Inc.', + 0x000481: u'Econolite Control Products, Inc.', + 0x000482: u'Medialogic Corp.', + 0x000483: u'Deltron Technology, Inc.', + 0x000484: u'Amann GmbH', + 0x000485: u'PicoLight', + 0x000486: u'ITTC, University of Kansas', + 0x000487: u'Cogency Semiconductor, Inc.', + 0x000488: u'Eurotherm Controls', + 0x000489: u'YAFO Networks, Inc.', + 0x00048A: u'Temia Vertriebs GmbH', + 0x00048B: u'Poscon Corporation', + 0x00048C: u'Nayna Networks, Inc.', + 0x00048D: u'Tone Commander Systems, Inc.', + 0x00048E: u'Ohm Tech Labs, Inc.', + 0x00048F: u'TD Systems Corp.', + 0x000490: u'Optical Access', + 0x000491: u'Technovision, Inc.', + 0x000492: u'Hive Internet, Ltd.', + 0x000493: u'Tsinghua Unisplendour Co., Ltd.', + 0x000494: u'Breezecom, Ltd.', + 0x000495: u'Tejas Networks', + 0x000496: u'Extreme Networks', + 0x000497: u'MacroSystem Digital Video AG', + 0x000498: u'Mahi Networks', + 0x000499: u'Chino Corporation', + 0x00049A: u'Cisco Systems, Inc.', + 0x00049B: u'Cisco Systems, Inc.', + 0x00049C: u'Surgient Networks, Inc.', + 0x00049D: u'Ipanema Technologies', + 0x00049E: u'Wirelink Co., Ltd.', + 0x00049F: u'Freescale Semiconductor', + 0x0004A0: u'Verity Instruments, Inc.', + 0x0004A1: u'Pathway Connectivity', + 0x0004A2: u'L.S.I. 
Japan Co., Ltd.', + 0x0004A3: u'Microchip Technology, Inc.', + 0x0004A4: u'NetEnabled, Inc.', + 0x0004A5: u'Barco Projection Systems NV', + 0x0004A6: u'SAF Tehnika Ltd.', + 0x0004A7: u'FabiaTech Corporation', + 0x0004A8: u'Broadmax Technologies, Inc.', + 0x0004A9: u'SandStream Technologies, Inc.', + 0x0004AA: u'Jetstream Communications', + 0x0004AB: u'Comverse Network Systems, Inc.', + 0x0004AC: u'IBM CORP.', + 0x0004AD: u'Malibu Networks', + 0x0004AE: u'Liquid Metronics', + 0x0004AF: u'Digital Fountain, Inc.', + 0x0004B0: u'ELESIGN Co., Ltd.', + 0x0004B1: u'Signal Technology, Inc.', + 0x0004B2: u'ESSEGI SRL', + 0x0004B3: u'Videotek, Inc.', + 0x0004B4: u'CIAC', + 0x0004B5: u'Equitrac Corporation', + 0x0004B6: u'Stratex Networks, Inc.', + 0x0004B7: u'AMB i.t. Holding', + 0x0004B8: u'Kumahira Co., Ltd.', + 0x0004B9: u'S.I. Soubou, Inc.', + 0x0004BA: u'KDD Media Will Corporation', + 0x0004BB: u'Bardac Corporation', + 0x0004BC: u'Giantec, Inc.', + 0x0004BD: u'Motorola BCS', + 0x0004BE: u'OptXCon, Inc.', + 0x0004BF: u'VersaLogic Corp.', + 0x0004C0: u'Cisco Systems, Inc.', + 0x0004C1: u'Cisco Systems, Inc.', + 0x0004C2: u'Magnipix, Inc.', + 0x0004C3: u'CASTOR Informatique', + 0x0004C4: u'Allen & Heath Limited', + 0x0004C5: u'ASE Technologies, USA', + 0x0004C6: u'Yamaha Motor Co., Ltd.', + 0x0004C7: u'NetMount', + 0x0004C8: u'LIBA Maschinenfabrik GmbH', + 0x0004C9: u'Micro Electron Co., Ltd.', + 0x0004CA: u'FreeMs Corp.', + 0x0004CB: u'Tdsoft Communication, Ltd.', + 0x0004CC: u'Peek Traffic B.V.', + 0x0004CD: u'Informedia Research Group', + 0x0004CE: u'Patria Ailon', + 0x0004CF: u'Seagate Technology', + 0x0004D0: u'Softlink s.r.o.', + 0x0004D1: u'Drew Technologies, Inc.', + 0x0004D2: u'Adcon Telemetry GmbH', + 0x0004D3: u'Toyokeiki Co., Ltd.', + 0x0004D4: u'Proview Electronics Co., Ltd.', + 0x0004D5: u'Hitachi Communication Systems, Inc.', + 0x0004D6: u'Takagi Industrial Co., Ltd.', + 0x0004D7: u'Omitec Instrumentation Ltd.', + 0x0004D8: u'IPWireless, Inc.', + 0x0004D9: 
u'Titan Electronics, Inc.', + 0x0004DA: u'Relax Technology, Inc.', + 0x0004DB: u'Tellus Group Corp.', + 0x0004DC: u'Nortel Networks', + 0x0004DD: u'Cisco Systems, Inc.', + 0x0004DE: u'Cisco Systems, Inc.', + 0x0004DF: u'Teracom Telematica Ltda.', + 0x0004E0: u'Procket Networks', + 0x0004E1: u'Infinior Microsystems', + 0x0004E2: u'SMC Networks, Inc.', + 0x0004E3: u'Accton Technology Corp.', + 0x0004E4: u'Daeryung Ind., Inc.', + 0x0004E5: u'Glonet Systems, Inc.', + 0x0004E6: u'Banyan Network Private Limited', + 0x0004E7: u'Lightpointe Communications, Inc', + 0x0004E8: u'IER, Inc.', + 0x0004E9: u'Infiniswitch Corporation', + 0x0004EA: u'Hewlett-Packard Company', + 0x0004EB: u'Paxonet Communications, Inc.', + 0x0004EC: u'Memobox SA', + 0x0004ED: u'Billion Electric Co., Ltd.', + 0x0004EE: u'Lincoln Electric Company', + 0x0004EF: u'Polestar Corp.', + 0x0004F0: u'International Computers, Ltd', + 0x0004F1: u'WhereNet', + 0x0004F2: u'Polycom', + 0x0004F3: u'FS FORTH-SYSTEME GmbH', + 0x0004F4: u'Infinite Electronics Inc.', + 0x0004F5: u'SnowShore Networks, Inc.', + 0x0004F6: u'Amphus', + 0x0004F7: u'Omega Band, Inc.', + 0x0004F8: u'QUALICABLE TV Industria E Com., Ltda', + 0x0004F9: u'Xtera Communications, Inc.', + 0x0004FA: u'NBS Technologies Inc.', + 0x0004FB: u'Commtech, Inc.', + 0x0004FC: u'Stratus Computer (DE), Inc.', + 0x0004FD: u'Japan Control Engineering Co., Ltd.', + 0x0004FE: u'Pelago Networks', + 0x0004FF: u'Acronet Co., Ltd.', + 0x000500: u'Cisco Systems, Inc.', + 0x000501: u'Cisco Systems, Inc.', + 0x000502: u'APPLE COMPUTER', + 0x000503: u'ICONAG', + 0x000504: u'Naray Information & Communication Enterprise', + 0x000505: u'Systems Integration Solutions, Inc.', + 0x000506: u'Reddo Networks AB', + 0x000507: u'Fine Appliance Corp.', + 0x000508: u'Inetcam, Inc.', + 0x000509: u'AVOC Nishimura Ltd.', + 0x00050A: u'ICS Spa', + 0x00050B: u'SICOM Systems, Inc.', + 0x00050C: u'Network Photonics, Inc.', + 0x00050D: u'Midstream Technologies, Inc.', + 0x00050E: u'3ware, 
Inc.', + 0x00050F: u'Tanaka S/S Ltd.', + 0x000510: u'Infinite Shanghai Communication Terminals Ltd.', + 0x000511: u'Complementary Technologies Ltd', + 0x000512: u'MeshNetworks, Inc.', + 0x000513: u'VTLinx Multimedia Systems, Inc.', + 0x000514: u'KDT Systems Co., Ltd.', + 0x000515: u'Nuark Co., Ltd.', + 0x000516: u'SMART Modular Technologies', + 0x000517: u'Shellcomm, Inc.', + 0x000518: u'Jupiters Technology', + 0x000519: u'Siemens Building Technologies AG,', + 0x00051A: u'3Com Europe Ltd.', + 0x00051B: u'Magic Control Technology Corporation', + 0x00051C: u'Xnet Technology Corp.', + 0x00051D: u'Airocon, Inc.', + 0x00051E: u'Brocade Communications Systems, Inc.', + 0x00051F: u'Taijin Media Co., Ltd.', + 0x000520: u'Smartronix, Inc.', + 0x000521: u'Control Microsystems', + 0x000522: u'LEA*D Corporation, Inc.', + 0x000523: u'AVL List GmbH', + 0x000524: u'BTL System (HK) Limited', + 0x000525: u'Puretek Industrial Co., Ltd.', + 0x000526: u'IPAS GmbH', + 0x000527: u'SJ Tek Co. Ltd', + 0x000528: u'New Focus, Inc.', + 0x000529: u'Shanghai Broadan Communication Technology Co., Ltd', + 0x00052A: u'Ikegami Tsushinki Co., Ltd.', + 0x00052B: u'HORIBA, Ltd.', + 0x00052C: u'Supreme Magic Corporation', + 0x00052D: u'Zoltrix International Limited', + 0x00052E: u'Cinta Networks', + 0x00052F: u'Leviton Voice and Data', + 0x000530: u'Andiamo Systems, Inc.', + 0x000531: u'Cisco Systems, Inc.', + 0x000532: u'Cisco Systems, Inc.', + 0x000533: u'Sanera Systems, Inc.', + 0x000534: u'Northstar Engineering Ltd.', + 0x000535: u'Chip PC Ltd.', + 0x000536: u'Danam Communications, Inc.', + 0x000537: u'Nets Technology Co., Ltd.', + 0x000538: u'Merilus, Inc.', + 0x000539: u'A Brand New World in Sweden AB', + 0x00053A: u'Willowglen Services Pte Ltd', + 0x00053B: u'Harbour Networks Ltd., Co. 
Beijing', + 0x00053C: u'Xircom', + 0x00053D: u'Agere Systems', + 0x00053E: u'KID Systeme GmbH', + 0x00053F: u'VisionTek, Inc.', + 0x000540: u'FAST Corporation', + 0x000541: u'Advanced Systems Co., Ltd.', + 0x000542: u'Otari, Inc.', + 0x000543: u'IQ Wireless GmbH', + 0x000544: u'Valley Technologies, Inc.', + 0x000545: u'Internet Photonics', + 0x000546: u'KDDI Network & Solultions Inc.', + 0x000547: u'Starent Networks', + 0x000548: u'Disco Corporation', + 0x000549: u'Salira Optical Network Systems', + 0x00054A: u'Ario Data Networks, Inc.', + 0x00054B: u'Micro Innovation AG', + 0x00054C: u'RF Innovations Pty Ltd', + 0x00054D: u'Brans Technologies, Inc.', + 0x00054E: u'Philips Components', + 0x00054F: u'PRIVATE', + 0x000550: u'Vcomms Limited', + 0x000551: u'F & S Elektronik Systeme GmbH', + 0x000552: u'Xycotec Computer GmbH', + 0x000553: u'DVC Company, Inc.', + 0x000554: u'Rangestar Wireless', + 0x000555: u'Japan Cash Machine Co., Ltd.', + 0x000556: u'360 Systems', + 0x000557: u'Agile TV Corporation', + 0x000558: u'Synchronous, Inc.', + 0x000559: u'Intracom S.A.', + 0x00055A: u'Power Dsine Ltd.', + 0x00055B: u'Charles Industries, Ltd.', + 0x00055C: u'Kowa Company, Ltd.', + 0x00055D: u'D-Link Systems, Inc.', + 0x00055E: u'Cisco Systems, Inc.', + 0x00055F: u'Cisco Systems, Inc.', + 0x000560: u'LEADER COMM.CO., LTD', + 0x000561: u'nac Image Technology, Inc.', + 0x000562: u'Digital View Limited', + 0x000563: u'J-Works, Inc.', + 0x000564: u'Tsinghua Bitway Co., Ltd.', + 0x000565: u'Tailyn Communication Company Ltd.', + 0x000566: u'Secui.com Corporation', + 0x000567: u'Etymonic Design, Inc.', + 0x000568: u'Piltofish Networks AB', + 0x000569: u'VMWARE, Inc.', + 0x00056A: u'Heuft Systemtechnik GmbH', + 0x00056B: u'C.P. Technology Co., Ltd.', + 0x00056C: u'Hung Chang Co., Ltd.', + 0x00056D: u'Pacific Corporation', + 0x00056E: u'National Enhance Technology, Inc.', + 0x00056F: u'Innomedia Technologies Pvt. 
Ltd.', + 0x000570: u'Baydel Ltd.', + 0x000571: u'Seiwa Electronics Co.', + 0x000572: u'Deonet Co., Ltd.', + 0x000573: u'Cisco Systems, Inc.', + 0x000574: u'Cisco Systems, Inc.', + 0x000575: u'CDS-Electronics BV', + 0x000576: u'NSM Technology Ltd.', + 0x000577: u'SM Information & Communication', + 0x000578: u'PRIVATE', + 0x000579: u'Universal Control Solution Corp.', + 0x00057A: u'Hatteras Networks', + 0x00057B: u'Chung Nam Electronic Co., Ltd.', + 0x00057C: u'RCO Security AB', + 0x00057D: u'Sun Communications, Inc.', + 0x00057E: u'Eckelmann Steuerungstechnik GmbH', + 0x00057F: u'Acqis Technology', + 0x000580: u'Fibrolan Ltd.', + 0x000581: u'Snell & Wilcox Ltd.', + 0x000582: u'ClearCube Technology', + 0x000583: u'ImageCom Limited', + 0x000584: u'AbsoluteValue Systems, Inc.', + 0x000585: u'Juniper Networks, Inc.', + 0x000586: u'Lucent Technologies', + 0x000587: u'Locus, Incorporated', + 0x000588: u'Sensoria Corp.', + 0x000589: u'National Datacomputer', + 0x00058A: u'Netcom Co., Ltd.', + 0x00058B: u'IPmental, Inc.', + 0x00058C: u'Opentech Inc.', + 0x00058D: u'Lynx Photonic Networks, Inc.', + 0x00058E: u'Flextronics International GmbH & Co. Nfg. KG', + 0x00058F: u'CLCsoft co.', + 0x000590: u'Swissvoice Ltd.', + 0x000591: u'Active Silicon Ltd.', + 0x000592: u'Pultek Corp.', + 0x000593: u'Grammar Engine Inc.', + 0x000594: u'IXXAT Automation GmbH', + 0x000595: u'Alesis Corporation', + 0x000596: u'Genotech Co., Ltd.', + 0x000597: u'Eagle Traffic Control Systems', + 0x000598: u'CRONOS S.r.l.', + 0x000599: u'DRS Test and Energy Management or DRS-TEM', + 0x00059A: u'Cisco Systems, Inc.', + 0x00059B: u'Cisco Systems, Inc.', + 0x00059C: u'Kleinknecht GmbH, Ing. 
Buero', + 0x00059D: u'Daniel Computing Systems, Inc.', + 0x00059E: u'Zinwell Corporation', + 0x00059F: u'Yotta Networks, Inc.', + 0x0005A0: u'MOBILINE Kft.', + 0x0005A1: u'Zenocom', + 0x0005A2: u'CELOX Networks', + 0x0005A3: u'QEI, Inc.', + 0x0005A4: u'Lucid Voice Ltd.', + 0x0005A5: u'KOTT', + 0x0005A6: u'Extron Electronics', + 0x0005A7: u'Hyperchip, Inc.', + 0x0005A8: u'WYLE ELECTRONICS', + 0x0005A9: u'Princeton Networks, Inc.', + 0x0005AA: u'Moore Industries International Inc.', + 0x0005AB: u'Cyber Fone, Inc.', + 0x0005AC: u'Northern Digital, Inc.', + 0x0005AD: u'Topspin Communications, Inc.', + 0x0005AE: u'Mediaport USA', + 0x0005AF: u'InnoScan Computing A/S', + 0x0005B0: u'Korea Computer Technology Co., Ltd.', + 0x0005B1: u'ASB Technology BV', + 0x0005B2: u'Medison Co., Ltd.', + 0x0005B3: u'Asahi-Engineering Co., Ltd.', + 0x0005B4: u'Aceex Corporation', + 0x0005B5: u'Broadcom Technologies', + 0x0005B6: u'INSYS Microelectronics GmbH', + 0x0005B7: u'Arbor Technology Corp.', + 0x0005B8: u'Electronic Design Associates, Inc.', + 0x0005B9: u'Airvana, Inc.', + 0x0005BA: u'Area Netwoeks, Inc.', + 0x0005BB: u'Myspace AB', + 0x0005BC: u'Resorsys Ltd.', + 0x0005BD: u'ROAX BV', + 0x0005BE: u'Kongsberg Seatex AS', + 0x0005BF: u'JustEzy Technology, Inc.', + 0x0005C0: u'Digital Network Alacarte Co., Ltd.', + 0x0005C1: u'A-Kyung Motion, Inc.', + 0x0005C2: u'Soronti, Inc.', + 0x0005C3: u'Pacific Instruments, Inc.', + 0x0005C4: u'Telect, Inc.', + 0x0005C5: u'Flaga HF', + 0x0005C6: u'Triz Communications', + 0x0005C7: u'I/F-COM A/S', + 0x0005C8: u'VERYTECH', + 0x0005C9: u'LG Innotek', + 0x0005CA: u'Hitron Technology, Inc.', + 0x0005CB: u'ROIS Technologies, Inc.', + 0x0005CC: u'Sumtel Communications, Inc.', + 0x0005CD: u'Denon, Ltd.', + 0x0005CE: u'Prolink Microsystems Corporation', + 0x0005CF: u'Thunder River Technologies, Inc.', + 0x0005D0: u'Solinet Systems', + 0x0005D1: u'Metavector Technologies', + 0x0005D2: u'DAP Technologies', + 0x0005D3: u'eProduction Solutions, Inc.', + 
0x0005D4: u'FutureSmart Networks, Inc.', + 0x0005D5: u'Speedcom Wireless', + 0x0005D6: u'Titan Wireless', + 0x0005D7: u'Vista Imaging, Inc.', + 0x0005D8: u'Arescom, Inc.', + 0x0005D9: u'Techno Valley, Inc.', + 0x0005DA: u'Apex Automationstechnik', + 0x0005DB: u'Nentec GmbH', + 0x0005DC: u'Cisco Systems, Inc.', + 0x0005DD: u'Cisco Systems, Inc.', + 0x0005DE: u'Gi Fone Korea, Inc.', + 0x0005DF: u'Electronic Innovation, Inc.', + 0x0005E0: u'Empirix Corp.', + 0x0005E1: u'Trellis Photonics, Ltd.', + 0x0005E2: u'Creativ Network Technologies', + 0x0005E3: u'LightSand Communications, Inc.', + 0x0005E4: u'Red Lion Controls L.P.', + 0x0005E5: u'Renishaw PLC', + 0x0005E6: u'Egenera, Inc.', + 0x0005E7: u'Netrake Corp.', + 0x0005E8: u'TurboWave, Inc.', + 0x0005E9: u'Unicess Network, Inc.', + 0x0005EA: u'Rednix', + 0x0005EB: u'Blue Ridge Networks, Inc.', + 0x0005EC: u'Mosaic Systems Inc.', + 0x0005ED: u'Technikum Joanneum GmbH', + 0x0005EE: u'BEWATOR Group', + 0x0005EF: u'ADOIR Digital Technology', + 0x0005F0: u'SATEC', + 0x0005F1: u'Vrcom, Inc.', + 0x0005F2: u'Power R, Inc.', + 0x0005F3: u'Weboyn', + 0x0005F4: u'System Base Co., Ltd.', + 0x0005F5: u'OYO Geospace Corp.', + 0x0005F6: u'Young Chang Co. 
Ltd.', + 0x0005F7: u'Analog Devices, Inc.', + 0x0005F8: u'Real Time Access, Inc.', + 0x0005F9: u'TOA Corporation', + 0x0005FA: u'IPOptical, Inc.', + 0x0005FB: u'ShareGate, Inc.', + 0x0005FC: u'Schenck Pegasus Corp.', + 0x0005FD: u'PacketLight Networks Ltd.', + 0x0005FE: u'Traficon N.V.', + 0x0005FF: u'SNS Solutions, Inc.', + 0x000600: u'Toshiba Teli Corporation', + 0x000601: u'Otanikeiki Co., Ltd.', + 0x000602: u'Cirkitech Electronics Co.', + 0x000603: u'Baker Hughes Inc.', + 0x000604: u'@Track Communications, Inc.', + 0x000605: u'Inncom International, Inc.', + 0x000606: u'RapidWAN, Inc.', + 0x000607: u'Omni Directional Control Technology Inc.', + 0x000608: u'At-Sky SAS', + 0x000609: u'Crossport Systems', + 0x00060A: u'Blue2space', + 0x00060B: u'Paceline Systems Corporation', + 0x00060C: u'Melco Industries, Inc.', + 0x00060D: u'Wave7 Optics', + 0x00060E: u'IGYS Systems, Inc.', + 0x00060F: u'Narad Networks Inc', + 0x000610: u'Abeona Networks Inc', + 0x000611: u'Zeus Wireless, Inc.', + 0x000612: u'Accusys, Inc.', + 0x000613: u'Kawasaki Microelectronics Incorporated', + 0x000614: u'Prism Holdings', + 0x000615: u'Kimoto Electric Co., Ltd.', + 0x000616: u'Tel Net Co., Ltd.', + 0x000617: u'Redswitch Inc.', + 0x000618: u'DigiPower Manufacturing Inc.', + 0x000619: u'Connection Technology Systems', + 0x00061A: u'Zetari Inc.', + 0x00061B: u'Portable Systems, IBM Japan Co, Ltd', + 0x00061C: u'Hoshino Metal Industries, Ltd.', + 0x00061D: u'MIP Telecom, Inc.', + 0x00061E: u'Maxan Systems', + 0x00061F: u'Vision Components GmbH', + 0x000620: u'Serial System Ltd.', + 0x000621: u'Hinox, Co., Ltd.', + 0x000622: u'Chung Fu Chen Yeh Enterprise Corp.', + 0x000623: u'MGE UPS Systems France', + 0x000624: u'Gentner Communications Corp.', + 0x000625: u'The Linksys Group, Inc.', + 0x000626: u'MWE GmbH', + 0x000627: u'Uniwide Technologies, Inc.', + 0x000628: u'Cisco Systems, Inc.', + 0x000629: u'IBM CORPORATION', + 0x00062A: u'Cisco Systems, Inc.', + 0x00062B: u'INTRASERVER TECHNOLOGY', + 
0x00062C: u'Network Robots, Inc.', + 0x00062D: u'TouchStar Technologies, L.L.C.', + 0x00062E: u'Aristos Logic Corp.', + 0x00062F: u'Pivotech Systems Inc.', + 0x000630: u'Adtranz Sweden', + 0x000631: u'Optical Solutions, Inc.', + 0x000632: u'Mesco Engineering GmbH', + 0x000633: u'Smiths Heimann Biometric Systems', + 0x000634: u'GTE Airfone Inc.', + 0x000635: u'PacketAir Networks, Inc.', + 0x000636: u'Jedai Broadband Networks', + 0x000637: u'Toptrend-Meta Information (ShenZhen) Inc.', + 0x000638: u'Sungjin C&C Co., Ltd.', + 0x000639: u'Newtec', + 0x00063A: u'Dura Micro, Inc.', + 0x00063B: u'Arcturus Networks, Inc.', + 0x00063C: u'NMI Electronics Ltd', + 0x00063D: u'Microwave Data Systems Inc.', + 0x00063E: u'Opthos Inc.', + 0x00063F: u'Everex Communications Inc.', + 0x000640: u'White Rock Networks', + 0x000641: u'ITCN', + 0x000642: u'Genetel Systems Inc.', + 0x000643: u'SONO Computer Co., Ltd.', + 0x000644: u'NEIX Inc.', + 0x000645: u'Meisei Electric Co. Ltd.', + 0x000646: u'ShenZhen XunBao Network Technology Co Ltd', + 0x000647: u'Etrali S.A.', + 0x000648: u'Seedsware, Inc.', + 0x000649: u'Quante', + 0x00064A: u'Honeywell Co., Ltd. (KOREA)', + 0x00064B: u'Alexon Co., Ltd.', + 0x00064C: u'Invicta Networks, Inc.', + 0x00064D: u'Sencore', + 0x00064E: u'Broad Net Technology Inc.', + 0x00064F: u'PRO-NETS Technology Corporation', + 0x000650: u'Tiburon Networks, Inc.', + 0x000651: u'Aspen Networks Inc.', + 0x000652: u'Cisco Systems, Inc.', + 0x000653: u'Cisco Systems, Inc.', + 0x000654: u'Maxxio Technologies', + 0x000655: u'Yipee, Inc.', + 0x000656: u'Tactel AB', + 0x000657: u'Market Central, Inc.', + 0x000658: u'Helmut Fischer GmbH & Co. 
KG', + 0x000659: u'EAL (Apeldoorn) B.V.', + 0x00065A: u'Strix Systems', + 0x00065B: u'Dell Computer Corp.', + 0x00065C: u'Malachite Technologies, Inc.', + 0x00065D: u'Heidelberg Web Systems', + 0x00065E: u'Photuris, Inc.', + 0x00065F: u'ECI Telecom - NGTS Ltd.', + 0x000660: u'NADEX Co., Ltd.', + 0x000661: u'NIA Home Technologies Corp.', + 0x000662: u'MBM Technology Ltd.', + 0x000663: u'Human Technology Co., Ltd.', + 0x000664: u'Fostex Corporation', + 0x000665: u'Sunny Giken, Inc.', + 0x000666: u'Roving Networks', + 0x000667: u'Tripp Lite', + 0x000668: u'Vicon Industries Inc.', + 0x000669: u'Datasound Laboratories Ltd', + 0x00066A: u'InfiniCon Systems, Inc.', + 0x00066B: u'Sysmex Corporation', + 0x00066C: u'Robinson Corporation', + 0x00066D: u'Compuprint S.P.A.', + 0x00066E: u'Delta Electronics, Inc.', + 0x00066F: u'Korea Data Systems', + 0x000670: u'Upponetti Oy', + 0x000671: u'Softing AG', + 0x000672: u'Netezza', + 0x000673: u'Optelecom-nkf', + 0x000674: u'Spectrum Control, Inc.', + 0x000675: u'Banderacom, Inc.', + 0x000676: u'Novra Technologies Inc.', + 0x000677: u'SICK AG', + 0x000678: u'Marantz Japan, Inc.', + 0x000679: u'Konami Corporation', + 0x00067A: u'JMP Systems', + 0x00067B: u'Toplink C&C Corporation', + 0x00067C: u'CISCO SYSTEMS, INC.', + 0x00067D: u'Takasago Ltd.', + 0x00067E: u'WinCom Systems, Inc.', + 0x00067F: u'Rearden Steel Technologies', + 0x000680: u'Card Access, Inc.', + 0x000681: u'Goepel Electronic GmbH', + 0x000682: u'Convedia', + 0x000683: u'Bravara Communications, Inc.', + 0x000684: u'Biacore AB', + 0x000685: u'NetNearU Corporation', + 0x000686: u'ZARDCOM Co., Ltd.', + 0x000687: u'Omnitron Systems Technology, Inc.', + 0x000688: u'Telways Communication Co., Ltd.', + 0x000689: u'yLez Technologies Pte Ltd', + 0x00068A: u'NeuronNet Co. Ltd. 
R&D Center', + 0x00068B: u'AirRunner Technologies, Inc.', + 0x00068C: u'3Com Corporation', + 0x00068D: u'SEPATON, Inc.', + 0x00068E: u'HID Corporation', + 0x00068F: u'Telemonitor, Inc.', + 0x000690: u'Euracom Communication GmbH', + 0x000691: u'PT Inovacao', + 0x000692: u'Intruvert Networks, Inc.', + 0x000693: u'Flexus Computer Technology, Inc.', + 0x000694: u'Mobillian Corporation', + 0x000695: u'Ensure Technologies, Inc.', + 0x000696: u'Advent Networks', + 0x000697: u'R & D Center', + 0x000698: u'egnite Software GmbH', + 0x000699: u'Vida Design Co.', + 0x00069A: u'e & Tel', + 0x00069B: u'AVT Audio Video Technologies GmbH', + 0x00069C: u'Transmode Systems AB', + 0x00069D: u'Petards Mobile Intelligence', + 0x00069E: u'UNIQA, Inc.', + 0x00069F: u'Kuokoa Networks', + 0x0006A0: u'Mx Imaging', + 0x0006A1: u'Celsian Technologies, Inc.', + 0x0006A2: u'Microtune, Inc.', + 0x0006A3: u'Bitran Corporation', + 0x0006A4: u'INNOWELL Corp.', + 0x0006A5: u'PINON Corp.', + 0x0006A6: u'Artistic Licence (UK) Ltd', + 0x0006A7: u'Primarion', + 0x0006A8: u'KC Technology, Inc.', + 0x0006A9: u'Universal Instruments Corp.', + 0x0006AA: u'Miltope Corporation', + 0x0006AB: u'W-Link Systems, Inc.', + 0x0006AC: u'Intersoft Co.', + 0x0006AD: u'KB Electronics Ltd.', + 0x0006AE: u'Himachal Futuristic Communications Ltd', + 0x0006AF: u'PRIVATE', + 0x0006B0: u'Comtech EF Data Corp.', + 0x0006B1: u'Sonicwall', + 0x0006B2: u'Linxtek Co.', + 0x0006B3: u'Diagraph Corporation', + 0x0006B4: u'Vorne Industries, Inc.', + 0x0006B5: u'Luminent, Inc.', + 0x0006B6: u'Nir-Or Israel Ltd.', + 0x0006B7: u'TELEM GmbH', + 0x0006B8: u'Bandspeed Pty Ltd', + 0x0006B9: u'A5TEK Corp.', + 0x0006BA: u'Westwave Communications', + 0x0006BB: u'ATI Technologies Inc.', + 0x0006BC: u'Macrolink, Inc.', + 0x0006BD: u'BNTECHNOLOGY Co., Ltd.', + 0x0006BE: u'Baumer Optronic GmbH', + 0x0006BF: u'Accella Technologies Co., Ltd.', + 0x0006C0: u'United Internetworks, Inc.', + 0x0006C1: u'CISCO SYSTEMS, INC.', + 0x0006C2: u'Smartmatic 
Corporation', + 0x0006C3: u'Schindler Elevators Ltd.', + 0x0006C4: u'Piolink Inc.', + 0x0006C5: u'INNOVI Technologies Limited', + 0x0006C6: u'lesswire AG', + 0x0006C7: u'RFNET Technologies Pte Ltd (S)', + 0x0006C8: u'Sumitomo Metal Micro Devices, Inc.', + 0x0006C9: u'Technical Marketing Research, Inc.', + 0x0006CA: u'American Computer & Digital Components, Inc. (ACDC)', + 0x0006CB: u'Jotron Electronics A/S', + 0x0006CC: u'JMI Electronics Co., Ltd.', + 0x0006CD: u'Kodak IL Ltd.', + 0x0006CE: u'DATENO', + 0x0006CF: u'Thales Avionics In-Flight Systems, LLC', + 0x0006D0: u'Elgar Electronics Corp.', + 0x0006D1: u'Tahoe Networks, Inc.', + 0x0006D2: u'Tundra Semiconductor Corp.', + 0x0006D3: u'Alpha Telecom, Inc. U.S.A.', + 0x0006D4: u'Interactive Objects, Inc.', + 0x0006D5: u'Diamond Systems Corp.', + 0x0006D6: u'Cisco Systems, Inc.', + 0x0006D7: u'Cisco Systems, Inc.', + 0x0006D8: u'Maple Optical Systems', + 0x0006D9: u'IPM-Net S.p.A.', + 0x0006DA: u'ITRAN Communications Ltd.', + 0x0006DB: u'ICHIPS Co., Ltd.', + 0x0006DC: u'Syabas Technology (Amquest)', + 0x0006DD: u'AT & T Laboratories - Cambridge Ltd', + 0x0006DE: u'Flash Technology', + 0x0006DF: u'AIDONIC Corporation', + 0x0006E0: u'MAT Co., Ltd.', + 0x0006E1: u'Techno Trade s.a', + 0x0006E2: u'Ceemax Technology Co., Ltd.', + 0x0006E3: u'Quantitative Imaging Corporation', + 0x0006E4: u'Citel Technologies Ltd.', + 0x0006E5: u'Fujian Newland Computer Ltd. Co.', + 0x0006E6: u'DongYang Telecom Co., Ltd.', + 0x0006E7: u'Bit Blitz Communications Inc.', + 0x0006E8: u'Optical Network Testing, Inc.', + 0x0006E9: u'Intime Corp.', + 0x0006EA: u'ELZET80 Mikrocomputer GmbH&Co. 
KG', + 0x0006EB: u'Global Data', + 0x0006EC: u'M/A COM Private Radio System Inc.', + 0x0006ED: u'Inara Networks', + 0x0006EE: u'Shenyang Neu-era Information & Technology Stock Co., Ltd', + 0x0006EF: u'Maxxan Systems, Inc.', + 0x0006F0: u'Digeo, Inc.', + 0x0006F1: u'Optillion', + 0x0006F2: u'Platys Communications', + 0x0006F3: u'AcceLight Networks', + 0x0006F4: u'Prime Electronics & Satellitics Inc.', + 0x0006F8: u'CPU Technology, Inc.', + 0x0006F9: u'Mitsui Zosen Systems Research Inc.', + 0x0006FA: u'IP SQUARE Co, Ltd.', + 0x0006FB: u'Hitachi Printing Solutions, Ltd.', + 0x0006FC: u'Fnet Co., Ltd.', + 0x0006FD: u'Comjet Information Systems Corp.', + 0x0006FE: u'Celion Networks, Inc.', + 0x0006FF: u'Sheba Systems Co., Ltd.', + 0x000700: u'Zettamedia Korea', + 0x000701: u'RACAL-DATACOM', + 0x000702: u'Varian Medical Systems', + 0x000703: u'CSEE Transport', + 0x000705: u'Endress & Hauser GmbH & Co', + 0x000706: u'Sanritz Corporation', + 0x000707: u'Interalia Inc.', + 0x000708: u'Bitrage Inc.', + 0x000709: u'Westerstrand Urfabrik AB', + 0x00070A: u'Unicom Automation Co., Ltd.', + 0x00070B: u'Octal, SA', + 0x00070C: u'SVA-Intrusion.com Co. Ltd.', + 0x00070D: u'Cisco Systems Inc.', + 0x00070E: u'Cisco Systems Inc.', + 0x00070F: u'Fujant, Inc.', + 0x000710: u'Adax, Inc.', + 0x000711: u'Acterna', + 0x000712: u'JAL Information Technology', + 0x000713: u'IP One, Inc.', + 0x000714: u'Brightcom', + 0x000715: u'General Research of Electronics, Inc.', + 0x000716: u'J & S Marine Ltd.', + 0x000717: u'Wieland Electric GmbH', + 0x000718: u'iCanTek Co., Ltd.', + 0x000719: u'Mobiis Co., Ltd.', + 0x00071A: u'Finedigital Inc.', + 0x00071B: u'Position Technology Inc.', + 0x00071C: u'AT&T Fixed Wireless Services', + 0x00071D: u'Satelsa Sistemas Y Aplicaciones De Telecomunicaciones, S.A.', + 0x00071E: u'Tri-M Engineering / Nupak Dev. Corp.', + 0x00071F: u'European Systems Integration', + 0x000720: u'Trutzschler GmbH & Co. 
KG', + 0x000721: u'Formac Elektronik GmbH', + 0x000722: u'Nielsen Media Research', + 0x000723: u'ELCON Systemtechnik GmbH', + 0x000724: u'Telemax Co., Ltd.', + 0x000725: u'Bematech International Corp.', + 0x000727: u'Zi Corporation (HK) Ltd.', + 0x000728: u'Neo Telecom', + 0x000729: u'Kistler Instrumente AG', + 0x00072A: u'Innovance Networks', + 0x00072B: u'Jung Myung Telecom Co., Ltd.', + 0x00072C: u'Fabricom', + 0x00072D: u'CNSystems', + 0x00072E: u'North Node AB', + 0x00072F: u'Intransa, Inc.', + 0x000730: u'Hutchison OPTEL Telecom Technology Co., Ltd.', + 0x000731: u'Spiricon, Inc.', + 0x000732: u'AAEON Technology Inc.', + 0x000733: u'DANCONTROL Engineering', + 0x000734: u'ONStor, Inc.', + 0x000735: u'Flarion Technologies, Inc.', + 0x000736: u'Data Video Technologies Co., Ltd.', + 0x000737: u'Soriya Co. Ltd.', + 0x000738: u'Young Technology Co., Ltd.', + 0x000739: u'Motion Media Technology Ltd.', + 0x00073A: u'Inventel Systemes', + 0x00073B: u'Tenovis GmbH & Co KG', + 0x00073C: u'Telecom Design', + 0x00073D: u'Nanjing Postel Telecommunications Co., Ltd.', + 0x00073E: u'China Great-Wall Computer Shenzhen Co., Ltd.', + 0x00073F: u'Woojyun Systec Co., Ltd.', + 0x000740: u'Melco Inc.', + 0x000741: u'Sierra Automated Systems', + 0x000742: u'Current Technologies', + 0x000743: u'Chelsio Communications', + 0x000744: u'Unico, Inc.', + 0x000745: u'Radlan Computer Communications Ltd.', + 0x000746: u'TURCK, Inc.', + 0x000747: u'Mecalc', + 0x000748: u'The Imaging Source Europe', + 0x000749: u'CENiX Inc.', + 0x00074A: u'Carl Valentin GmbH', + 0x00074B: u'Daihen Corporation', + 0x00074C: u'Beicom Inc.', + 0x00074D: u'Zebra Technologies Corp.', + 0x00074E: u'Naughty boy co., Ltd.', + 0x00074F: u'Cisco Systems, Inc.', + 0x000750: u'Cisco Systems, Inc.', + 0x000751: u'm.u.t. 
- GmbH', + 0x000752: u'Rhythm Watch Co., Ltd.', + 0x000753: u'Beijing Qxcomm Technology Co., Ltd.', + 0x000754: u'Xyterra Computing, Inc.', + 0x000755: u'Lafon SA', + 0x000756: u'Juyoung Telecom', + 0x000757: u'Topcall International AG', + 0x000758: u'Dragonwave', + 0x000759: u'Boris Manufacturing Corp.', + 0x00075A: u'Air Products and Chemicals, Inc.', + 0x00075B: u'Gibson Guitars', + 0x00075C: u'Eastman Kodak Company', + 0x00075D: u'Celleritas Inc.', + 0x00075E: u'Ametek Power Instruments', + 0x00075F: u'VCS Video Communication Systems AG', + 0x000760: u'TOMIS Information & Telecom Corp.', + 0x000761: u'Logitech SA', + 0x000762: u'Group Sense Limited', + 0x000763: u'Sunniwell Cyber Tech. Co., Ltd.', + 0x000764: u'YoungWoo Telecom Co. Ltd.', + 0x000765: u'Jade Quantum Technologies, Inc.', + 0x000766: u'Chou Chin Industrial Co., Ltd.', + 0x000767: u'Yuxing Electronics Company Limited', + 0x000768: u'Danfoss A/S', + 0x000769: u'Italiana Macchi SpA', + 0x00076A: u'NEXTEYE Co., Ltd.', + 0x00076B: u'Stralfors AB', + 0x00076C: u'Daehanet, Inc.', + 0x00076D: u'Flexlight Networks', + 0x00076E: u'Sinetica Corporation Limited', + 0x00076F: u'Synoptics Limited', + 0x000770: u'Locusnetworks Corporation', + 0x000771: u'Embedded System Corporation', + 0x000772: u'Alcatel Shanghai Bell Co., Ltd.', + 0x000773: u'Ascom Powerline Communications Ltd.', + 0x000774: u'GuangZhou Thinker Technology Co. Ltd.', + 0x000775: u'Valence Semiconductor, Inc.', + 0x000776: u'Federal APD', + 0x000777: u'Motah Ltd.', + 0x000778: u'GERSTEL GmbH & Co. 
KG', + 0x000779: u'Sungil Telecom Co., Ltd.', + 0x00077A: u'Infoware System Co., Ltd.', + 0x00077B: u'Millimetrix Broadband Networks', + 0x00077C: u'OnTime Networks', + 0x00077E: u'Elrest GmbH', + 0x00077F: u'J Communications Co., Ltd.', + 0x000780: u'Bluegiga Technologies OY', + 0x000781: u'Itron Inc.', + 0x000782: u'Nauticus Networks, Inc.', + 0x000783: u'SynCom Network, Inc.', + 0x000784: u'Cisco Systems Inc.', + 0x000785: u'Cisco Systems Inc.', + 0x000786: u'Wireless Networks Inc.', + 0x000787: u'Idea System Co., Ltd.', + 0x000788: u'Clipcomm, Inc.', + 0x000789: u'Eastel Systems Corporation', + 0x00078A: u'Mentor Data System Inc.', + 0x00078B: u'Wegener Communications, Inc.', + 0x00078C: u'Elektronikspecialisten i Borlange AB', + 0x00078D: u'NetEngines Ltd.', + 0x00078E: u'Garz & Friche GmbH', + 0x00078F: u'Emkay Innovative Products', + 0x000790: u'Tri-M Technologies (s) Limited', + 0x000791: u'International Data Communications, Inc.', + 0x000792: u'Suetron Electronic GmbH', + 0x000793: u'Shin Satellite Public Company Limited', + 0x000794: u'Simple Devices, Inc.', + 0x000795: u'Elitegroup Computer System Co. (ECS)', + 0x000796: u'LSI Systems, Inc.', + 0x000797: u'Netpower Co., Ltd.', + 0x000798: u'Selea SRL', + 0x000799: u'Tipping Point Technologies, Inc.', + 0x00079A: u'SmartSight Networks Inc.', + 0x00079B: u'Aurora Networks', + 0x00079C: u'Golden Electronics Technology Co., Ltd.', + 0x00079D: u'Musashi Co., Ltd.', + 0x00079E: u'Ilinx Co., Ltd.', + 0x00079F: u'Action Digital Inc.', + 0x0007A0: u'e-Watch Inc.', + 0x0007A1: u'VIASYS Healthcare GmbH', + 0x0007A2: u'Opteon Corporation', + 0x0007A3: u'Ositis Software, Inc.', + 0x0007A4: u'GN Netcom Ltd.', + 0x0007A5: u'Y.D.K Co. 
Ltd.', + 0x0007A6: u'Home Automation, Inc.', + 0x0007A7: u'A-Z Inc.', + 0x0007A8: u'Haier Group Technologies Ltd.', + 0x0007A9: u'Novasonics', + 0x0007AA: u'Quantum Data Inc.', + 0x0007AC: u'Eolring', + 0x0007AD: u'Pentacon GmbH Foto-und Feinwerktechnik', + 0x0007AE: u'Britestream Networks, Inc.', + 0x0007AF: u'N-Tron Corp.', + 0x0007B0: u'Office Details, Inc.', + 0x0007B1: u'Equator Technologies', + 0x0007B2: u'Transaccess S.A.', + 0x0007B3: u'Cisco Systems Inc.', + 0x0007B4: u'Cisco Systems Inc.', + 0x0007B5: u'Any One Wireless Ltd.', + 0x0007B6: u'Telecom Technology Ltd.', + 0x0007B7: u'Samurai Ind. Prods Eletronicos Ltda', + 0x0007B8: u'American Predator Corp.', + 0x0007B9: u'Ginganet Corporation', + 0x0007BA: u'UTStarcom, Inc.', + 0x0007BB: u'Candera Inc.', + 0x0007BC: u'Identix Inc.', + 0x0007BD: u'Radionet Ltd.', + 0x0007BE: u'DataLogic SpA', + 0x0007BF: u'Armillaire Technologies, Inc.', + 0x0007C0: u'NetZerver Inc.', + 0x0007C1: u'Overture Networks, Inc.', + 0x0007C2: u'Netsys Telecom', + 0x0007C3: u'Cirpack', + 0x0007C4: u'JEAN Co. 
Ltd.', + 0x0007C5: u'Gcom, Inc.', + 0x0007C6: u'VDS Vosskuhler GmbH', + 0x0007C7: u'Synectics Systems Limited', + 0x0007C8: u'Brain21, Inc.', + 0x0007C9: u'Technol Seven Co., Ltd.', + 0x0007CA: u'Creatix Polymedia Ges Fur Kommunikaitonssysteme', + 0x0007CB: u'Freebox SA', + 0x0007CC: u'Kaba Benzing GmbH', + 0x0007CD: u'NMTEL Co., Ltd.', + 0x0007CE: u'Cabletime Limited', + 0x0007CF: u'Anoto AB', + 0x0007D0: u'Automat Engenharia de Automaoa Ltda.', + 0x0007D1: u'Spectrum Signal Processing Inc.', + 0x0007D2: u'Logopak Systeme', + 0x0007D3: u'Stork Digital Imaging B.V.', + 0x0007D4: u'Zhejiang Yutong Network Communication Co Ltd.', + 0x0007D5: u'3e Technologies Int;., Inc.', + 0x0007D6: u'Commil Ltd.', + 0x0007D7: u'Caporis Networks AG', + 0x0007D8: u'Hitron Systems Inc.', + 0x0007D9: u'Splicecom', + 0x0007DA: u'Neuro Telecom Co., Ltd.', + 0x0007DB: u'Kirana Networks, Inc.', + 0x0007DC: u'Atek Co, Ltd.', + 0x0007DD: u'Cradle Technologies', + 0x0007DE: u'eCopilt AB', + 0x0007DF: u'Vbrick Systems Inc.', + 0x0007E0: u'Palm Inc.', + 0x0007E1: u'WIS Communications Co. Ltd.', + 0x0007E2: u'Bitworks, Inc.', + 0x0007E3: u'Navcom Technology, Inc.', + 0x0007E4: u'SoftRadio Co., Ltd.', + 0x0007E5: u'Coup Corporation', + 0x0007E6: u'edgeflow Canada Inc.', + 0x0007E7: u'FreeWave Technologies', + 0x0007E8: u'St. 
Bernard Software', + 0x0007E9: u'Intel Corporation', + 0x0007EA: u'Massana, Inc.', + 0x0007EB: u'Cisco Systems Inc.', + 0x0007EC: u'Cisco Systems Inc.', + 0x0007ED: u'Altera Corporation', + 0x0007EE: u'telco Informationssysteme GmbH', + 0x0007EF: u'Lockheed Martin Tactical Systems', + 0x0007F0: u'LogiSync Corporation', + 0x0007F1: u'TeraBurst Networks Inc.', + 0x0007F2: u'IOA Corporation', + 0x0007F3: u'Thinkengine Networks', + 0x0007F4: u'Eletex Co., Ltd.', + 0x0007F5: u'Bridgeco Co AG', + 0x0007F6: u'Qqest Software Systems', + 0x0007F7: u'Galtronics', + 0x0007F8: u'ITDevices, Inc.', + 0x0007F9: u'Phonetics, Inc.', + 0x0007FA: u'ITT Co., Ltd.', + 0x0007FB: u'Giga Stream UMTS Technologies GmbH', + 0x0007FC: u'Adept Systems Inc.', + 0x0007FD: u'LANergy Ltd.', + 0x0007FE: u'Rigaku Corporation', + 0x0007FF: u'Gluon Networks', + 0x000800: u'MULTITECH SYSTEMS, INC.', + 0x000801: u'HighSpeed Surfing Inc.', + 0x000802: u'Compaq Computer Corporation', + 0x000803: u'Cos Tron', + 0x000804: u'ICA Inc.', + 0x000805: u'Techno-Holon Corporation', + 0x000806: u'Raonet Systems, Inc.', + 0x000807: u'Access Devices Limited', + 0x000808: u'PPT Vision, Inc.', + 0x000809: u'Systemonic AG', + 0x00080A: u'Espera-Werke GmbH', + 0x00080B: u'Birka BPA Informationssystem AB', + 0x00080C: u'VDA elettronica SrL', + 0x00080D: u'Toshiba', + 0x00080E: u'Motorola, BCS', + 0x00080F: u'Proximion Fiber Optics AB', + 0x000810: u'Key Technology, Inc.', + 0x000811: u'VOIX Corporation', + 0x000812: u'GM-2 Corporation', + 0x000813: u'Diskbank, Inc.', + 0x000814: u'TIL Technologies', + 0x000815: u'CATS Co., Ltd.', + 0x000816: u'Bluetags A/S', + 0x000817: u'EmergeCore Networks LLC', + 0x000818: u'Pixelworks, Inc.', + 0x000819: u'Banksys', + 0x00081A: u'Sanrad Intelligence Storage Communications (2000) Ltd.', + 0x00081B: u'Windigo Systems', + 0x00081C: u'@pos.com', + 0x00081D: u'Ipsil, Incorporated', + 0x00081E: u'Repeatit AB', + 0x00081F: u'Pou Yuen Tech Corp. 
Ltd.', + 0x000820: u'Cisco Systems Inc.', + 0x000821: u'Cisco Systems Inc.', + 0x000822: u'InPro Comm', + 0x000823: u'Texa Corp.', + 0x000824: u'Promatek Industries Ltd.', + 0x000825: u'Acme Packet', + 0x000826: u'Colorado Med Tech', + 0x000827: u'Pirelli Broadband Solutions', + 0x000828: u'Koei Engineering Ltd.', + 0x000829: u'Aval Nagasaki Corporation', + 0x00082A: u'Powerwallz Network Security', + 0x00082B: u'Wooksung Electronics, Inc.', + 0x00082C: u'Homag AG', + 0x00082D: u'Indus Teqsite Private Limited', + 0x00082E: u'Multitone Electronics PLC', + 0x00084E: u'DivergeNet, Inc.', + 0x00084F: u'Qualstar Corporation', + 0x000850: u'Arizona Instrument Corp.', + 0x000851: u'Canadian Bank Note Company, Ltd.', + 0x000852: u'Davolink Co. Inc.', + 0x000853: u'Schleicher GmbH & Co. Relaiswerke KG', + 0x000854: u'Netronix, Inc.', + 0x000855: u'NASA-Goddard Space Flight Center', + 0x000856: u'Gamatronic Electronic Industries Ltd.', + 0x000857: u'Polaris Networks, Inc.', + 0x000858: u'Novatechnology Inc.', + 0x000859: u'ShenZhen Unitone Electronics Co., Ltd.', + 0x00085A: u'IntiGate Inc.', + 0x00085B: u'Hanbit Electronics Co., Ltd.', + 0x00085C: u'Shanghai Dare Technologies Co. 
Ltd.', + 0x00085D: u'Aastra', + 0x00085E: u'PCO AG', + 0x00085F: u'Picanol N.V.', + 0x000860: u'LodgeNet Entertainment Corp.', + 0x000861: u'SoftEnergy Co., Ltd.', + 0x000862: u'NEC Eluminant Technologies, Inc.', + 0x000863: u'Entrisphere Inc.', + 0x000864: u'Fasy S.p.A.', + 0x000865: u'JASCOM CO., LTD', + 0x000866: u'DSX Access Systems, Inc.', + 0x000867: u'Uptime Devices', + 0x000868: u'PurOptix', + 0x000869: u'Command-e Technology Co.,Ltd.', + 0x00086A: u'Industrie Technik IPS GmbH', + 0x00086B: u'MIPSYS', + 0x00086C: u'Plasmon LMS', + 0x00086D: u'Missouri FreeNet', + 0x00086E: u'Hyglo AB', + 0x00086F: u'Resources Computer Network Ltd.', + 0x000870: u'Rasvia Systems, Inc.', + 0x000871: u'NORTHDATA Co., Ltd.', + 0x000872: u'Sorenson Technologies, Inc.', + 0x000873: u'DAP Design B.V.', + 0x000874: u'Dell Computer Corp.', + 0x000875: u'Acorp Electronics Corp.', + 0x000876: u'SDSystem', + 0x000877: u'Liebert HIROSS S.p.A.', + 0x000878: u'Benchmark Storage Innovations', + 0x000879: u'CEM Corporation', + 0x00087A: u'Wipotec GmbH', + 0x00087B: u'RTX Telecom A/S', + 0x00087C: u'Cisco Systems, Inc.', + 0x00087D: u'Cisco Systems Inc.', + 0x00087E: u'Bon Electro-Telecom Inc.', + 0x00087F: u'SPAUN electronic GmbH & Co. KG', + 0x000880: u'BroadTel Canada Communications inc.', + 0x000881: u'DIGITAL HANDS CO.,LTD.', + 0x000882: u'SIGMA CORPORATION', + 0x000883: u'Hewlett-Packard Company', + 0x000884: u'Index Braille AB', + 0x000885: u'EMS Dr. 
Thomas Wuensche', + 0x000886: u'Hansung Teliann, Inc.', + 0x000887: u'Maschinenfabrik Reinhausen GmbH', + 0x000888: u'OULLIM Information Technology Inc,.', + 0x000889: u'Echostar Technologies Corp', + 0x00088A: u'Minds@Work', + 0x00088B: u'Tropic Networks Inc.', + 0x00088C: u'Quanta Network Systems Inc.', + 0x00088D: u'Sigma-Links Inc.', + 0x00088E: u'Nihon Computer Co., Ltd.', + 0x00088F: u'ADVANCED DIGITAL TECHNOLOGY', + 0x000890: u'AVILINKS SA', + 0x000891: u'Lyan Inc.', + 0x000892: u'EM Solutions', + 0x000893: u'LE INFORMATION COMMUNICATION INC.', + 0x000894: u'InnoVISION Multimedia Ltd.', + 0x000895: u'DIRC Technologie GmbH & Co.KG', + 0x000896: u'Printronix, Inc.', + 0x000897: u'Quake Technologies', + 0x000898: u'Gigabit Optics Corporation', + 0x000899: u'Netbind, Inc.', + 0x00089A: u'Alcatel Microelectronics', + 0x00089B: u'ICP Electronics Inc.', + 0x00089C: u'Elecs Industry Co., Ltd.', + 0x00089D: u'UHD-Elektronik', + 0x00089E: u'Beijing Enter-Net co.LTD', + 0x00089F: u'EFM Networks', + 0x0008A0: u'Stotz Feinmesstechnik GmbH', + 0x0008A1: u'CNet Technology Inc.', + 0x0008A2: u'ADI Engineering, Inc.', + 0x0008A3: u'Cisco Systems', + 0x0008A4: u'Cisco Systems', + 0x0008A5: u'Peninsula Systems Inc.', + 0x0008A6: u'Multiware & Image Co., Ltd.', + 0x0008A7: u'iLogic Inc.', + 0x0008A8: u'Systec Co., Ltd.', + 0x0008A9: u'SangSang Technology, Inc.', + 0x0008AA: u'KARAM', + 0x0008AB: u'EnerLinx.com, Inc.', + 0x0008AC: u'PRIVATE', + 0x0008AD: u'Toyo-Linx Co., Ltd.', + 0x0008AE: u'PacketFront Sweden AB', + 0x0008AF: u'Novatec Corporation', + 0x0008B0: u'BKtel communications GmbH', + 0x0008B1: u'ProQuent Systems', + 0x0008B2: u'SHENZHEN COMPASS TECHNOLOGY DEVELOPMENT CO.,LTD', + 0x0008B3: u'Fastwel', + 0x0008B4: u'SYSPOL', + 0x0008B5: u'TAI GUEN ENTERPRISE CO., LTD', + 0x0008B6: u'RouteFree, Inc.', + 0x0008B7: u'HIT Incorporated', + 0x0008B8: u'E.F. 
Johnson', + 0x0008B9: u'KAON MEDIA Co., Ltd.', + 0x0008BA: u'Erskine Systems Ltd', + 0x0008BB: u'NetExcell', + 0x0008BC: u'Ilevo AB', + 0x0008BD: u'TEPG-US', + 0x0008BE: u'XENPAK MSA Group', + 0x0008BF: u'Aptus Elektronik AB', + 0x0008C0: u'ASA SYSTEMS', + 0x0008C1: u'Avistar Communications Corporation', + 0x0008C2: u'Cisco Systems', + 0x0008C3: u'Contex A/S', + 0x0008C4: u'Hikari Co.,Ltd.', + 0x0008C5: u'Liontech Co., Ltd.', + 0x0008C6: u'Philips Consumer Communications', + 0x0008C7: u'COMPAQ COMPUTER CORPORATION', + 0x0008C8: u'Soneticom, Inc.', + 0x0008C9: u'TechniSat Digital GmbH', + 0x0008CA: u'TwinHan Technology Co.,Ltd', + 0x0008CB: u'Zeta Broadband Inc.', + 0x0008CC: u'Remotec, Inc.', + 0x0008CD: u'With-Net Inc', + 0x0008CE: u'IPMobileNet Inc.', + 0x0008CF: u'Nippon Koei Power Systems Co., Ltd.', + 0x0008D0: u'Musashi Engineering Co., LTD.', + 0x0008D1: u'KAREL INC.', + 0x0008D2: u'ZOOM Networks Inc.', + 0x0008D3: u'Hercules Technologies S.A.', + 0x0008D4: u'IneoQuest Technologies, Inc', + 0x0008D5: u'Vanguard Managed Solutions', + 0x0008D6: u'HASSNET Inc.', + 0x0008D7: u'HOW CORPORATION', + 0x0008D8: u'Dowkey Microwave', + 0x0008D9: u'Mitadenshi Co.,LTD', + 0x0008DA: u'SofaWare Technologies Ltd.', + 0x0008DB: u'Corrigent Systems', + 0x0008DC: u'Wiznet', + 0x0008DD: u'Telena Communications, Inc.', + 0x0008DE: u'3UP Systems', + 0x0008DF: u'Alistel Inc.', + 0x0008E0: u'ATO Technology Ltd.', + 0x0008E1: u'Barix AG', + 0x0008E2: u'Cisco Systems', + 0x0008E3: u'Cisco Systems', + 0x0008E4: u'Envenergy Inc', + 0x0008E5: u'IDK Corporation', + 0x0008E6: u'Littlefeet', + 0x0008E7: u'SHI ControlSystems,Ltd.', + 0x0008E8: u'Excel Master Ltd.', + 0x0008E9: u'NextGig', + 0x0008EA: u'Motion Control Engineering, Inc', + 0x0008EB: u'ROMWin Co.,Ltd.', + 0x0008EC: u'Zonu, Inc.', + 0x0008ED: u'ST&T Instrument Corp.', + 0x0008EE: u'Logic Product Development', + 0x0008EF: u'DIBAL,S.A.', + 0x0008F0: u'Next Generation Systems, Inc.', + 0x0008F1: u'Voltaire', + 0x0008F2: u'C&S 
Technology', + 0x0008F3: u'WANY', + 0x0008F4: u'Bluetake Technology Co., Ltd.', + 0x0008F5: u'YESTECHNOLOGY Co.,Ltd.', + 0x0008F6: u'SUMITOMO ELECTRIC HIGHTECHS.co.,ltd.', + 0x0008F7: u'Hitachi Ltd, Semiconductor & Integrated Circuits Gr', + 0x0008F8: u'Guardall Ltd', + 0x0008F9: u'Padcom, Inc.', + 0x0008FA: u'Karl E.Brinkmann GmbH', + 0x0008FB: u'SonoSite, Inc.', + 0x0008FC: u'Gigaphoton Inc.', + 0x0008FD: u'BlueKorea Co., Ltd.', + 0x0008FE: u'UNIK C&C Co.,Ltd.', + 0x0008FF: u'Trilogy Communications Ltd', + 0x000900: u'TMT', + 0x000901: u'Shenzhen Shixuntong Information & Technoligy Co', + 0x000902: u'Redline Communications Inc.', + 0x000903: u'Panasas, Inc', + 0x000904: u'MONDIAL electronic', + 0x000905: u'iTEC Technologies Ltd.', + 0x000906: u'Esteem Networks', + 0x000907: u'Chrysalis Development', + 0x000908: u'VTech Technology Corp.', + 0x000909: u'Telenor Connect A/S', + 0x00090A: u'SnedFar Technology Co., Ltd.', + 0x00090B: u'MTL Instruments PLC', + 0x00090C: u'Mayekawa Mfg. Co. Ltd.', + 0x00090D: u'LEADER ELECTRONICS CORP.', + 0x00090E: u'Helix Technology Inc.', + 0x00090F: u'Fortinet Inc.', + 0x000910: u'Simple Access Inc.', + 0x000911: u'Cisco Systems', + 0x000912: u'Cisco Systems', + 0x000913: u'SystemK Corporation', + 0x000914: u'COMPUTROLS INC.', + 0x000915: u'CAS Corp.', + 0x000916: u'Listman Home Technologies, Inc.', + 0x000917: u'WEM Technology Inc', + 0x000918: u'SAMSUNG TECHWIN CO.,LTD', + 0x000919: u'MDS Gateways', + 0x00091A: u'Macat Optics & Electronics Co., Ltd.', + 0x00091B: u'Digital Generation Inc.', + 0x00091C: u'CacheVision, Inc', + 0x00091D: u'Proteam Computer Corporation', + 0x00091E: u'Firstech Technology Corp.', + 0x00091F: u'A&D Co., Ltd.', + 0x000920: u'EpoX COMPUTER CO.,LTD.', + 0x000921: u'Planmeca Oy', + 0x000922: u'Touchless Sensor Technology AG', + 0x000923: u'Heaman System Co., Ltd', + 0x000924: u'Telebau GmbH', + 0x000925: u'VSN Systemen BV', + 0x000926: u'YODA COMMUNICATIONS, INC.', + 0x000927: u'TOYOKEIKI CO.,LTD.', + 
0x000928: u'Telecore Inc', + 0x000929: u'Sanyo Industries (UK) Limited', + 0x00092A: u'MYTECS Co.,Ltd.', + 0x00092B: u'iQstor Networks, Inc.', + 0x00092C: u'Hitpoint Inc.', + 0x00092D: u'High Tech Computer, Corp.', + 0x00092E: u'B&Tech System Inc.', + 0x00092F: u'Akom Technology Corporation', + 0x000930: u'AeroConcierge Inc.', + 0x000931: u'Future Internet, Inc.', + 0x000932: u'Omnilux', + 0x000933: u'OPTOVALLEY Co. Ltd.', + 0x000934: u'Dream-Multimedia-Tv GmbH', + 0x000935: u'Sandvine Incorporated', + 0x000936: u'Ipetronik GmbH & Co.KG', + 0x000937: u'Inventec Appliance Corp', + 0x000938: u'Allot Communications', + 0x000939: u'ShibaSoku Co.,Ltd.', + 0x00093A: u'Molex Fiber Optics', + 0x00093B: u'HYUNDAI NETWORKS INC.', + 0x00093C: u'Jacques Technologies P/L', + 0x00093D: u'Newisys,Inc.', + 0x00093E: u'C&I Technologies', + 0x00093F: u'Double-Win Enterpirse CO., LTD', + 0x000940: u'AGFEO GmbH & Co. KG', + 0x000941: u'Allied Telesis K.K.', + 0x000942: u'CRESCO, LTD.', + 0x000943: u'Cisco Systems', + 0x000944: u'Cisco Systems', + 0x000945: u'Palmmicro Communications Inc', + 0x000946: u'Cluster Labs GmbH', + 0x000947: u'Aztek, Inc.', + 0x000948: u'Vista Control Systems, Corp.', + 0x000949: u'Glyph Technologies Inc.', + 0x00094A: u'Homenet Communications', + 0x00094B: u'FillFactory NV', + 0x00094C: u'Communication Weaver Co.,Ltd.', + 0x00094D: u'Braintree Communications Pty Ltd', + 0x00094E: u'BARTECH SYSTEMS INTERNATIONAL, INC', + 0x00094F: u'elmegt GmbH & Co. KG', + 0x000950: u'Independent Storage Corporation', + 0x000951: u'Apogee Instruments, Inc', + 0x000952: u'Auerswald GmbH & Co. KG', + 0x000953: u'Linkage System Integration Co.Ltd.', + 0x000954: u'AMiT spol. s. r. o.', + 0x000955: u'Young Generation International Corp.', + 0x000956: u'Network Systems Group, Ltd. 
(NSG)', + 0x000957: u'Supercaller, Inc.', + 0x000958: u'INTELNET S.A.', + 0x000959: u'Sitecsoft', + 0x00095A: u'RACEWOOD TECHNOLOGY', + 0x00095B: u'Netgear, Inc.', + 0x00095C: u'Philips Medical Systems - Cardiac and Monitoring Systems (CM', + 0x00095D: u'Dialogue Technology Corp.', + 0x00095E: u'Masstech Group Inc.', + 0x00095F: u'Telebyte, Inc.', + 0x000960: u'YOZAN Inc.', + 0x000961: u'Switchgear and Instrumentation Ltd', + 0x000962: u'Filetrac AS', + 0x000963: u'Dominion Lasercom Inc.', + 0x000964: u'Hi-Techniques', + 0x000965: u'PRIVATE', + 0x000966: u'Thales Navigation', + 0x000967: u'Tachyon, Inc', + 0x000968: u'TECHNOVENTURE, INC.', + 0x000969: u'Meret Optical Communications', + 0x00096A: u'Cloverleaf Communications Inc.', + 0x00096B: u'IBM Corporation', + 0x00096C: u'Imedia Semiconductor Corp.', + 0x00096D: u'Powernet Technologies Corp.', + 0x00096E: u'GIANT ELECTRONICS LTD.', + 0x00096F: u'Beijing Zhongqing Elegant Tech. Corp.,Limited', + 0x000970: u'Vibration Research Corporation', + 0x000971: u'Time Management, Inc.', + 0x000972: u'Securebase,Inc', + 0x000973: u'Lenten Technology Co., Ltd.', + 0x000974: u'Innopia Technologies, Inc.', + 0x000975: u'fSONA Communications Corporation', + 0x000976: u'Datasoft ISDN Systems GmbH', + 0x000977: u'Brunner Elektronik AG', + 0x000978: u'AIJI System Co., Ltd.', + 0x000979: u'Advanced Television Systems Committee, Inc.', + 0x00097A: u'Louis Design Labs.', + 0x00097B: u'Cisco Systems', + 0x00097C: u'Cisco Systems', + 0x00097D: u'SecWell Networks Oy', + 0x00097E: u'IMI TECHNOLOGY CO., LTD', + 0x00097F: u'Vsecure 2000 LTD.', + 0x000980: u'Power Zenith Inc.', + 0x000981: u'Newport Networks', + 0x000982: u'Loewe Opta GmbH', + 0x000983: u'Gvision Incorporated', + 0x000984: u'MyCasa Network Inc.', + 0x000985: u'Auto Telecom Company', + 0x000986: u'Metalink LTD.', + 0x000987: u'NISHI NIPPON ELECTRIC WIRE & CABLE CO.,LTD.', + 0x000988: u'Nudian Electron Co., Ltd.', + 0x000989: u'VividLogic Inc.', + 0x00098A: u'EqualLogic Inc', 
+ 0x00098B: u'Entropic Communications, Inc.', + 0x00098C: u'Option Wireless Sweden', + 0x00098D: u'Velocity Semiconductor', + 0x00098E: u'ipcas GmbH', + 0x00098F: u'Cetacean Networks', + 0x000990: u'ACKSYS Communications & systems', + 0x000991: u'GE Fanuc Automation Manufacturing, Inc.', + 0x000992: u'InterEpoch Technology,INC.', + 0x000993: u'Visteon Corporation', + 0x000994: u'Cronyx Engineering', + 0x000995: u'Castle Technology Ltd', + 0x000996: u'RDI', + 0x000997: u'Nortel Networks', + 0x000998: u'Capinfo Company Limited', + 0x000999: u'CP GEORGES RENAULT', + 0x00099A: u'ELMO COMPANY, LIMITED', + 0x00099B: u'Western Telematic Inc.', + 0x00099C: u'Naval Research Laboratory', + 0x00099D: u'Haliplex Communications', + 0x00099E: u'Testech, Inc.', + 0x00099F: u'VIDEX INC.', + 0x0009A0: u'Microtechno Corporation', + 0x0009A1: u'Telewise Communications, Inc.', + 0x0009A2: u'Interface Co., Ltd.', + 0x0009A3: u'Leadfly Techologies Corp. Ltd.', + 0x0009A4: u'HARTEC Corporation', + 0x0009A5: u'HANSUNG ELETRONIC INDUSTRIES DEVELOPMENT CO., LTD', + 0x0009A6: u'Ignis Optics, Inc.', + 0x0009A7: u'Bang & Olufsen A/S', + 0x0009A8: u'Eastmode Pte Ltd', + 0x0009A9: u'Ikanos Communications', + 0x0009AA: u'Data Comm for Business, Inc.', + 0x0009AB: u'Netcontrol Oy', + 0x0009AC: u'LANVOICE', + 0x0009AD: u'HYUNDAI SYSCOMM, INC.', + 0x0009AE: u'OKANO ELECTRIC CO.,LTD', + 0x0009AF: u'e-generis', + 0x0009B0: u'Onkyo Corporation', + 0x0009B1: u'Kanematsu Electronics, Ltd.', + 0x0009B2: u'L&F Inc.', + 0x0009B3: u'MCM Systems Ltd', + 0x0009B4: u'KISAN TELECOM CO., LTD.', + 0x0009B5: u'3J Tech. 
Co., Ltd.', + 0x0009B6: u'Cisco Systems', + 0x0009B7: u'Cisco Systems', + 0x0009B8: u'Entise Systems', + 0x0009B9: u'Action Imaging Solutions', + 0x0009BA: u'MAKU Informationstechik GmbH', + 0x0009BB: u'MathStar, Inc.', + 0x0009BC: u'Integrian, Inc.', + 0x0009BD: u'Epygi Technologies, Ltd.', + 0x0009BE: u'Mamiya-OP Co.,Ltd.', + 0x0009BF: u'Nintendo Co.,Ltd.', + 0x0009C0: u'6WIND', + 0x0009C1: u'PROCES-DATA A/S', + 0x0009C2: u'PRIVATE', + 0x0009C3: u'NETAS', + 0x0009C4: u'Medicore Co., Ltd', + 0x0009C5: u'KINGENE Technology Corporation', + 0x0009C6: u'Visionics Corporation', + 0x0009C7: u'Movistec', + 0x0009C8: u'SINAGAWA TSUSHIN KEISOU SERVICE', + 0x0009C9: u'BlueWINC Co., Ltd.', + 0x0009CA: u'iMaxNetworks(Shenzhen)Limited.', + 0x0009CB: u'HBrain', + 0x0009CC: u'Moog GmbH', + 0x0009CD: u'HUDSON SOFT CO.,LTD.', + 0x0009CE: u'SpaceBridge Semiconductor Corp.', + 0x0009CF: u'iAd GmbH', + 0x0009D0: u'Versatel Networks', + 0x0009D1: u'SERANOA NETWORKS INC', + 0x0009D2: u'Mai Logic Inc.', + 0x0009D3: u'Western DataCom Co., Inc.', + 0x0009D4: u'Transtech Networks', + 0x0009D5: u'Signal Communication, Inc.', + 0x0009D6: u'KNC One GmbH', + 0x0009D7: u'DC Security Products', + 0x0009D8: u'PRIVATE', + 0x0009D9: u'Neoscale Systems, Inc', + 0x0009DA: u'Control Module Inc.', + 0x0009DB: u'eSpace', + 0x0009DC: u'Galaxis Technology AG', + 0x0009DD: u'Mavin Technology Inc.', + 0x0009DE: u'Samjin Information & Communications Co., Ltd.', + 0x0009DF: u'Vestel Komunikasyon Sanayi ve Ticaret A.S.', + 0x0009E0: u'XEMICS S.A.', + 0x0009E1: u'Gemtek Technology Co., Ltd.', + 0x0009E2: u'Sinbon Electronics Co., Ltd.', + 0x0009E3: u'Angel Iglesias S.A.', + 0x0009E4: u'K Tech Infosystem Inc.', + 0x0009E5: u'Hottinger Baldwin Messtechnik GmbH', + 0x0009E6: u'Cyber Switching Inc.', + 0x0009E7: u'ADC Techonology', + 0x0009E8: u'Cisco Systems', + 0x0009E9: u'Cisco Systems', + 0x0009EA: u'YEM Inc.', + 0x0009EB: u'HuMANDATA LTD.', + 0x0009EC: u'Daktronics, Inc.', + 0x0009ED: u'CipherOptics', + 
0x0009EE: u'MEIKYO ELECTRIC CO.,LTD', + 0x0009EF: u'Vocera Communications', + 0x0009F0: u'Shimizu Technology Inc.', + 0x0009F1: u'Yamaki Electric Corporation', + 0x0009F2: u'Cohu, Inc., Electronics Division', + 0x0009F3: u'WELL Communication Corp.', + 0x0009F4: u'Alcon Laboratories, Inc.', + 0x0009F5: u'Emerson Network Power Co.,Ltd', + 0x0009F6: u'Shenzhen Eastern Digital Tech Ltd.', + 0x0009F7: u'SED, a division of Calian', + 0x0009F8: u'UNIMO TECHNOLOGY CO., LTD.', + 0x0009F9: u'ART JAPAN CO., LTD.', + 0x0009FB: u'Philips Medizinsysteme Boeblingen GmbH', + 0x0009FC: u'IPFLEX Inc.', + 0x0009FD: u'Ubinetics Limited', + 0x0009FE: u'Daisy Technologies, Inc.', + 0x0009FF: u'X.net 2000 GmbH', + 0x000A00: u'Mediatek Corp.', + 0x000A01: u'SOHOware, Inc.', + 0x000A02: u'ANNSO CO., LTD.', + 0x000A03: u'ENDESA SERVICIOS, S.L.', + 0x000A04: u'3Com Europe Ltd', + 0x000A05: u'Widax Corp.', + 0x000A06: u'Teledex LLC', + 0x000A07: u'WebWayOne Ltd', + 0x000A08: u'ALPINE ELECTRONICS, INC.', + 0x000A09: u'TaraCom Integrated Products, Inc.', + 0x000A0A: u'SUNIX Co., Ltd.', + 0x000A0B: u'Sealevel Systems, Inc.', + 0x000A0C: u'Scientific Research Corporation', + 0x000A0D: u'MergeOptics GmbH', + 0x000A0E: u'Invivo Research Inc.', + 0x000A0F: u'Ilryung Telesys, Inc', + 0x000A10: u'FAST media integrations AG', + 0x000A11: u'ExPet Technologies, Inc', + 0x000A12: u'Azylex Technology, Inc', + 0x000A13: u'Silent Witness', + 0x000A14: u'TECO a.s.', + 0x000A15: u'Silicon Data, Inc', + 0x000A16: u'Lassen Research', + 0x000A17: u'NESTAR COMMUNICATIONS, INC', + 0x000A18: u'Vichel Inc.', + 0x000A19: u'Valere Power, Inc.', + 0x000A1A: u'Imerge Ltd', + 0x000A1B: u'Stream Labs', + 0x000A1C: u'Bridge Information Co., Ltd.', + 0x000A1D: u'Optical Communications Products Inc.', + 0x000A1E: u'Red-M Products Limited', + 0x000A1F: u'ART WARE Telecommunication Co., Ltd.', + 0x000A20: u'SVA Networks, Inc.', + 0x000A21: u'Integra Telecom Co. 
Ltd', + 0x000A22: u'Amperion Inc', + 0x000A23: u'Parama Networks Inc', + 0x000A24: u'Octave Communications', + 0x000A25: u'CERAGON NETWORKS', + 0x000A26: u'CEIA S.p.A.', + 0x000A27: u'Apple Computer, Inc.', + 0x000A28: u'Motorola', + 0x000A29: u'Pan Dacom Networking AG', + 0x000A2A: u'QSI Systems Inc.', + 0x000A2B: u'Etherstuff', + 0x000A2C: u'Active Tchnology Corporation', + 0x000A2D: u'PRIVATE', + 0x000A2E: u'MAPLE NETWORKS CO., LTD', + 0x000A2F: u'Artnix Inc.', + 0x000A30: u'Johnson Controls-ASG', + 0x000A31: u'HCV Wireless', + 0x000A32: u'Xsido Corporation', + 0x000A33: u'Emulex Corporation', + 0x000A34: u'Identicard Systems Incorporated', + 0x000A35: u'Xilinx', + 0x000A36: u'Synelec Telecom Multimedia', + 0x000A37: u'Procera Networks, Inc.', + 0x000A38: u'Netlock Technologies, Inc.', + 0x000A39: u'LoPA Information Technology', + 0x000A3A: u'J-THREE INTERNATIONAL Holding Co., Ltd.', + 0x000A3B: u'GCT Semiconductor, Inc', + 0x000A3C: u'Enerpoint Ltd.', + 0x000A3D: u'Elo Sistemas Eletronicos S.A.', + 0x000A3E: u'EADS Telecom', + 0x000A3F: u'Data East Corporation', + 0x000A40: u'Crown Audio', + 0x000A41: u'Cisco Systems', + 0x000A42: u'Cisco Systems', + 0x000A43: u'Chunghwa Telecom Co., Ltd.', + 0x000A44: u'Avery Dennison Deutschland GmbH', + 0x000A45: u'Audio-Technica Corp.', + 0x000A46: u'ARO Controls SAS', + 0x000A47: u'Allied Vision Technologies', + 0x000A48: u'Albatron Technology', + 0x000A49: u'Acopia Networks', + 0x000A4A: u'Targa Systems Ltd.', + 0x000A4B: u'DataPower Technology, Inc.', + 0x000A4C: u'Molecular Devices Corporation', + 0x000A4D: u'Noritz Corporation', + 0x000A4E: u'UNITEK Electronics INC.', + 0x000A4F: u'Brain Boxes Limited', + 0x000A50: u'REMOTEK CORPORATION', + 0x000A51: u'GyroSignal Technology Co., Ltd.', + 0x000A52: u'AsiaRF Ltd.', + 0x000A53: u'Intronics, Incorporated', + 0x000A54: u'Laguna Hills, Inc.', + 0x000A55: u'MARKEM Corporation', + 0x000A56: u'HITACHI Maxell Ltd.', + 0x000A57: u'Hewlett-Packard Company - Standards', + 0x000A58: 
u'Ingenieur-Buero Freyer & Siegel', + 0x000A59: u'HW server', + 0x000A5A: u'GreenNET Technologies Co.,Ltd.', + 0x000A5B: u'Power-One as', + 0x000A5C: u'Carel s.p.a.', + 0x000A5D: u'PUC Founder (MSC) Berhad', + 0x000A5E: u'3COM Corporation', + 0x000A5F: u'almedio inc.', + 0x000A60: u'Autostar Technology Pte Ltd', + 0x000A61: u'Cellinx Systems Inc.', + 0x000A62: u'Crinis Networks, Inc.', + 0x000A63: u'DHD GmbH', + 0x000A64: u'Eracom Technologies', + 0x000A65: u'GentechMedia.co.,ltd.', + 0x000A66: u'MITSUBISHI ELECTRIC SYSTEM & SERVICE CO.,LTD.', + 0x000A67: u'OngCorp', + 0x000A68: u'SolarFlare Communications, Inc.', + 0x000A69: u'SUNNY bell Technology Co., Ltd.', + 0x000A6A: u'SVM Microwaves s.r.o.', + 0x000A6B: u'Tadiran Telecom Business Systems LTD', + 0x000A6C: u'Walchem Corporation', + 0x000A6D: u'EKS Elektronikservice GmbH', + 0x000A6E: u'Broadcast Technology Limited', + 0x000A6F: u'ZyFLEX Technologies Inc', + 0x000A70: u'MPLS Forum', + 0x000A71: u'Avrio Technologies, Inc', + 0x000A72: u'SimpleTech, Inc.', + 0x000A73: u'Scientific Atlanta', + 0x000A74: u'Manticom Networks Inc.', + 0x000A75: u'Cat Electronics', + 0x000A76: u'Beida Jade Bird Huaguang Technology Co.,Ltd', + 0x000A77: u'Bluewire Technologies LLC', + 0x000A78: u'OLITEC', + 0x000A79: u'corega K.K.', + 0x000A7A: u'Kyoritsu Electric Co., Ltd.', + 0x000A7B: u'Cornelius Consult', + 0x000A7C: u'Tecton Ltd', + 0x000A7D: u'Valo, Inc.', + 0x000A7E: u'The Advantage Group', + 0x000A7F: u'Teradon Industries, Inc', + 0x000A80: u'Telkonet Inc.', + 0x000A81: u'TEIMA Audiotex S.L.', + 0x000A82: u'TATSUTA SYSTEM ELECTRONICS CO.,LTD.', + 0x000A83: u'SALTO SYSTEMS S.L.', + 0x000A84: u'Rainsun Enterprise Co., Ltd.', + 0x000A85: u'PLAT\'C2,Inc', + 0x000A86: u'Lenze', + 0x000A87: u'Integrated Micromachines Inc.', + 0x000A88: u'InCypher S.A.', + 0x000A89: u'Creval Systems, Inc.', + 0x000A8A: u'Cisco Systems', + 0x000A8B: u'Cisco Systems', + 0x000A8C: u'Guardware Systems Ltd.', + 0x000A8D: u'EUROTHERM LIMITED', + 0x000A8E: 
u'Invacom Ltd', + 0x000A8F: u'Aska International Inc.', + 0x000A90: u'Bayside Interactive, Inc.', + 0x000A91: u'HemoCue AB', + 0x000A92: u'Presonus Corporation', + 0x000A93: u'W2 Networks, Inc.', + 0x000A94: u'ShangHai cellink CO., LTD', + 0x000A95: u'Apple Computer, Inc.', + 0x000A96: u'MEWTEL TECHNOLOGY INC.', + 0x000A97: u'SONICblue, Inc.', + 0x000A98: u'M+F Gwinner GmbH & Co', + 0x000A99: u'Dataradio Inc.', + 0x000A9A: u'Aiptek International Inc', + 0x000A9B: u'Towa Meccs Corporation', + 0x000A9C: u'Server Technology, Inc.', + 0x000A9D: u'King Young Technology Co. Ltd.', + 0x000A9E: u'BroadWeb Corportation', + 0x000A9F: u'Pannaway Technologies, Inc.', + 0x000AA0: u'Cedar Point Communications', + 0x000AA1: u'V V S Limited', + 0x000AA2: u'SYSTEK INC.', + 0x000AA3: u'SHIMAFUJI ELECTRIC CO.,LTD.', + 0x000AA4: u'SHANGHAI SURVEILLANCE TECHNOLOGY CO,LTD', + 0x000AA5: u'MAXLINK INDUSTRIES LIMITED', + 0x000AA6: u'Hochiki Corporation', + 0x000AA7: u'FEI Company', + 0x000AA8: u'ePipe Pty. Ltd.', + 0x000AA9: u'Brooks Automation GmbH', + 0x000AAA: u'AltiGen Communications Inc.', + 0x000AAB: u'TOYOTA MACS, INC.', + 0x000AAC: u'TerraTec Electronic GmbH', + 0x000AAD: u'Stargames Corporation', + 0x000AAE: u'Rosemount Process Analytical', + 0x000AAF: u'Pipal Systems', + 0x000AB0: u'LOYTEC electronics GmbH', + 0x000AB1: u'GENETEC Corporation', + 0x000AB2: u'Fresnel Wireless Systems', + 0x000AB3: u'Fa. GIRA', + 0x000AB4: u'ETIC Telecommunications', + 0x000AB5: u'Digital Electronic Network', + 0x000AB6: u'COMPUNETIX, INC', + 0x000AB7: u'Cisco Systems', + 0x000AB8: u'Cisco Systems', + 0x000AB9: u'Astera Technologies Corp.', + 0x000ABA: u'Arcon Technology Limited', + 0x000ABB: u'Taiwan Secom Co,. 
Ltd', + 0x000ABC: u'Seabridge Ltd.', + 0x000ABD: u'Rupprecht & Patashnick Co.', + 0x000ABE: u'OPNET Technologies CO., LTD.', + 0x000ABF: u'HIROTA SS', + 0x000AC0: u'Fuyoh Video Industry CO., LTD.', + 0x000AC1: u'Futuretel', + 0x000AC2: u'FiberHome Telecommunication Technologies CO.,LTD', + 0x000AC3: u'eM Technics Co., Ltd.', + 0x000AC4: u'Daewoo Teletech Co., Ltd', + 0x000AC5: u'Color Kinetics', + 0x000AC6: u'Ceterus Networks, Inc.', + 0x000AC7: u'Unication Group', + 0x000AC8: u'ZPSYS CO.,LTD. (Planning&Management)', + 0x000AC9: u'Zambeel Inc', + 0x000ACA: u'YOKOYAMA SHOKAI CO.,Ltd.', + 0x000ACB: u'XPAK MSA Group', + 0x000ACC: u'Winnow Networks, Inc.', + 0x000ACD: u'Sunrich Technology Limited', + 0x000ACE: u'RADIANTECH, INC.', + 0x000ACF: u'PROVIDEO Multimedia Co. Ltd.', + 0x000AD0: u'Niigata Develoment Center, F.I.T. Co., Ltd.', + 0x000AD1: u'MWS', + 0x000AD2: u'JEPICO Corporation', + 0x000AD3: u'INITECH Co., Ltd', + 0x000AD4: u'CoreBell Systems Inc.', + 0x000AD5: u'Brainchild Electronic Co., Ltd.', + 0x000AD6: u'BeamReach Networks', + 0x000AD7: u'Origin ELECTRIC CO.,LTD.', + 0x000AD8: u'IPCserv Technology Corp.', + 0x000AD9: u'Sony Ericsson Mobile Communications AB', + 0x000ADA: u'PRIVATE', + 0x000ADB: u'SkyPilot Network, Inc', + 0x000ADC: u'RuggedCom Inc.', + 0x000ADD: u'InSciTek Microsystems, Inc.', + 0x000ADE: u'Happy Communication Co., Ltd.', + 0x000ADF: u'Gennum Corporation', + 0x000AE0: u'Fujitsu Softek', + 0x000AE1: u'EG Technology', + 0x000AE2: u'Binatone Electronics International, Ltd', + 0x000AE3: u'YANG MEI TECHNOLOGY CO., LTD', + 0x000AE4: u'Wistron Corp.', + 0x000AE5: u'ScottCare Corporation', + 0x000AE6: u'Elitegroup Computer System Co. (ECS)', + 0x000AE7: u'ELIOP S.A.', + 0x000AE8: u'Cathay Roxus Information Technology Co. LTD', + 0x000AE9: u'AirVast Technology Inc.', + 0x000AEA: u'ADAM ELEKTRONIK LTD.STI.', + 0x000AEB: u'Shenzhen Tp-Link Technology Co; Ltd.', + 0x000AEC: u'Koatsu Gas Kogyo Co., Ltd.', + 0x000AED: u'HARTING Vending G.m.b.H. 
& CO KG', + 0x000AEE: u'GCD Hard- & Software GmbH', + 0x000AEF: u'OTRUM ASA', + 0x000AF0: u'SHIN-OH ELECTRONICS CO., LTD. R&D', + 0x000AF1: u'Clarity Design, Inc.', + 0x000AF2: u'NeoAxiom Corp.', + 0x000AF3: u'Cisco Systems', + 0x000AF4: u'Cisco Systems', + 0x000AF5: u'Airgo Networks, Inc.', + 0x000AF6: u'Computer Process Controls', + 0x000AF7: u'Broadcom Corp.', + 0x000AF8: u'American Telecare Inc.', + 0x000AF9: u'HiConnect, Inc.', + 0x000AFA: u'Traverse Technologies Australia', + 0x000AFB: u'Ambri Limited', + 0x000AFC: u'Core Tec Communications, LLC', + 0x000AFD: u'Viking Electronic Services', + 0x000AFE: u'NovaPal Ltd', + 0x000AFF: u'Kilchherr Elektronik AG', + 0x000B00: u'FUJIAN START COMPUTER EQUIPMENT CO.,LTD', + 0x000B01: u'DAIICHI ELECTRONICS CO., LTD.', + 0x000B02: u'Dallmeier electronic', + 0x000B03: u'Taekwang Industrial Co., Ltd', + 0x000B04: u'Volktek Corporation', + 0x000B05: u'Pacific Broadband Networks', + 0x000B06: u'Motorola BCS', + 0x000B07: u'Voxpath Networks', + 0x000B08: u'Pillar Data Systems', + 0x000B09: u'Ifoundry Systems Singapore', + 0x000B0A: u'dBm Optics', + 0x000B0B: u'Corrent Corporation', + 0x000B0C: u'Agile Systems Inc.', + 0x000B0D: u'Air2U, Inc.', + 0x000B0E: u'Trapeze Networks', + 0x000B0F: u'Nyquist Industrial Control BV', + 0x000B10: u'11wave Technonlogy Co.,Ltd', + 0x000B11: u'HIMEJI ABC TRADING CO.,LTD.', + 0x000B12: u'NURI Telecom Co., Ltd.', + 0x000B13: u'ZETRON INC', + 0x000B14: u'ViewSonic Corporation', + 0x000B15: u'Platypus Technology', + 0x000B16: u'Communication Machinery Corporation', + 0x000B17: u'MKS Instruments', + 0x000B18: u'PRIVATE', + 0x000B19: u'Vernier Networks, Inc.', + 0x000B1A: u'Teltone Corporation', + 0x000B1B: u'Systronix, Inc.', + 0x000B1C: u'SIBCO bv', + 0x000B1D: u'LayerZero Power Systems, Inc.', + 0x000B1E: u'KAPPA opto-electronics GmbH', + 0x000B1F: u'I CON Computer Co.', + 0x000B20: u'Hirata corporation', + 0x000B21: u'G-Star Communications Inc.', + 0x000B22: u'Environmental Systems and 
Services', + 0x000B23: u'Siemens Subscriber Networks', + 0x000B24: u'AirLogic', + 0x000B25: u'Aeluros', + 0x000B26: u'Wetek Corporation', + 0x000B27: u'Scion Corporation', + 0x000B28: u'Quatech Inc.', + 0x000B29: u'LG Industrial Systems Co.,Ltd.', + 0x000B2A: u'HOWTEL Co., Ltd.', + 0x000B2B: u'HOSTNET CORPORATION', + 0x000B2C: u'Eiki Industrial Co. Ltd.', + 0x000B2D: u'Danfoss Inc.', + 0x000B2E: u'Cal-Comp Electronics (Thailand) Public Company Limited Taipe', + 0x000B2F: u'bplan GmbH', + 0x000B30: u'Beijing Gongye Science & Technology Co.,Ltd', + 0x000B31: u'Yantai ZhiYang Scientific and technology industry CO., LTD', + 0x000B32: u'VORMETRIC, INC.', + 0x000B33: u'Vivato', + 0x000B34: u'ShangHai Broadband Technologies CO.LTD', + 0x000B35: u'Quad Bit System co., Ltd.', + 0x000B36: u'Productivity Systems, Inc.', + 0x000B37: u'MANUFACTURE DES MONTRES ROLEX SA', + 0x000B38: u'Knuerr AG', + 0x000B39: u'Keisoku Giken Co.,Ltd.', + 0x000B3A: u'QuStream Corporation', + 0x000B3B: u'devolo AG', + 0x000B3C: u'Cygnal Integrated Products, Inc.', + 0x000B3D: u'CONTAL OK Ltd.', + 0x000B3E: u'BittWare, Inc', + 0x000B3F: u'Anthology Solutions Inc.', + 0x000B40: u'OpNext Inc.', + 0x000B41: u'Ing. Buero Dr. 
Beutlhauser', + 0x000B42: u'commax Co., Ltd.', + 0x000B43: u'Microscan Systems, Inc.', + 0x000B44: u'Concord IDea Corp.', + 0x000B45: u'Cisco', + 0x000B46: u'Cisco', + 0x000B47: u'Advanced Energy', + 0x000B48: u'sofrel', + 0x000B49: u'RF-Link System Inc.', + 0x000B4A: u'Visimetrics (UK) Ltd', + 0x000B4B: u'VISIOWAVE SA', + 0x000B4C: u'Clarion (M) Sdn Bhd', + 0x000B4D: u'Emuzed', + 0x000B4E: u'VertexRSI Antenna Products Division', + 0x000B4F: u'Verifone, INC.', + 0x000B50: u'Oxygnet', + 0x000B51: u'Micetek International Inc.', + 0x000B52: u'JOYMAX ELECTRONICS CORP.', + 0x000B53: u'INITIUM Co., Ltd.', + 0x000B54: u'BiTMICRO Networks, Inc.', + 0x000B55: u'ADInstruments', + 0x000B56: u'Cybernetics', + 0x000B57: u'Silicon Laboratories', + 0x000B58: u'Astronautics C.A LTD', + 0x000B59: u'ScriptPro, LLC', + 0x000B5A: u'HyperEdge', + 0x000B5B: u'Rincon Research Corporation', + 0x000B5C: u'Newtech Co.,Ltd', + 0x000B5D: u'FUJITSU LIMITED', + 0x000B5E: u'Audio Engineering Society Inc.', + 0x000B5F: u'Cisco Systems', + 0x000B60: u'Cisco Systems', + 0x000B61: u'Friedrich Lütze GmbH &Co.', + 0x000B62: u'Ingenieurbüro Ingo Mohnen', + 0x000B63: u'Kaleidescape', + 0x000B64: u'Kieback & Peter GmbH & Co KG', + 0x000B65: u'Sy.A.C. srl', + 0x000B66: u'Teralink Communications', + 0x000B67: u'Topview Technology Corporation', + 0x000B68: u'Addvalue Communications Pte Ltd', + 0x000B69: u'Franke Finland Oy', + 0x000B6A: u'Asiarock Incorporation', + 0x000B6B: u'Wistron Neweb Corp.', + 0x000B6C: u'Sychip Inc.', + 0x000B6D: u'SOLECTRON JAPAN NAKANIIDA', + 0x000B6E: u'Neff Instrument Corp.', + 0x000B6F: u'Media Streaming Networks Inc', + 0x000B70: u'Load Technology, Inc.', + 0x000B71: u'Litchfield Communications Inc.', + 0x000B72: u'Lawo AG', + 0x000B73: u'Kodeos Communications', + 0x000B74: u'Kingwave Technology Co., Ltd.', + 0x000B75: u'Iosoft Ltd.', + 0x000B76: u'ET&T Co. 
Ltd.', + 0x000B77: u'Cogent Systems, Inc.', + 0x000B78: u'TAIFATECH INC.', + 0x000B79: u'X-COM, Inc.', + 0x000B7A: u'Wave Science Inc.', + 0x000B7B: u'Test-Um Inc.', + 0x000B7C: u'Telex Communications', + 0x000B7D: u'SOLOMON EXTREME INTERNATIONAL LTD.', + 0x000B7E: u'SAGINOMIYA Seisakusho Inc.', + 0x000B7F: u'OmniWerks', + 0x000B80: u'Lycium Networks', + 0x000B81: u'Kaparel Corporation', + 0x000B82: u'Grandstream Networks, Inc.', + 0x000B83: u'DATAWATT B.V.', + 0x000B84: u'BODET', + 0x000B85: u'Airespace, Inc.', + 0x000B86: u'Aruba Networks', + 0x000B87: u'American Reliance Inc.', + 0x000B88: u'Vidisco ltd.', + 0x000B89: u'Top Global Technology, Ltd.', + 0x000B8A: u'MITEQ Inc.', + 0x000B8B: u'KERAJET, S.A.', + 0x000B8C: u'flextronics israel', + 0x000B8D: u'Avvio Networks', + 0x000B8E: u'Ascent Corporation', + 0x000B8F: u'AKITA ELECTRONICS SYSTEMS CO.,LTD.', + 0x000B90: u'Covaro Networks, Inc.', + 0x000B91: u'Aglaia Gesellschaft für Bildverarbeitung und Kommunikation m', + 0x000B92: u'Ascom Danmark A/S', + 0x000B93: u'Barmag Electronic', + 0x000B94: u'Digital Monitoring Products, Inc.', + 0x000B95: u'eBet Gaming Systems Pty Ltd', + 0x000B96: u'Innotrac Diagnostics Oy', + 0x000B97: u'Matsushita Electric Industrial Co.,Ltd.', + 0x000B98: u'NiceTechVision', + 0x000B99: u'SensAble Technologies, Inc.', + 0x000B9A: u'Shanghai Ulink Telecom Equipment Co. Ltd.', + 0x000B9B: u'Sirius System Co, Ltd.', + 0x000B9C: u'TriBeam Technologies, Inc.', + 0x000B9D: u'TwinMOS Technologies Inc.', + 0x000B9E: u'Yasing Technology Corp.', + 0x000B9F: u'Neue ELSA GmbH', + 0x000BA0: u'T&L Information Inc.', + 0x000BA1: u'SYSCOM Ltd.', + 0x000BA2: u'Sumitomo Electric Networks, Inc', + 0x000BA3: u'Siemens AG, I&S', + 0x000BA4: u'Shiron Satellite Communications Ltd. 
(1996)', + 0x000BA5: u'Quasar Cipta Mandiri, PT', + 0x000BA6: u'Miyakawa Electric Works Ltd.', + 0x000BA7: u'Maranti Networks', + 0x000BA8: u'HANBACK ELECTRONICS CO., LTD.', + 0x000BA9: u'CloudShield Technologies, Inc.', + 0x000BAA: u'Aiphone co.,Ltd', + 0x000BAB: u'Advantech Technology (CHINA) Co., Ltd.', + 0x000BAC: u'3Com Europe Ltd.', + 0x000BAD: u'PC-PoS Inc.', + 0x000BAE: u'Vitals System Inc.', + 0x000BAF: u'WOOJU COMMUNICATIONS Co,.Ltd', + 0x000BB0: u'Sysnet Telematica srl', + 0x000BB1: u'Super Star Technology Co., Ltd.', + 0x000BB2: u'SMALLBIG TECHNOLOGY', + 0x000BB3: u'RiT technologies Ltd.', + 0x000BB4: u'RDC Semiconductor Inc.,', + 0x000BB5: u'nStor Technologies, Inc.', + 0x000BB6: u'Mototech Inc.', + 0x000BB7: u'Micro Systems Co.,Ltd.', + 0x000BB8: u'Kihoku Electronic Co.', + 0x000BB9: u'Imsys AB', + 0x000BBA: u'Harmonic Broadband Access Networks', + 0x000BBB: u'Etin Systems Co., Ltd', + 0x000BBC: u'En Garde Systems, Inc.', + 0x000BBD: u'Connexionz Limited', + 0x000BBE: u'Cisco Systems', + 0x000BBF: u'Cisco Systems', + 0x000BC0: u'China IWNComm Co., Ltd.', + 0x000BC1: u'Bay Microsystems, Inc.', + 0x000BC2: u'Corinex Communication Corp.', + 0x000BC3: u'Multiplex, Inc.', + 0x000BC4: u'BIOTRONIK GmbH & Co', + 0x000BC5: u'SMC Networks, Inc.', + 0x000BC6: u'ISAC, Inc.', + 0x000BC7: u'ICET S.p.A.', + 0x000BC8: u'AirFlow Networks', + 0x000BC9: u'Electroline Equipment', + 0x000BCA: u'DATAVAN International Corporation', + 0x000BCB: u'Fagor Automation , S. 
Coop', + 0x000BCC: u'JUSAN, S.A.', + 0x000BCD: u'Compaq (HP)', + 0x000BCE: u'Free2move AB', + 0x000BCF: u'AGFA NDT INC.', + 0x000BD0: u'XiMeta Technology Americas Inc.', + 0x000BD1: u'Aeronix, Inc.', + 0x000BD2: u'Remopro Technology Inc.', + 0x000BD3: u'cd3o', + 0x000BD4: u'Beijing Wise Technology & Science Development Co.Ltd', + 0x000BD5: u'Nvergence, Inc.', + 0x000BD6: u'Paxton Access Ltd', + 0x000BD7: u'MBB Gelma GmbH', + 0x000BD8: u'Industrial Scientific Corp.', + 0x000BD9: u'General Hydrogen', + 0x000BDA: u'EyeCross Co.,Inc.', + 0x000BDB: u'Dell ESG PCBA Test', + 0x000BDC: u'AKCP', + 0x000BDD: u'TOHOKU RICOH Co., LTD.', + 0x000BDE: u'TELDIX GmbH', + 0x000BDF: u'Shenzhen RouterD Networks Limited', + 0x000BE0: u'SercoNet Ltd.', + 0x000BE1: u'Nokia NET Product Operations', + 0x000BE2: u'Lumenera Corporation', + 0x000BE3: u'Key Stream Co., Ltd.', + 0x000BE4: u'Hosiden Corporation', + 0x000BE5: u'HIMS Korea Co., Ltd.', + 0x000BE6: u'Datel Electronics', + 0x000BE7: u'COMFLUX TECHNOLOGY INC.', + 0x000BE8: u'AOIP', + 0x000BE9: u'Actel Corporation', + 0x000BEA: u'Zultys Technologies', + 0x000BEB: u'Systegra AG', + 0x000BEC: u'NIPPON ELECTRIC INSTRUMENT, INC.', + 0x000BED: u'ELM Inc.', + 0x000BEE: u'inc.jet, Incorporated', + 0x000BEF: u'Code Corporation', + 0x000BF0: u'MoTEX Products Co., Ltd.', + 0x000BF1: u'LAP Laser Applikations', + 0x000BF2: u'Chih-Kan Technology Co., Ltd.', + 0x000BF3: u'BAE SYSTEMS', + 0x000BF4: u'PRIVATE', + 0x000BF5: u'Shanghai Sibo Telecom Technology Co.,Ltd', + 0x000BF6: u'Nitgen Co., Ltd', + 0x000BF7: u'NIDEK CO.,LTD', + 0x000BF8: u'Infinera', + 0x000BF9: u'Gemstone communications, Inc.', + 0x000BFA: u'EXEMYS SRL', + 0x000BFB: u'D-NET International Corporation', + 0x000BFC: u'Cisco Systems', + 0x000BFD: u'Cisco Systems', + 0x000BFE: u'CASTEL Broadband Limited', + 0x000BFF: u'Berkeley Camera Engineering', + 0x000C00: u'BEB Industrie-Elektronik AG', + 0x000C01: u'Abatron AG', + 0x000C02: u'ABB Oy', + 0x000C03: u'HDMI Licensing, LLC', + 
0x000C04: u'Tecnova', + 0x000C05: u'RPA Reserch Co., Ltd.', + 0x000C06: u'Nixvue Systems Pte Ltd', + 0x000C07: u'Iftest AG', + 0x000C08: u'HUMEX Technologies Corp.', + 0x000C09: u'Hitachi IE Systems Co., Ltd', + 0x000C0A: u'Guangdong Province Electronic Technology Research Institute', + 0x000C0B: u'Broadbus Technologies', + 0x000C0C: u'APPRO TECHNOLOGY INC.', + 0x000C0D: u'Communications & Power Industries / Satcom Division', + 0x000C0E: u'XtremeSpectrum, Inc.', + 0x000C0F: u'Techno-One Co., Ltd', + 0x000C10: u'PNI Corporation', + 0x000C11: u'NIPPON DEMPA CO.,LTD.', + 0x000C12: u'Micro-Optronic-Messtechnik GmbH', + 0x000C13: u'MediaQ', + 0x000C14: u'Diagnostic Instruments, Inc.', + 0x000C15: u'CyberPower Systems, Inc.', + 0x000C16: u'Concorde Microsystems Inc.', + 0x000C17: u'AJA Video Systems Inc', + 0x000C18: u'Zenisu Keisoku Inc.', + 0x000C19: u'Telio Communications GmbH', + 0x000C1A: u'Quest Technical Solutions Inc.', + 0x000C1B: u'ORACOM Co, Ltd.', + 0x000C1C: u'MicroWeb Co., Ltd.', + 0x000C1D: u'Mettler & Fuchs AG', + 0x000C1E: u'Global Cache', + 0x000C1F: u'Glimmerglass Networks', + 0x000C20: u'Fi WIn, Inc.', + 0x000C21: u'Faculty of Science and Technology, Keio University', + 0x000C22: u'Double D Electronics Ltd', + 0x000C23: u'Beijing Lanchuan Tech. Co., Ltd.', + 0x000C24: u'ANATOR', + 0x000C25: u'Allied Telesyn Networks', + 0x000C26: u'Weintek Labs. Inc.', + 0x000C27: u'Sammy Corporation', + 0x000C28: u'RIFATRON', + 0x000C29: u'VMware, Inc.', + 0x000C2A: u'OCTTEL Communication Co., Ltd.', + 0x000C2B: u'ELIAS Technology, Inc.', + 0x000C2C: u'Enwiser Inc.', + 0x000C2D: u'FullWave Technology Co., Ltd.', + 0x000C2E: u'Openet information technology(shenzhen) Co., Ltd.', + 0x000C2F: u'SeorimTechnology Co.,Ltd.', + 0x000C30: u'Cisco', + 0x000C31: u'Cisco', + 0x000C32: u'Avionic Design Development GmbH', + 0x000C33: u'Compucase Enterprise Co. Ltd.', + 0x000C34: u'Vixen Co., Ltd.', + 0x000C35: u'KaVo Dental GmbH & Co. 
KG', + 0x000C36: u'SHARP TAKAYA ELECTRONICS INDUSTRY CO.,LTD.', + 0x000C37: u'Geomation, Inc.', + 0x000C38: u'TelcoBridges Inc.', + 0x000C39: u'Sentinel Wireless Inc.', + 0x000C3A: u'Oxance', + 0x000C3B: u'Orion Electric Co., Ltd.', + 0x000C3C: u'MediaChorus, Inc.', + 0x000C3D: u'Glsystech Co., Ltd.', + 0x000C3E: u'Crest Audio', + 0x000C3F: u'Cogent Defence & Security Networks,', + 0x000C40: u'Altech Controls', + 0x000C41: u'The Linksys Group, Inc.', + 0x000C42: u'Routerboard.com', + 0x000C43: u'Ralink Technology, Corp.', + 0x000C44: u'Automated Interfaces, Inc.', + 0x000C45: u'Animation Technologies Inc.', + 0x000C46: u'Allied Telesyn Inc.', + 0x000C47: u'SK Teletech(R&D Planning Team)', + 0x000C48: u'QoStek Corporation', + 0x000C49: u'Dangaard Telecom RTC Division A/S', + 0x000C4A: u'Cygnus Microsystems Private Limited', + 0x000C4B: u'Cheops Elektronik', + 0x000C4C: u'Arcor AG&Co.', + 0x000C4D: u'ACRA CONTROL', + 0x000C4E: u'Winbest Technology CO,LT', + 0x000C4F: u'UDTech Japan Corporation', + 0x000C50: u'Seagate Technology', + 0x000C51: u'Scientific Technologies Inc.', + 0x000C52: u'Roll Systems Inc.', + 0x000C53: u'PRIVATE', + 0x000C54: u'Pedestal Networks, Inc', + 0x000C55: u'Microlink Communications Inc.', + 0x000C56: u'Megatel Computer (1986) Corp.', + 0x000C57: u'MACKIE Engineering Services Belgium BVBA', + 0x000C58: u'M&S Systems', + 0x000C59: u'Indyme Electronics, Inc.', + 0x000C5A: u'IBSmm Industrieelektronik Multimedia', + 0x000C5B: u'HANWANG TECHNOLOGY CO.,LTD', + 0x000C5C: u'GTN Systems B.V.', + 0x000C5D: u'CHIC TECHNOLOGY (CHINA) CORP.', + 0x000C5E: u'Calypso Medical', + 0x000C5F: u'Avtec, Inc.', + 0x000C60: u'ACM Systems', + 0x000C61: u'AC Tech corporation DBA Advanced Digital', + 0x000C62: u'ABB Automation Technology Products AB, Control', + 0x000C63: u'Zenith Electronics Corporation', + 0x000C64: u'X2 MSA Group', + 0x000C65: u'Sunin Telecom', + 0x000C66: u'Pronto Networks Inc', + 0x000C67: u'OYO ELECTRIC CO.,LTD', + 0x000C68: u'SigmaTel, Inc.', + 
0x000C69: u'National Radio Astronomy Observatory', + 0x000C6A: u'MBARI', + 0x000C6B: u'Kurz Industrie-Elektronik GmbH', + 0x000C6C: u'Elgato Systems LLC', + 0x000C6D: u'BOC Edwards', + 0x000C6E: u'ASUSTEK COMPUTER INC.', + 0x000C6F: u'Amtek system co.,LTD.', + 0x000C70: u'ACC GmbH', + 0x000C71: u'Wybron, Inc', + 0x000C72: u'Tempearl Industrial Co., Ltd.', + 0x000C73: u'TELSON ELECTRONICS CO., LTD', + 0x000C74: u'RIVERTEC CORPORATION', + 0x000C75: u'Oriental integrated electronics. LTD', + 0x000C76: u'MICRO-STAR INTERNATIONAL CO., LTD.', + 0x000C77: u'Life Racing Ltd', + 0x000C78: u'In-Tech Electronics Limited', + 0x000C79: u'Extel Communications P/L', + 0x000C7A: u'DaTARIUS Technologies GmbH', + 0x000C7B: u'ALPHA PROJECT Co.,Ltd.', + 0x000C7C: u'Internet Information Image Inc.', + 0x000C7D: u'TEIKOKU ELECTRIC MFG. CO., LTD', + 0x000C7E: u'Tellium Incorporated', + 0x000C7F: u'synertronixx GmbH', + 0x000C80: u'Opelcomm Inc.', + 0x000C81: u'Nulec Industries Pty Ltd', + 0x000C82: u'NETWORK TECHNOLOGIES INC', + 0x000C83: u'Logical Solutions', + 0x000C84: u'Eazix, Inc.', + 0x000C85: u'Cisco Systems', + 0x000C86: u'Cisco Systems', + 0x000C87: u'ATI', + 0x000C88: u'Apache Micro Peripherals, Inc.', + 0x000C89: u'AC Electric Vehicles, Ltd.', + 0x000C8A: u'Bose Corporation', + 0x000C8B: u'Connect Tech Inc', + 0x000C8C: u'KODICOM CO.,LTD.', + 0x000C8D: u'MATRIX VISION GmbH', + 0x000C8E: u'Mentor Engineering Inc', + 0x000C8F: u'Nergal s.r.l.', + 0x000C90: u'Octasic Inc.', + 0x000C91: u'Riverhead Networks Inc.', + 0x000C92: u'WolfVision Gmbh', + 0x000C93: u'Xeline Co., Ltd.', + 0x000C94: u'United Electronic Industries, Inc.', + 0x000C95: u'PrimeNet', + 0x000C96: u'OQO, Inc.', + 0x000C97: u'NV ADB TTV Technologies SA', + 0x000C98: u'LETEK Communications Inc.', + 0x000C99: u'HITEL LINK Co.,Ltd', + 0x000C9A: u'Hitech Electronics Corp.', + 0x000C9B: u'EE Solutions, Inc', + 0x000C9C: u'Chongho information & communications', + 0x000C9D: u'AirWalk Communications, Inc.', + 0x000C9E: 
u'MemoryLink Corp.', + 0x000C9F: u'NKE Corporation', + 0x000CA0: u'StorCase Technology, Inc.', + 0x000CA1: u'SIGMACOM Co., LTD.', + 0x000CA2: u'Scopus Network Technologies Ltd', + 0x000CA3: u'Rancho Technology, Inc.', + 0x000CA4: u'Prompttec Product Management GmbH', + 0x000CA5: u'Naman NZ LTd', + 0x000CA6: u'Mintera Corporation', + 0x000CA7: u'Metro (Suzhou) Technologies Co., Ltd.', + 0x000CA8: u'Garuda Networks Corporation', + 0x000CA9: u'Ebtron Inc.', + 0x000CAA: u'Cubic Transportation Systems Inc', + 0x000CAB: u'COMMEND International', + 0x000CAC: u'Citizen Watch Co., Ltd.', + 0x000CAD: u'BTU International', + 0x000CAE: u'Ailocom Oy', + 0x000CAF: u'TRI TERM CO.,LTD.', + 0x000CB0: u'Star Semiconductor Corporation', + 0x000CB1: u'Salland Engineering (Europe) BV', + 0x000CB2: u'safei Co., Ltd.', + 0x000CB3: u'ROUND Co.,Ltd.', + 0x000CB4: u'AutoCell Laboratories, Inc.', + 0x000CB5: u'Premier Technolgies, Inc', + 0x000CB6: u'NANJING SEU MOBILE & INTERNET TECHNOLOGY CO.,LTD', + 0x000CB7: u'Nanjing Huazhuo Electronics Co., Ltd.', + 0x000CB8: u'MEDION AG', + 0x000CB9: u'LEA', + 0x000CBA: u'Jamex', + 0x000CBB: u'ISKRAEMECO', + 0x000CBC: u'Iscutum', + 0x000CBD: u'Interface Masters, Inc', + 0x000CBE: u'PRIVATE', + 0x000CBF: u'Holy Stone Ent. 
Co., Ltd.', + 0x000CC0: u'Genera Oy', + 0x000CC1: u'Cooper Industries Inc.', + 0x000CC2: u'PRIVATE', + 0x000CC3: u'BeWAN systems', + 0x000CC4: u'Tiptel AG', + 0x000CC5: u'Nextlink Co., Ltd.', + 0x000CC6: u'Ka-Ro electronics GmbH', + 0x000CC7: u'Intelligent Computer Solutions Inc.', + 0x000CC8: u'Xytronix Research & Design, Inc.', + 0x000CC9: u'ILWOO DATA & TECHNOLOGY CO.,LTD', + 0x000CCA: u'Hitachi Global Storage Technologies', + 0x000CCB: u'Design Combus Ltd', + 0x000CCC: u'Aeroscout Ltd.', + 0x000CCD: u'IEC - TC57', + 0x000CCE: u'Cisco Systems', + 0x000CCF: u'Cisco Systems', + 0x000CD0: u'Symetrix', + 0x000CD1: u'SFOM Technology Corp.', + 0x000CD2: u'Schaffner EMV AG', + 0x000CD3: u'Prettl Elektronik Radeberg GmbH', + 0x000CD4: u'Positron Public Safety Systems inc.', + 0x000CD5: u'Passave Inc.', + 0x000CD6: u'PARTNER TECH', + 0x000CD7: u'Nallatech Ltd', + 0x000CD8: u'M. K. Juchheim GmbH & Co', + 0x000CD9: u'Itcare Co., Ltd', + 0x000CDA: u'FreeHand Systems, Inc.', + 0x000CDB: u'Foundry Networks', + 0x000CDC: u'BECS Technology, Inc', + 0x000CDD: u'AOS Technologies AG', + 0x000CDE: u'ABB STOTZ-KONTAKT GmbH', + 0x000CDF: u'PULNiX America, Inc', + 0x000CE0: u'Trek Diagnostics Inc.', + 0x000CE1: u'The Open Group', + 0x000CE2: u'Rolls-Royce', + 0x000CE3: u'Option International N.V.', + 0x000CE4: u'NeuroCom International, Inc.', + 0x000CE5: u'Motorola BCS', + 0x000CE6: u'Meru Networks Inc', + 0x000CE7: u'MediaTek Inc.', + 0x000CE8: u'GuangZhou AnJuBao Co., Ltd', + 0x000CE9: u'BLOOMBERG L.P.', + 0x000CEA: u'aphona Kommunikationssysteme', + 0x000CEB: u'CNMP Networks, Inc.', + 0x000CEC: u'Spectracom Corp.', + 0x000CED: u'Real Digital Media', + 0x000CEE: u'jp-embedded', + 0x000CEF: u'Open Networks Engineering Ltd', + 0x000CF0: u'M & N GmbH', + 0x000CF1: u'Intel Corporation', + 0x000CF2: u'GAMESA EÓLICA', + 0x000CF3: u'CALL IMAGE SA', + 0x000CF4: u'AKATSUKI ELECTRIC MFG.CO.,LTD.', + 0x000CF5: u'InfoExpress', + 0x000CF6: u'Sitecom Europe BV', + 0x000CF7: u'Nortel Networks', + 
0x000CF8: u'Nortel Networks', + 0x000CF9: u'ITT Flygt AB', + 0x000CFA: u'Digital Systems Corp', + 0x000CFB: u'Korea Network Systems', + 0x000CFC: u'S2io Technologies Corp', + 0x000CFD: u'PRIVATE', + 0x000CFE: u'Grand Electronic Co., Ltd', + 0x000CFF: u'MRO-TEK LIMITED', + 0x000D00: u'Seaway Networks Inc.', + 0x000D01: u'P&E Microcomputer Systems, Inc.', + 0x000D02: u'NEC AccessTechnica,Ltd', + 0x000D03: u'Matrics, Inc.', + 0x000D04: u'Foxboro Eckardt Development GmbH', + 0x000D05: u'cybernet manufacturing inc.', + 0x000D06: u'Compulogic Limited', + 0x000D07: u'Calrec Audio Ltd', + 0x000D08: u'AboveCable, Inc.', + 0x000D09: u'Yuehua(Zhuhai) Electronic CO. LTD', + 0x000D0A: u'Projectiondesign as', + 0x000D0B: u'Buffalo Inc.', + 0x000D0C: u'MDI Security Systems', + 0x000D0D: u'ITSupported, LLC', + 0x000D0E: u'Inqnet Systems, Inc.', + 0x000D0F: u'Finlux Ltd', + 0x000D10: u'Embedtronics Oy', + 0x000D11: u'DENTSPLY - Gendex', + 0x000D12: u'AXELL Corporation', + 0x000D13: u'Wilhelm Rutenbeck GmbH&Co.', + 0x000D14: u'Vtech Innovation LP dba Advanced American Telephones', + 0x000D15: u'Voipac s.r.o.', + 0x000D16: u'UHS Systems Pty Ltd', + 0x000D17: u'Turbo Networks Co.Ltd', + 0x000D18: u'Sunitec Enterprise Co., Ltd.', + 0x000D19: u'ROBE Show lighting', + 0x000D1A: u'Mustek System Inc.', + 0x000D1B: u'Kyoto Electronics Manufacturing Co., Ltd.', + 0x000D1C: u'I2E TELECOM', + 0x000D1D: u'HIGH-TEK HARNESS ENT. 
CO., LTD.', + 0x000D1E: u'Control Techniques', + 0x000D1F: u'AV Digital', + 0x000D20: u'ASAHIKASEI TECHNOSYSTEM CO.,LTD.', + 0x000D21: u'WISCORE Inc.', + 0x000D22: u'Unitronics', + 0x000D23: u'Smart Solution, Inc', + 0x000D24: u'SENTEC E&E CO., LTD.', + 0x000D25: u'SANDEN CORPORATION', + 0x000D26: u'Primagraphics Limited', + 0x000D27: u'MICROPLEX Printware AG', + 0x000D28: u'Cisco', + 0x000D29: u'Cisco', + 0x000D2A: u'Scanmatic AS', + 0x000D2B: u'Racal Instruments', + 0x000D2C: u'Patapsco Designs Ltd', + 0x000D2D: u'NCT Deutschland GmbH', + 0x000D2E: u'Matsushita Avionics Systems Corporation', + 0x000D2F: u'AIN Comm.Tech.Co., LTD', + 0x000D30: u'IceFyre Semiconductor', + 0x000D31: u'Compellent Technologies, Inc.', + 0x000D32: u'DispenseSource, Inc.', + 0x000D33: u'Prediwave Corp.', + 0x000D34: u'Shell International Exploration and Production, Inc.', + 0x000D35: u'PAC International Ltd', + 0x000D36: u'Wu Han Routon Electronic Co., Ltd', + 0x000D37: u'WIPLUG', + 0x000D38: u'NISSIN INC.', + 0x000D39: u'Network Electronics', + 0x000D3A: u'Microsoft Corp.', + 0x000D3B: u'Microelectronics Technology Inc.', + 0x000D3C: u'i.Tech Dynamic Ltd', + 0x000D3D: u'Hammerhead Systems, Inc.', + 0x000D3E: u'APLUX Communications Ltd.', + 0x000D3F: u'VXI Technology', + 0x000D40: u'Verint Loronix Video Solutions', + 0x000D41: u'Siemens AG ICM MP UC RD IT KLF1', + 0x000D42: u'Newbest Development Limited', + 0x000D43: u'DRS Tactical Systems Inc.', + 0x000D44: u'PRIVATE', + 0x000D45: u'Tottori SANYO Electric Co., Ltd.', + 0x000D46: u'SSD Drives, Inc.', + 0x000D47: u'Collex', + 0x000D48: u'AEWIN Technologies Co., Ltd.', + 0x000D49: u'Triton Systems of Delaware, Inc.', + 0x000D4A: u'Steag ETA-Optik', + 0x000D4B: u'Roku, LLC', + 0x000D4C: u'Outline Electronics Ltd.', + 0x000D4D: u'Ninelanes', + 0x000D4E: u'NDR Co.,LTD.', + 0x000D4F: u'Kenwood Corporation', + 0x000D50: u'Galazar Networks', + 0x000D51: u'DIVR Systems, Inc.', + 0x000D52: u'Comart system', + 0x000D53: u'Beijing 5w Communication 
Corp.', + 0x000D54: u'3Com Europe Ltd', + 0x000D55: u'SANYCOM Technology Co.,Ltd', + 0x000D56: u'Dell PCBA Test', + 0x000D57: u'Fujitsu I-Network Systems Limited.', + 0x000D58: u'PRIVATE', + 0x000D59: u'Amity Systems, Inc.', + 0x000D5A: u'Tiesse SpA', + 0x000D5B: u'Smart Empire Investments Limited', + 0x000D5C: u'Robert Bosch GmbH, VT-ATMO', + 0x000D5D: u'Raritan Computer, Inc', + 0x000D5E: u'NEC CustomTechnica, Ltd.', + 0x000D5F: u'Minds Inc', + 0x000D60: u'IBM Corporation', + 0x000D61: u'Giga-Byte Technology Co., Ltd.', + 0x000D62: u'Funkwerk Dabendorf GmbH', + 0x000D63: u'DENT Instruments, Inc.', + 0x000D64: u'COMAG Handels AG', + 0x000D65: u'Cisco Systems', + 0x000D66: u'Cisco Systems', + 0x000D67: u'BelAir Networks Inc.', + 0x000D68: u'Vinci Systems, Inc.', + 0x000D69: u'TMT&D Corporation', + 0x000D6A: u'Redwood Technologies LTD', + 0x000D6B: u'Mita-Teknik A/S', + 0x000D6C: u'M-Audio', + 0x000D6D: u'K-Tech Devices Corp.', + 0x000D6E: u'K-Patents Oy', + 0x000D6F: u'Ember Corporation', + 0x000D70: u'Datamax Corporation', + 0x000D71: u'boca systems', + 0x000D72: u'2Wire, Inc', + 0x000D73: u'Technical Support, Inc.', + 0x000D74: u'Sand Network Systems, Inc.', + 0x000D75: u'Kobian Pte Ltd - Taiwan Branch', + 0x000D76: u'Hokuto Denshi Co,. Ltd.', + 0x000D77: u'FalconStor Software', + 0x000D78: u'Engineering & Security', + 0x000D79: u'Dynamic Solutions Co,.Ltd.', + 0x000D7A: u'DiGATTO Asia Pacific Pte Ltd', + 0x000D7B: u'Consensys Computers Inc.', + 0x000D7C: u'Codian Ltd', + 0x000D7D: u'Afco Systems', + 0x000D7E: u'Axiowave Networks, Inc.', + 0x000D7F: u'MIDAS COMMUNICATION TECHNOLOGIES PTE LTD ( Foreign Branch)', + 0x000D80: u'Online Development Inc', + 0x000D81: u'Pepperl+Fuchs GmbH', + 0x000D82: u'PHS srl', + 0x000D83: u'Sanmina-SCI Hungary Ltd.', + 0x000D84: u'Makus Inc.', + 0x000D85: u'Tapwave, Inc.', + 0x000D86: u'Huber + Suhner AG', + 0x000D87: u'Elitegroup Computer System Co. 
(ECS)', + 0x000D88: u'D-Link Corporation', + 0x000D89: u'Bils Technology Inc', + 0x000D8A: u'Winners Electronics Co., Ltd.', + 0x000D8B: u'T&D Corporation', + 0x000D8C: u'Shanghai Wedone Digital Ltd. CO.', + 0x000D8D: u'ProLinx Communication Gateways, Inc.', + 0x000D8E: u'Koden Electronics Co., Ltd.', + 0x000D8F: u'King Tsushin Kogyo Co., LTD.', + 0x000D90: u'Factum Electronics AB', + 0x000D91: u'Eclipse (HQ Espana) S.L.', + 0x000D92: u'Arima Communication Corporation', + 0x000D93: u'Apple Computer', + 0x000D94: u'AFAR Communications,Inc', + 0x000D95: u'Opti-cell, Inc.', + 0x000D96: u'Vtera Technology Inc.', + 0x000D97: u'Tropos Networks, Inc.', + 0x000D98: u'S.W.A.C. Schmitt-Walter Automation Consult GmbH', + 0x000D99: u'Orbital Sciences Corp.; Launch Systems Group', + 0x000D9A: u'INFOTEC LTD', + 0x000D9B: u'Heraeus Electro-Nite International N.V.', + 0x000D9C: u'Elan GmbH & Co KG', + 0x000D9D: u'Hewlett Packard', + 0x000D9E: u'TOKUDEN OHIZUMI SEISAKUSYO Co.,Ltd.', + 0x000D9F: u'RF Micro Devices', + 0x000DA0: u'NEDAP N.V.', + 0x000DA1: u'MIRAE ITS Co.,LTD.', + 0x000DA2: u'Infrant Technologies, Inc.', + 0x000DA3: u'Emerging Technologies Limited', + 0x000DA4: u'DOSCH & AMAND SYSTEMS AG', + 0x000DA5: u'Fabric7 Systems, Inc', + 0x000DA6: u'Universal Switching Corporation', + 0x000DA7: u'PRIVATE', + 0x000DA8: u'Teletronics Technology Corporation', + 0x000DA9: u'T.E.A.M. 
S.L.', + 0x000DAA: u'S.A.Tehnology co.,Ltd.', + 0x000DAB: u'Parker Hannifin GmbH Electromechanical Division Europe', + 0x000DAC: u'Japan CBM Corporation', + 0x000DAD: u'Dataprobe Inc', + 0x000DAE: u'SAMSUNG HEAVY INDUSTRIES CO., LTD.', + 0x000DAF: u'Plexus Corp (UK) Ltd', + 0x000DB0: u'Olym-tech Co.,Ltd.', + 0x000DB1: u'Japan Network Service Co., Ltd.', + 0x000DB2: u'Ammasso, Inc.', + 0x000DB3: u'SDO Communication Corperation', + 0x000DB4: u'NETASQ', + 0x000DB5: u'GLOBALSAT TECHNOLOGY CORPORATION', + 0x000DB6: u'Teknovus, Inc.', + 0x000DB7: u'SANKO ELECTRIC CO,.LTD', + 0x000DB8: u'SCHILLER AG', + 0x000DB9: u'PC Engines GmbH', + 0x000DBA: u'Océ Document Technologies GmbH', + 0x000DBB: u'Nippon Dentsu Co.,Ltd.', + 0x000DBC: u'Cisco Systems', + 0x000DBD: u'Cisco Systems', + 0x000DBE: u'Bel Fuse Europe Ltd.,UK', + 0x000DBF: u'TekTone Sound & Signal Mfg., Inc.', + 0x000DC0: u'Spagat AS', + 0x000DC1: u'SafeWeb Inc', + 0x000DC2: u'PRIVATE', + 0x000DC3: u'First Communication, Inc.', + 0x000DC4: u'Emcore Corporation', + 0x000DC5: u'EchoStar International Corporation', + 0x000DC6: u'DigiRose Technology Co., Ltd.', + 0x000DC7: u'COSMIC ENGINEERING INC.', + 0x000DC8: u'AirMagnet, Inc', + 0x000DC9: u'THALES Elektronik Systeme GmbH', + 0x000DCA: u'Tait Electronics', + 0x000DCB: u'Petcomkorea Co., Ltd.', + 0x000DCC: u'NEOSMART Corp.', + 0x000DCD: u'GROUPE TXCOM', + 0x000DCE: u'Dynavac Technology Pte Ltd', + 0x000DCF: u'Cidra Corp.', + 0x000DD0: u'TetraTec Instruments GmbH', + 0x000DD1: u'Stryker Corporation', + 0x000DD2: u'Simrad Optronics ASA', + 0x000DD3: u'SAMWOO Telecommunication Co.,Ltd.', + 0x000DD4: u'Revivio Inc.', + 0x000DD5: u'O\'RITE TECHNOLOGY CO.,LTD', + 0x000DD6: u'ITI LTD', + 0x000DD7: u'Bright', + 0x000DD8: u'BBN', + 0x000DD9: u'Anton Paar GmbH', + 0x000DDA: u'ALLIED TELESIS K.K.', + 0x000DDB: u'AIRWAVE TECHNOLOGIES INC.', + 0x000DDC: u'VAC', + 0x000DDD: u'PROFÃLO TELRA ELEKTRONÃK SANAYà VE TÃCARET A.Þ.', + 0x000DDE: u'Joyteck Co., Ltd.', + 0x000DDF: u'Japan Image 
& Network Inc.', + 0x000DE0: u'ICPDAS Co.,LTD', + 0x000DE1: u'Control Products, Inc.', + 0x000DE2: u'CMZ Sistemi Elettronici', + 0x000DE3: u'AT Sweden AB', + 0x000DE4: u'DIGINICS, Inc.', + 0x000DE5: u'Samsung Thales', + 0x000DE6: u'YOUNGBO ENGINEERING CO.,LTD', + 0x000DE7: u'Snap-on OEM Group', + 0x000DE8: u'Nasaco Electronics Pte. Ltd', + 0x000DE9: u'Napatech Aps', + 0x000DEA: u'Kingtel Telecommunication Corp.', + 0x000DEB: u'CompXs Limited', + 0x000DEC: u'Cisco Systems', + 0x000DED: u'Cisco Systems', + 0x000DEE: u'Andrew RF Power Amplifier Group', + 0x000DEF: u'Soc. Coop. Bilanciai', + 0x000DF0: u'QCOM TECHNOLOGY INC.', + 0x000DF1: u'IONIX INC.', + 0x000DF2: u'PRIVATE', + 0x000DF3: u'Asmax Solutions', + 0x000DF4: u'Watertek Co.', + 0x000DF5: u'Teletronics International Inc.', + 0x000DF6: u'Technology Thesaurus Corp.', + 0x000DF7: u'Space Dynamics Lab', + 0x000DF8: u'ORGA Kartensysteme GmbH', + 0x000DF9: u'NDS Limited', + 0x000DFA: u'Micro Control Systems Ltd.', + 0x000DFB: u'Komax AG', + 0x000DFC: u'ITFOR Inc. resarch and development', + 0x000DFD: u'Huges Hi-Tech Inc.,', + 0x000DFE: u'Hauppauge Computer Works, Inc.', + 0x000DFF: u'CHENMING MOLD INDUSTRY CORP.', + 0x000E00: u'Atrie', + 0x000E01: u'ASIP Technologies Inc.', + 0x000E02: u'Advantech AMT Inc.', + 0x000E03: u'Emulex', + 0x000E04: u'CMA/Microdialysis AB', + 0x000E05: u'WIRELESS MATRIX CORP.', + 0x000E06: u'Team Simoco Ltd', + 0x000E07: u'Sony Ericsson Mobile Communications AB', + 0x000E08: u'Sipura Technology, Inc.', + 0x000E09: u'Shenzhen Coship Software Co.,LTD.', + 0x000E0A: u'SAKUMA DESIGN OFFICE', + 0x000E0B: u'Netac Technology Co., Ltd.', + 0x000E0C: u'Intel Corporation', + 0x000E0D: u'HESCH Schröder GmbH', + 0x000E0E: u'ESA elettronica S.P.A.', + 0x000E0F: u'ERMME', + 0x000E10: u'PRIVATE', + 0x000E11: u'BDT Büro- und Datentechnik GmbH & Co. 
KG', + 0x000E12: u'Adaptive Micro Systems Inc.', + 0x000E13: u'Accu-Sort Systems inc.', + 0x000E14: u'Visionary Solutions, Inc.', + 0x000E15: u'Tadlys LTD', + 0x000E16: u'SouthWing', + 0x000E17: u'PRIVATE', + 0x000E18: u'MyA Technology', + 0x000E19: u'LogicaCMG Pty Ltd', + 0x000E1A: u'JPS Communications', + 0x000E1B: u'IAV GmbH', + 0x000E1C: u'Hach Company', + 0x000E1D: u'ARION Technology Inc.', + 0x000E1E: u'PRIVATE', + 0x000E1F: u'TCL Networks Equipment Co., Ltd.', + 0x000E20: u'PalmSource, Inc.', + 0x000E21: u'MTU Friedrichshafen GmbH', + 0x000E22: u'PRIVATE', + 0x000E23: u'Incipient, Inc.', + 0x000E24: u'Huwell Technology Inc.', + 0x000E25: u'Hannae Technology Co., Ltd', + 0x000E26: u'Gincom Technology Corp.', + 0x000E27: u'Crere Networks, Inc.', + 0x000E28: u'Dynamic Ratings P/L', + 0x000E29: u'Shester Communications Inc', + 0x000E2A: u'PRIVATE', + 0x000E2B: u'Safari Technologies', + 0x000E2C: u'Netcodec co.', + 0x000E2D: u'Hyundai Digital Technology Co.,Ltd.', + 0x000E2E: u'Edimax Technology Co., Ltd.', + 0x000E2F: u'Disetronic Medical Systems AG', + 0x000E30: u'AERAS Networks, Inc.', + 0x000E31: u'Olympus BioSystems GmbH', + 0x000E32: u'Kontron Medical', + 0x000E33: u'Shuko Electronics Co.,Ltd', + 0x000E34: u'NexGen City, LP', + 0x000E35: u'Intel Corp', + 0x000E36: u'HEINESYS, Inc.', + 0x000E37: u'Harms & Wende GmbH & Co.KG', + 0x000E38: u'Cisco Systems', + 0x000E39: u'Cisco Systems', + 0x000E3A: u'Cirrus Logic', + 0x000E3B: u'Hawking Technologies, Inc.', + 0x000E3C: u'TransAct Technoloiges Inc.', + 0x000E3D: u'Televic N.V.', + 0x000E3E: u'Sun Optronics Inc', + 0x000E3F: u'Soronti, Inc.', + 0x000E40: u'Nortel Networks', + 0x000E41: u'NIHON MECHATRONICS CO.,LTD.', + 0x000E42: u'Motic Incoporation Ltd.', + 0x000E43: u'G-Tek Electronics Sdn. 
Bhd.', + 0x000E44: u'Digital 5, Inc.', + 0x000E45: u'Beijing Newtry Electronic Technology Ltd', + 0x000E46: u'Niigata Seimitsu Co.,Ltd.', + 0x000E47: u'NCI System Co.,Ltd.', + 0x000E48: u'Lipman TransAction Solutions', + 0x000E49: u'Forsway Scandinavia AB', + 0x000E4A: u'Changchun Huayu WEBPAD Co.,LTD', + 0x000E4B: u'atrium c and i', + 0x000E4C: u'Bermai Inc.', + 0x000E4D: u'Numesa Inc.', + 0x000E4E: u'Waveplus Technology Co., Ltd.', + 0x000E4F: u'Trajet GmbH', + 0x000E50: u'Thomson Telecom Belgium', + 0x000E51: u'tecna elettronica srl', + 0x000E52: u'Optium Corporation', + 0x000E53: u'AV TECH CORPORATION', + 0x000E54: u'AlphaCell Wireless Ltd.', + 0x000E55: u'AUVITRAN', + 0x000E56: u'4G Systems GmbH', + 0x000E57: u'Iworld Networking, Inc.', + 0x000E58: u'Sonos, Inc.', + 0x000E59: u'SAGEM SA', + 0x000E5A: u'TELEFIELD inc.', + 0x000E5B: u'ParkerVision - Direct2Data', + 0x000E5C: u'Motorola BCS', + 0x000E5D: u'Triple Play Technologies A/S', + 0x000E5E: u'Beijing Raisecom Science & Technology Development Co.,Ltd', + 0x000E5F: u'activ-net GmbH & Co. KG', + 0x000E60: u'360SUN Digital Broadband Corporation', + 0x000E61: u'MICROTROL LIMITED', + 0x000E62: u'Nortel Networks', + 0x000E63: u'Lemke Diagnostics GmbH', + 0x000E64: u'Elphel, Inc', + 0x000E65: u'TransCore', + 0x000E66: u'Hitachi Advanced Digital, Inc.', + 0x000E67: u'Eltis Microelectronics Ltd.', + 0x000E68: u'E-TOP Network Technology Inc.', + 0x000E69: u'China Electric Power Research Institute', + 0x000E6A: u'3COM EUROPE LTD', + 0x000E6B: u'Janitza electronics GmbH', + 0x000E6C: u'Device Drivers Limited', + 0x000E6D: u'Murata Manufacturing Co., Ltd.', + 0x000E6E: u'MICRELEC ELECTRONICS S.A', + 0x000E6F: u'IRIS Corporation Berhad', + 0x000E70: u'in2 Networks', + 0x000E71: u'Gemstar Technology Development Ltd.', + 0x000E72: u'CTS electronics', + 0x000E73: u'Tpack A/S', + 0x000E74: u'Solar Telecom. 
Tech', + 0x000E75: u'New York Air Brake Corp.', + 0x000E76: u'GEMSOC INNOVISION INC.', + 0x000E77: u'Decru, Inc.', + 0x000E78: u'Amtelco', + 0x000E79: u'Ample Communications Inc.', + 0x000E7A: u'GemWon Communications Co., Ltd.', + 0x000E7B: u'Toshiba', + 0x000E7C: u'Televes S.A.', + 0x000E7D: u'Electronics Line 3000 Ltd.', + 0x000E7E: u'Comprog Oy', + 0x000E7F: u'Hewlett Packard', + 0x000E80: u'Thomson Technology Inc', + 0x000E81: u'Devicescape Software, Inc.', + 0x000E82: u'Commtech Wireless', + 0x000E83: u'Cisco Systems', + 0x000E84: u'Cisco Systems', + 0x000E85: u'Catalyst Enterprises, Inc.', + 0x000E86: u'Alcatel North America', + 0x000E87: u'adp Gauselmann GmbH', + 0x000E88: u'VIDEOTRON CORP.', + 0x000E89: u'CLEMATIC', + 0x000E8A: u'Avara Technologies Pty. Ltd.', + 0x000E8B: u'Astarte Technology Co, Ltd.', + 0x000E8C: u'Siemens AG A&D ET', + 0x000E8D: u'Systems in Progress Holding GmbH', + 0x000E8E: u'SparkLAN Communications, Inc.', + 0x000E8F: u'Sercomm Corp.', + 0x000E90: u'PONICO CORP.', + 0x000E91: u'Northstar Technologies', + 0x000E92: u'Millinet Co., Ltd.', + 0x000E93: u'Milénio 3 Sistemas Electrónicos, Lda.', + 0x000E94: u'Maas International BV', + 0x000E95: u'Fujiya Denki Seisakusho Co.,Ltd.', + 0x000E96: u'Cubic Defense Applications, Inc.', + 0x000E97: u'Ultracker Technology CO., Inc', + 0x000E98: u'Vitec CC, INC.', + 0x000E99: u'Spectrum Digital, Inc', + 0x000E9A: u'BOE TECHNOLOGY GROUP CO.,LTD', + 0x000E9B: u'Ambit Microsystems Corporation', + 0x000E9C: u'Pemstar', + 0x000E9D: u'Video Networks Ltd', + 0x000E9E: u'Topfield Co., Ltd', + 0x000E9F: u'TEMIC SDS GmbH', + 0x000EA0: u'NetKlass Technology Inc.', + 0x000EA1: u'Formosa Teletek Corporation', + 0x000EA2: u'CyberGuard Corporation', + 0x000EA3: u'CNCR-IT CO.,LTD,HangZhou P.R.CHINA', + 0x000EA4: u'Certance Inc.', + 0x000EA5: u'BLIP Systems', + 0x000EA6: u'ASUSTEK COMPUTER INC.', + 0x000EA7: u'Endace Inc Ltd.', + 0x000EA8: u'United Technologists Europe Limited', + 0x000EA9: u'Shanghai Xun Shi 
Communications Equipment Ltd. Co.', + 0x000EAA: u'Scalent Systems, Inc.', + 0x000EAB: u'OctigaBay Systems Corporation', + 0x000EAC: u'MINTRON ENTERPRISE CO., LTD.', + 0x000EAD: u'Metanoia Technologies, Inc.', + 0x000EAE: u'GAWELL TECHNOLOGIES CORP.', + 0x000EAF: u'CASTEL', + 0x000EB0: u'Solutions Radio BV', + 0x000EB1: u'Newcotech,Ltd', + 0x000EB2: u'Micro-Research Finland Oy', + 0x000EB3: u'LeftHand Networks', + 0x000EB4: u'GUANGZHOU GAOKE COMMUNICATIONS TECHNOLOGY CO.LTD.', + 0x000EB5: u'Ecastle Electronics Co., Ltd.', + 0x000EB6: u'Riverbed Technology, Inc.', + 0x000EB7: u'Knovative, Inc.', + 0x000EB8: u'Iiga co.,Ltd', + 0x000EB9: u'HASHIMOTO Electronics Industry Co.,Ltd.', + 0x000EBA: u'HANMI SEMICONDUCTOR CO., LTD.', + 0x000EBB: u'Everbee Networks', + 0x000EBC: u'Cullmann GmbH', + 0x000EBD: u'Burdick, a Quinton Compny', + 0x000EBE: u'B&B Electronics Manufacturing Co.', + 0x000EBF: u'Remsdaq Limited', + 0x000EC0: u'Nortel Networks', + 0x000EC1: u'MYNAH Technologies', + 0x000EC2: u'Lowrance Electronics, Inc.', + 0x000EC3: u'Logic Controls, Inc.', + 0x000EC4: u'Iskra Transmission d.d.', + 0x000EC5: u'Digital Multitools Inc', + 0x000EC6: u'ASIX ELECTRONICS CORP.', + 0x000EC7: u'Motorola Korea', + 0x000EC8: u'Zoran Corporation', + 0x000EC9: u'YOKO Technology Corp.', + 0x000ECA: u'WTSS Inc', + 0x000ECB: u'VineSys Technology', + 0x000ECC: u'Tableau', + 0x000ECD: u'SKOV A/S', + 0x000ECE: u'S.I.T.T.I. 
S.p.A.', + 0x000ECF: u'PROFIBUS Nutzerorganisation e.V.', + 0x000ED0: u'Privaris, Inc.', + 0x000ED1: u'Osaka Micro Computer.', + 0x000ED2: u'Filtronic plc', + 0x000ED3: u'Epicenter, Inc.', + 0x000ED4: u'CRESITT INDUSTRIE', + 0x000ED5: u'COPAN Systems Inc.', + 0x000ED6: u'Cisco Systems', + 0x000ED7: u'Cisco Systems', + 0x000ED8: u'Aktino, Inc.', + 0x000ED9: u'Aksys, Ltd.', + 0x000EDA: u'C-TECH UNITED CORP.', + 0x000EDB: u'XiNCOM Corp.', + 0x000EDC: u'Tellion INC.', + 0x000EDD: u'SHURE INCORPORATED', + 0x000EDE: u'REMEC, Inc.', + 0x000EDF: u'PLX Technology', + 0x000EE0: u'Mcharge', + 0x000EE1: u'ExtremeSpeed Inc.', + 0x000EE2: u'Custom Engineering S.p.A.', + 0x000EE3: u'Chiyu Technology Co.,Ltd', + 0x000EE4: u'BOE TECHNOLOGY GROUP CO.,LTD', + 0x000EE5: u'bitWallet, Inc.', + 0x000EE6: u'Adimos Systems LTD', + 0x000EE7: u'AAC ELECTRONICS CORP.', + 0x000EE8: u'zioncom', + 0x000EE9: u'WayTech Development, Inc.', + 0x000EEA: u'Shadong Luneng Jicheng Electronics,Co.,Ltd', + 0x000EEB: u'Sandmartin(zhong shan)Electronics Co.,Ltd', + 0x000EEC: u'Orban', + 0x000EED: u'Nokia Danmark A/S', + 0x000EEE: u'Muco Industrie BV', + 0x000EEF: u'PRIVATE', + 0x000EF0: u'Festo AG & Co. 
KG', + 0x000EF1: u'EZQUEST INC.', + 0x000EF2: u'Infinico Corporation', + 0x000EF3: u'Smarthome', + 0x000EF4: u'Shenzhen Kasda Digital Technology Co.,Ltd', + 0x000EF5: u'iPAC Technology Co., Ltd.', + 0x000EF6: u'E-TEN Information Systems Co., Ltd.', + 0x000EF7: u'Vulcan Portals Inc', + 0x000EF8: u'SBC ASI', + 0x000EF9: u'REA Elektronik GmbH', + 0x000EFA: u'Optoway Technology Incorporation', + 0x000EFB: u'Macey Enterprises', + 0x000EFC: u'JTAG Technologies B.V.', + 0x000EFD: u'FUJI PHOTO OPTICAL CO., LTD.', + 0x000EFE: u'EndRun Technologies LLC', + 0x000EFF: u'Megasolution,Inc.', + 0x000F00: u'Legra Systems, Inc.', + 0x000F01: u'DIGITALKS INC', + 0x000F02: u'Digicube Technology Co., Ltd', + 0x000F03: u'COM&C CO., LTD', + 0x000F04: u'cim-usa inc', + 0x000F05: u'3B SYSTEM INC.', + 0x000F06: u'Nortel Networks', + 0x000F07: u'Mangrove Systems, Inc.', + 0x000F08: u'Indagon Oy', + 0x000F09: u'PRIVATE', + 0x000F0A: u'Clear Edge Networks', + 0x000F0B: u'Kentima Technologies AB', + 0x000F0C: u'SYNCHRONIC ENGINEERING', + 0x000F0D: u'Hunt Electronic Co., Ltd.', + 0x000F0E: u'WaveSplitter Technologies, Inc.', + 0x000F0F: u'Real ID Technology Co., Ltd.', + 0x000F10: u'RDM Corporation', + 0x000F11: u'Prodrive B.V.', + 0x000F12: u'Panasonic AVC Networks Germany GmbH', + 0x000F13: u'Nisca corporation', + 0x000F14: u'Mindray Co., Ltd.', + 0x000F15: u'Kjaerulff1 A/S', + 0x000F16: u'JAY HOW TECHNOLOGY CO.,', + 0x000F17: u'Insta Elektro GmbH', + 0x000F18: u'Industrial Control Systems', + 0x000F19: u'Guidant Corporation', + 0x000F1A: u'Gaming Support B.V.', + 0x000F1B: u'Ego Systems Inc.', + 0x000F1C: u'DigitAll World Co., Ltd', + 0x000F1D: u'Cosmo Techs Co., Ltd.', + 0x000F1E: u'Chengdu KT Electric Co.of High & New Technology', + 0x000F1F: u'WW PCBA Test', + 0x000F20: u'Hewlett Packard', + 0x000F21: u'Scientific Atlanta, Inc', + 0x000F22: u'Helius, Inc.', + 0x000F23: u'Cisco Systems', + 0x000F24: u'Cisco Systems', + 0x000F25: u'AimValley B.V.', + 0x000F26: u'WorldAccxx LLC', + 0x000F27: 
u'TEAL Electronics, Inc.', + 0x000F28: u'Itronix Corporation', + 0x000F29: u'Augmentix Corporation', + 0x000F2A: u'Cableware Electronics', + 0x000F2B: u'GREENBELL SYSTEMS', + 0x000F2C: u'Uplogix, Inc.', + 0x000F2D: u'CHUNG-HSIN ELECTRIC & MACHINERY MFG.CORP.', + 0x000F2E: u'Megapower International Corp.', + 0x000F2F: u'W-LINX TECHNOLOGY CO., LTD.', + 0x000F30: u'Raza Microelectronics Inc', + 0x000F31: u'Prosilica', + 0x000F32: u'LuTong Electronic Technology Co.,Ltd', + 0x000F33: u'DUALi Inc.', + 0x000F34: u'Cisco Systems', + 0x000F35: u'Cisco Systems', + 0x000F36: u'Accurate Techhnologies, Inc.', + 0x000F37: u'Xambala Incorporated', + 0x000F38: u'Netstar', + 0x000F39: u'IRIS SENSORS', + 0x000F3A: u'HISHARP', + 0x000F3B: u'Fuji System Machines Co., Ltd.', + 0x000F3C: u'Endeleo Limited', + 0x000F3D: u'D-Link Corporation', + 0x000F3E: u'CardioNet, Inc', + 0x000F3F: u'Big Bear Networks', + 0x000F40: u'Optical Internetworking Forum', + 0x000F41: u'Zipher Ltd', + 0x000F42: u'Xalyo Systems', + 0x000F43: u'Wasabi Systems Inc.', + 0x000F44: u'Tivella Inc.', + 0x000F45: u'Stretch, Inc.', + 0x000F46: u'SINAR AG', + 0x000F47: u'ROBOX SPA', + 0x000F48: u'Polypix Inc.', + 0x000F49: u'Northover Solutions Limited', + 0x000F4A: u'Kyushu-kyohan co.,ltd', + 0x000F4B: u'Katana Technology', + 0x000F4C: u'Elextech INC', + 0x000F4D: u'Centrepoint Technologies Inc.', + 0x000F4E: u'Cellink', + 0x000F4F: u'Cadmus Technology Ltd', + 0x000F50: u'Baxall Limited', + 0x000F51: u'Azul Systems, Inc.', + 0x000F52: u'YORK Refrigeration, Marine & Controls', + 0x000F53: u'Solarflare Communications Inc', + 0x000F54: u'Entrelogic Corporation', + 0x000F55: u'Datawire Communication Networks Inc.', + 0x000F56: u'Continuum Photonics Inc', + 0x000F57: u'CABLELOGIC Co., Ltd.', + 0x000F58: u'Adder Technology Limited', + 0x000F59: u'Phonak Communications AG', + 0x000F5A: u'Peribit Networks', + 0x000F5B: u'Delta Information Systems, Inc.', + 0x000F5C: u'Day One Digital Media Limited', + 0x000F5D: u'42Networks 
AB', + 0x000F5E: u'Veo', + 0x000F5F: u'Nicety Technologies Inc. (NTS)', + 0x000F60: u'Lifetron Co.,Ltd', + 0x000F61: u'Kiwi Networks', + 0x000F62: u'Alcatel Bell Space N.V.', + 0x000F63: u'Obzerv Technologies', + 0x000F64: u'D&R Electronica Weesp BV', + 0x000F65: u'icube Corp.', + 0x000F66: u'Cisco-Linksys', + 0x000F67: u'West Instruments', + 0x000F68: u'Vavic Network Technology, Inc.', + 0x000F69: u'SEW Eurodrive GmbH & Co. KG', + 0x000F6A: u'Nortel Networks', + 0x000F6B: u'GateWare Communications GmbH', + 0x000F6C: u'ADDI-DATA GmbH', + 0x000F6D: u'Midas Engineering', + 0x000F6E: u'BBox', + 0x000F6F: u'FTA Communication Technologies', + 0x000F70: u'Wintec Industries, inc.', + 0x000F71: u'Sanmei Electronics Co.,Ltd', + 0x000F72: u'Sandburst', + 0x000F73: u'Rockwell Samsung Automation', + 0x000F74: u'Qamcom Technology AB', + 0x000F75: u'First Silicon Solutions', + 0x000F76: u'Digital Keystone, Inc.', + 0x000F77: u'DENTUM CO.,LTD', + 0x000F78: u'Datacap Systems Inc', + 0x000F79: u'Bluetooth Interest Group Inc.', + 0x000F7A: u'BeiJing NuQX Technology CO.,LTD', + 0x000F7B: u'Arce Sistemas, S.A.', + 0x000F7C: u'ACTi Corporation', + 0x000F7D: u'Xirrus', + 0x000F7E: u'Ablerex Electronics Co., LTD', + 0x000F7F: u'UBSTORAGE Co.,Ltd.', + 0x000F80: u'Trinity Security Systems,Inc.', + 0x000F81: u'Secure Info Imaging', + 0x000F82: u'Mortara Instrument, Inc.', + 0x000F83: u'Brainium Technologies Inc.', + 0x000F84: u'Astute Networks, Inc.', + 0x000F85: u'ADDO-Japan Corporation', + 0x000F86: u'Research In Motion Limited', + 0x000F87: u'Maxcess International', + 0x000F88: u'AMETEK, Inc.', + 0x000F89: u'Winnertec System Co., Ltd.', + 0x000F8A: u'WideView', + 0x000F8B: u'Orion MultiSystems Inc', + 0x000F8C: u'Gigawavetech Pte Ltd', + 0x000F8D: u'FAST TV-Server AG', + 0x000F8E: u'DONGYANG TELECOM CO.,LTD.', + 0x000F8F: u'Cisco Systems', + 0x000F90: u'Cisco Systems', + 0x000F91: u'Aerotelecom Co.,Ltd.', + 0x000F92: u'Microhard Systems Inc.', + 0x000F93: u'Landis+Gyr Ltd.', + 0x000F94: 
u'Genexis', + 0x000F95: u'ELECOM Co.,LTD Laneed Division', + 0x000F96: u'Critical Telecom Corp.', + 0x000F97: u'Avanex Corporation', + 0x000F98: u'Avamax Co. Ltd.', + 0x000F99: u'APAC opto Electronics Inc.', + 0x000F9A: u'Synchrony, Inc.', + 0x000F9B: u'Ross Video Limited', + 0x000F9C: u'Panduit Corp', + 0x000F9D: u'Newnham Research Ltd', + 0x000F9E: u'Murrelektronik GmbH', + 0x000F9F: u'Motorola BCS', + 0x000FA0: u'CANON KOREA BUSINESS SOLUTIONS INC.', + 0x000FA1: u'Gigabit Systems Inc.', + 0x000FA2: u'Digital Path Networks', + 0x000FA3: u'Alpha Networks Inc.', + 0x000FA4: u'Sprecher Automation GmbH', + 0x000FA5: u'SMP / BWA Technology GmbH', + 0x000FA6: u'S2 Security Corporation', + 0x000FA7: u'Raptor Networks Technology', + 0x000FA8: u'Photometrics, Inc.', + 0x000FA9: u'PC Fabrik', + 0x000FAA: u'Nexus Technologies', + 0x000FAB: u'Kyushu Electronics Systems Inc.', + 0x000FAC: u'IEEE 802.11', + 0x000FAD: u'FMN communications GmbH', + 0x000FAE: u'E2O Communications', + 0x000FAF: u'Dialog Inc.', + 0x000FB0: u'Compal Electronics,INC.', + 0x000FB1: u'Cognio Inc.', + 0x000FB2: u'Broadband Pacenet (India) Pvt. Ltd.', + 0x000FB3: u'Actiontec Electronics, Inc', + 0x000FB4: u'Timespace Technology', + 0x000FB5: u'NETGEAR Inc', + 0x000FB6: u'Europlex Technologies', + 0x000FB7: u'Cavium Networks', + 0x000FB8: u'CallURL Inc.', + 0x000FB9: u'Adaptive Instruments', + 0x000FBA: u'Tevebox AB', + 0x000FBB: u'Siemens Networks GmbH & Co. KG', + 0x000FBC: u'Onkey Technologies, Inc.', + 0x000FBD: u'MRV Communications (Networks) LTD', + 0x000FBE: u'e-w/you Inc.', + 0x000FBF: u'DGT Sp. 
z o.o.', + 0x000FC0: u'DELCOMp', + 0x000FC1: u'WAVE Corporation', + 0x000FC2: u'Uniwell Corporation', + 0x000FC3: u'PalmPalm Technology, Inc.', + 0x000FC4: u'NST co.,LTD.', + 0x000FC5: u'KeyMed Ltd', + 0x000FC6: u'Eurocom Industries A/S', + 0x000FC7: u'Dionica R&D Ltd.', + 0x000FC8: u'Chantry Networks', + 0x000FC9: u'Allnet GmbH', + 0x000FCA: u'A-JIN TECHLINE CO, LTD', + 0x000FCB: u'3COM EUROPE LTD', + 0x000FCC: u'Netopia, Inc.', + 0x000FCD: u'Nortel Networks', + 0x000FCE: u'Kikusui Electronics Corp.', + 0x000FCF: u'Datawind Research', + 0x000FD0: u'ASTRI', + 0x000FD1: u'Applied Wireless Identifications Group, Inc.', + 0x000FD2: u'EWA Technologies, Inc.', + 0x000FD3: u'Digium', + 0x000FD4: u'Soundcraft', + 0x000FD5: u'Schwechat - RISE', + 0x000FD6: u'Sarotech Co., Ltd', + 0x000FD7: u'Harman Music Group', + 0x000FD8: u'Force, Inc.', + 0x000FD9: u'FlexDSL Telecommunications AG', + 0x000FDA: u'YAZAKI CORPORATION', + 0x000FDB: u'Westell Technologies', + 0x000FDC: u'Ueda Japan Radio Co., Ltd.', + 0x000FDD: u'SORDIN AB', + 0x000FDE: u'Sony Ericsson Mobile Communications AB', + 0x000FDF: u'SOLOMON Technology Corp.', + 0x000FE0: u'NComputing Co.,Ltd.', + 0x000FE1: u'ID DIGITAL CORPORATION', + 0x000FE2: u'Hangzhou Huawei-3Com Tech. 
Co., Ltd.', + 0x000FE3: u'Damm Cellular Systems A/S', + 0x000FE4: u'Pantech Co.,Ltd', + 0x000FE5: u'MERCURY SECURITY CORPORATION', + 0x000FE6: u'MBTech Systems, Inc.', + 0x000FE7: u'Lutron Electronics Co., Inc.', + 0x000FE8: u'Lobos, Inc.', + 0x000FE9: u'GW TECHNOLOGIES CO.,LTD.', + 0x000FEA: u'Giga-Byte Technology Co.,LTD.', + 0x000FEB: u'Cylon Controls', + 0x000FEC: u'Arkus Inc.', + 0x000FED: u'Anam Electronics Co., Ltd', + 0x000FEE: u'XTec, Incorporated', + 0x000FEF: u'Thales e-Transactions GmbH', + 0x000FF0: u'Sunray Enterprise', + 0x000FF1: u'nex-G Systems Pte.Ltd', + 0x000FF2: u'Loud Technologies Inc.', + 0x000FF3: u'Jung Myoung Communications&Technology', + 0x000FF4: u'Guntermann & Drunck GmbH', + 0x000FF5: u'GN&S company', + 0x000FF6: u'Darfon Electronics Corp.', + 0x000FF7: u'Cisco Systems', + 0x000FF8: u'Cisco Systems', + 0x000FF9: u'Valcretec, Inc.', + 0x000FFA: u'Optinel Systems, Inc.', + 0x000FFB: u'Nippon Denso Industry Co., Ltd.', + 0x000FFC: u'Merit Li-Lin Ent.', + 0x000FFD: u'Glorytek Network Inc.', + 0x000FFE: u'G-PRO COMPUTER', + 0x000FFF: u'Control4', + 0x001000: u'CABLE TELEVISION LABORATORIES, INC.', + 0x001001: u'MCK COMMUNICATIONS', + 0x001002: u'ACTIA', + 0x001003: u'IMATRON, INC.', + 0x001004: u'THE BRANTLEY COILE COMPANY,INC', + 0x001005: u'UEC COMMERCIAL', + 0x001006: u'Thales Contact Solutions Ltd.', + 0x001007: u'CISCO SYSTEMS, INC.', + 0x001008: u'VIENNA SYSTEMS CORPORATION', + 0x001009: u'HORO QUARTZ', + 0x00100A: u'WILLIAMS COMMUNICATIONS GROUP', + 0x00100B: u'CISCO SYSTEMS, INC.', + 0x00100C: u'ITO CO., LTD.', + 0x00100D: u'CISCO SYSTEMS, INC.', + 0x00100E: u'MICRO LINEAR COPORATION', + 0x00100F: u'INDUSTRIAL CPU SYSTEMS', + 0x001010: u'INITIO CORPORATION', + 0x001011: u'CISCO SYSTEMS, INC.', + 0x001012: u'PROCESSOR SYSTEMS (I) PVT LTD', + 0x001013: u'Kontron', + 0x001014: u'CISCO SYSTEMS, INC.', + 0x001015: u'OOmon Inc.', + 0x001016: u'T.SQWARE', + 0x001017: u'MICOS GmbH', + 0x001018: u'BROADCOM CORPORATION', + 0x001019: u'SIRONA 
DENTAL SYSTEMS GmbH & Co. KG', + 0x00101A: u'PictureTel Corp.', + 0x00101B: u'CORNET TECHNOLOGY, INC.', + 0x00101C: u'OHM TECHNOLOGIES INTL, LLC', + 0x00101D: u'WINBOND ELECTRONICS CORP.', + 0x00101E: u'MATSUSHITA ELECTRONIC INSTRUMENTS CORP.', + 0x00101F: u'CISCO SYSTEMS, INC.', + 0x001020: u'WELCH ALLYN, DATA COLLECTION', + 0x001021: u'ENCANTO NETWORKS, INC.', + 0x001022: u'SatCom Media Corporation', + 0x001023: u'FLOWWISE NETWORKS, INC.', + 0x001024: u'NAGOYA ELECTRIC WORKS CO., LTD', + 0x001025: u'GRAYHILL INC.', + 0x001026: u'ACCELERATED NETWORKS, INC.', + 0x001027: u'L-3 COMMUNICATIONS EAST', + 0x001028: u'COMPUTER TECHNICA, INC.', + 0x001029: u'CISCO SYSTEMS, INC.', + 0x00102A: u'ZF MICROSYSTEMS, INC.', + 0x00102B: u'UMAX DATA SYSTEMS, INC.', + 0x00102C: u'Lasat Networks A/S', + 0x00102D: u'HITACHI SOFTWARE ENGINEERING', + 0x00102E: u'NETWORK SYSTEMS & TECHNOLOGIES PVT. LTD.', + 0x00102F: u'CISCO SYSTEMS, INC.', + 0x001030: u'EION Inc.', + 0x001031: u'OBJECTIVE COMMUNICATIONS, INC.', + 0x001032: u'ALTA TECHNOLOGY', + 0x001033: u'ACCESSLAN COMMUNICATIONS, INC.', + 0x001034: u'GNP Computers', + 0x001035: u'ELITEGROUP COMPUTER SYSTEMS CO., LTD', + 0x001036: u'INTER-TEL INTEGRATED SYSTEMS', + 0x001037: u'CYQ\'ve Technology Co., Ltd.', + 0x001038: u'MICRO RESEARCH INSTITUTE, INC.', + 0x001039: u'Vectron Systems AG', + 0x00103A: u'DIAMOND NETWORK TECH', + 0x00103B: u'HIPPI NETWORKING FORUM', + 0x00103C: u'IC ENSEMBLE, INC.', + 0x00103D: u'PHASECOM, LTD.', + 0x00103E: u'NETSCHOOLS CORPORATION', + 0x00103F: u'TOLLGRADE COMMUNICATIONS, INC.', + 0x001040: u'INTERMEC CORPORATION', + 0x001041: u'BRISTOL BABCOCK, INC.', + 0x001042: u'AlacriTech', + 0x001043: u'A2 CORPORATION', + 0x001044: u'InnoLabs Corporation', + 0x001045: u'Nortel Networks', + 0x001046: u'ALCORN MCBRIDE INC.', + 0x001047: u'ECHO ELETRIC CO. 
LTD.', + 0x001048: u'HTRC AUTOMATION, INC.', + 0x001049: u'SHORELINE TELEWORKS, INC.', + 0x00104A: u'THE PARVUC CORPORATION', + 0x00104B: u'3COM CORPORATION', + 0x00104C: u'COMPUTER ACCESS TECHNOLOGY', + 0x00104D: u'SURTEC INDUSTRIES, INC.', + 0x00104E: u'CEOLOGIC', + 0x00104F: u'STORAGE TECHNOLOGY CORPORATION', + 0x001050: u'RION CO., LTD.', + 0x001051: u'CMICRO CORPORATION', + 0x001052: u'METTLER-TOLEDO (ALBSTADT) GMBH', + 0x001053: u'COMPUTER TECHNOLOGY CORP.', + 0x001054: u'CISCO SYSTEMS, INC.', + 0x001055: u'FUJITSU MICROELECTRONICS, INC.', + 0x001056: u'SODICK CO., LTD.', + 0x001057: u'Rebel.com, Inc.', + 0x001058: u'ArrowPoint Communications', + 0x001059: u'DIABLO RESEARCH CO. LLC', + 0x00105A: u'3COM CORPORATION', + 0x00105B: u'NET INSIGHT AB', + 0x00105C: u'QUANTUM DESIGNS (H.K.) LTD.', + 0x00105D: u'Draeger Medical', + 0x00105E: u'HEKIMIAN LABORATORIES, INC.', + 0x00105F: u'IN-SNEC', + 0x001060: u'BILLIONTON SYSTEMS, INC.', + 0x001061: u'HOSTLINK CORP.', + 0x001062: u'NX SERVER, ILNC.', + 0x001063: u'STARGUIDE DIGITAL NETWORKS', + 0x001064: u'DNPG, LLC', + 0x001065: u'RADYNE CORPORATION', + 0x001066: u'ADVANCED CONTROL SYSTEMS, INC.', + 0x001067: u'REDBACK NETWORKS, INC.', + 0x001068: u'COMOS TELECOM', + 0x001069: u'HELIOSS COMMUNICATIONS, INC.', + 0x00106A: u'DIGITAL MICROWAVE CORPORATION', + 0x00106B: u'SONUS NETWORKS, INC.', + 0x00106C: u'INFRATEC PLUS GmbH', + 0x00106D: u'Axxcelera Broadband Wireless', + 0x00106E: u'TADIRAN COM. 
LTD.', + 0x00106F: u'TRENTON TECHNOLOGY INC.', + 0x001070: u'CARADON TREND LTD.', + 0x001071: u'ADVANET INC.', + 0x001072: u'GVN TECHNOLOGIES, INC.', + 0x001073: u'TECHNOBOX, INC.', + 0x001074: u'ATEN INTERNATIONAL CO., LTD.', + 0x001075: u'Maxtor Corporation', + 0x001076: u'EUREM GmbH', + 0x001077: u'SAF DRIVE SYSTEMS, LTD.', + 0x001078: u'NUERA COMMUNICATIONS, INC.', + 0x001079: u'CISCO SYSTEMS, INC.', + 0x00107A: u'AmbiCom, Inc.', + 0x00107B: u'CISCO SYSTEMS, INC.', + 0x00107C: u'P-COM, INC.', + 0x00107D: u'AURORA COMMUNICATIONS, LTD.', + 0x00107E: u'BACHMANN ELECTRONIC GmbH', + 0x00107F: u'CRESTRON ELECTRONICS, INC.', + 0x001080: u'METAWAVE COMMUNICATIONS', + 0x001081: u'DPS, INC.', + 0x001082: u'JNA TELECOMMUNICATIONS LIMITED', + 0x001083: u'HEWLETT-PACKARD COMPANY', + 0x001084: u'K-BOT COMMUNICATIONS', + 0x001085: u'POLARIS COMMUNICATIONS, INC.', + 0x001086: u'ATTO TECHNOLOGY, INC.', + 0x001087: u'Xstreamis PLC', + 0x001088: u'AMERICAN NETWORKS INC.', + 0x001089: u'WebSonic', + 0x00108A: u'TeraLogic, Inc.', + 0x00108B: u'LASERANIMATION SOLLINGER GmbH', + 0x00108C: u'FUJITSU TELECOMMUNICATIONS EUROPE, LTD.', + 0x00108D: u'JOHNSON CONTROLS, INC.', + 0x00108E: u'HUGH SYMONS CONCEPT Technologies Ltd.', + 0x00108F: u'RAPTOR SYSTEMS', + 0x001090: u'CIMETRICS, INC.', + 0x001091: u'NO WIRES NEEDED BV', + 0x001092: u'NETCORE INC.', + 0x001093: u'CMS COMPUTERS, LTD.', + 0x001094: u'Performance Analysis Broadband, Spirent plc', + 0x001095: u'Thomson Inc.', + 0x001096: u'TRACEWELL SYSTEMS, INC.', + 0x001097: u'WinNet Metropolitan Communications Systems, Inc.', + 0x001098: u'STARNET TECHNOLOGIES, INC.', + 0x001099: u'InnoMedia, Inc.', + 0x00109A: u'NETLINE', + 0x00109B: u'Emulex Corporation', + 0x00109C: u'M-SYSTEM CO., LTD.', + 0x00109D: u'CLARINET SYSTEMS, INC.', + 0x00109E: u'AWARE, INC.', + 0x00109F: u'PAVO, INC.', + 0x0010A0: u'INNOVEX TECHNOLOGIES, INC.', + 0x0010A1: u'KENDIN SEMICONDUCTOR, INC.', + 0x0010A2: u'TNS', + 0x0010A3: u'OMNITRONIX, INC.', + 0x0010A4: 
u'XIRCOM', + 0x0010A5: u'OXFORD INSTRUMENTS', + 0x0010A6: u'CISCO SYSTEMS, INC.', + 0x0010A7: u'UNEX TECHNOLOGY CORPORATION', + 0x0010A8: u'RELIANCE COMPUTER CORP.', + 0x0010A9: u'ADHOC TECHNOLOGIES', + 0x0010AA: u'MEDIA4, INC.', + 0x0010AB: u'KOITO INDUSTRIES, LTD.', + 0x0010AC: u'IMCI TECHNOLOGIES', + 0x0010AD: u'SOFTRONICS USB, INC.', + 0x0010AE: u'SHINKO ELECTRIC INDUSTRIES CO.', + 0x0010AF: u'TAC SYSTEMS, INC.', + 0x0010B0: u'MERIDIAN TECHNOLOGY CORP.', + 0x0010B1: u'FOR-A CO., LTD.', + 0x0010B2: u'COACTIVE AESTHETICS', + 0x0010B3: u'NOKIA MULTIMEDIA TERMINALS', + 0x0010B4: u'ATMOSPHERE NETWORKS', + 0x0010B5: u'ACCTON TECHNOLOGY CORPORATION', + 0x0010B6: u'ENTRATA COMMUNICATIONS CORP.', + 0x0010B7: u'COYOTE TECHNOLOGIES, LLC', + 0x0010B8: u'ISHIGAKI COMPUTER SYSTEM CO.', + 0x0010B9: u'MAXTOR CORP.', + 0x0010BA: u'MARTINHO-DAVIS SYSTEMS, INC.', + 0x0010BB: u'DATA & INFORMATION TECHNOLOGY', + 0x0010BC: u'Aastra Telecom', + 0x0010BD: u'THE TELECOMMUNICATION TECHNOLOGY COMMITTEE', + 0x0010BE: u'TELEXIS CORP.', + 0x0010BF: u'InterAir Wireless', + 0x0010C0: u'ARMA, INC.', + 0x0010C1: u'OI ELECTRIC CO., LTD.', + 0x0010C2: u'WILLNET, INC.', + 0x0010C3: u'CSI-CONTROL SYSTEMS', + 0x0010C4: u'MEDIA LINKS CO., LTD.', + 0x0010C5: u'PROTOCOL TECHNOLOGIES, INC.', + 0x0010C6: u'USI', + 0x0010C7: u'DATA TRANSMISSION NETWORK', + 0x0010C8: u'COMMUNICATIONS ELECTRONICS SECURITY GROUP', + 0x0010C9: u'MITSUBISHI ELECTRONICS LOGISTIC SUPPORT CO.', + 0x0010CA: u'INTEGRAL ACCESS', + 0x0010CB: u'FACIT K.K.', + 0x0010CC: u'CLP COMPUTER LOGISTIK PLANUNG GmbH', + 0x0010CD: u'INTERFACE CONCEPT', + 0x0010CE: u'VOLAMP, LTD.', + 0x0010CF: u'FIBERLANE COMMUNICATIONS', + 0x0010D0: u'WITCOM, LTD.', + 0x0010D1: u'Top Layer Networks, Inc.', + 0x0010D2: u'NITTO TSUSHINKI CO., LTD', + 0x0010D3: u'GRIPS ELECTRONIC GMBH', + 0x0010D4: u'STORAGE COMPUTER CORPORATION', + 0x0010D5: u'IMASDE CANARIAS, S.A.', + 0x0010D6: u'ITT - A/CD', + 0x0010D7: u'ARGOSY RESEARCH INC.', + 0x0010D8: u'CALISTA', + 0x0010D9: 
u'IBM JAPAN, FUJISAWA MT+D', + 0x0010DA: u'MOTION ENGINEERING, INC.', + 0x0010DB: u'Juniper Networks, Inc.', + 0x0010DC: u'MICRO-STAR INTERNATIONAL CO., LTD.', + 0x0010DD: u'ENABLE SEMICONDUCTOR, INC.', + 0x0010DE: u'INTERNATIONAL DATACASTING CORPORATION', + 0x0010DF: u'RISE COMPUTER INC.', + 0x0010E0: u'COBALT MICROSERVER, INC.', + 0x0010E1: u'S.I. TECH, INC.', + 0x0010E2: u'ArrayComm, Inc.', + 0x0010E3: u'COMPAQ COMPUTER CORPORATION', + 0x0010E4: u'NSI CORPORATION', + 0x0010E5: u'SOLECTRON TEXAS', + 0x0010E6: u'APPLIED INTELLIGENT SYSTEMS, INC.', + 0x0010E7: u'BreezeCom', + 0x0010E8: u'TELOCITY, INCORPORATED', + 0x0010E9: u'RAIDTEC LTD.', + 0x0010EA: u'ADEPT TECHNOLOGY', + 0x0010EB: u'SELSIUS SYSTEMS, INC.', + 0x0010EC: u'RPCG, LLC', + 0x0010ED: u'SUNDANCE TECHNOLOGY, INC.', + 0x0010EE: u'CTI PRODUCTS, INC.', + 0x0010EF: u'DBTEL INCORPORATED', + 0x0010F1: u'I-O CORPORATION', + 0x0010F2: u'ANTEC', + 0x0010F3: u'Nexcom International Co., Ltd.', + 0x0010F4: u'VERTICAL NETWORKS, INC.', + 0x0010F5: u'AMHERST SYSTEMS, INC.', + 0x0010F6: u'CISCO SYSTEMS, INC.', + 0x0010F7: u'IRIICHI TECHNOLOGIES Inc.', + 0x0010F8: u'TEXIO CORPORATION', + 0x0010F9: u'UNIQUE SYSTEMS, INC.', + 0x0010FA: u'ZAYANTE, INC.', + 0x0010FB: u'ZIDA TECHNOLOGIES LIMITED', + 0x0010FC: u'BROADBAND NETWORKS, INC.', + 0x0010FD: u'COCOM A/S', + 0x0010FE: u'DIGITAL EQUIPMENT CORPORATION', + 0x0010FF: u'CISCO SYSTEMS, INC.', + 0x001100: u'RAM Industries, LLC', + 0x001101: u'CET Technologies Pte Ltd', + 0x001102: u'Aurora Multimedia Corp.', + 0x001103: u'kawamura electric inc.', + 0x001104: u'TELEXY', + 0x001105: u'Sunplus Technology Co., Ltd.', + 0x001106: u'Siemens NV (Belgium)', + 0x001107: u'RGB Networks Inc.', + 0x001108: u'Orbital Data Corporation', + 0x001109: u'Micro-Star International', + 0x00110A: u'Hewlett Packard', + 0x00110B: u'Franklin Technology Systems', + 0x00110C: u'Atmark Techno, Inc.', + 0x00110D: u'SANBlaze Technology, Inc.', + 0x00110E: u'Tsurusaki Sealand Transportation Co. 
Ltd.', + 0x00110F: u'netplat,Inc.', + 0x001110: u'Maxanna Technology Co., Ltd.', + 0x001111: u'Intel Corporation', + 0x001112: u'Honeywell CMSS', + 0x001113: u'Fraunhofer FOKUS', + 0x001114: u'EverFocus Electronics Corp.', + 0x001115: u'EPIN Technologies, Inc.', + 0x001116: u'COTEAU VERT CO., LTD.', + 0x001117: u'CESNET', + 0x001118: u'BLX IC Design Corp., Ltd.', + 0x001119: u'Solteras, Inc.', + 0x00111A: u'Motorola BCS', + 0x00111B: u'Targa Systems Div L-3 Communications Canada', + 0x00111C: u'Pleora Technologies Inc.', + 0x00111D: u'Hectrix Limited', + 0x00111E: u'EPSG (Ethernet Powerlink Standardization Group)', + 0x00111F: u'Doremi Labs, Inc.', + 0x001120: u'Cisco Systems', + 0x001121: u'Cisco Systems', + 0x001122: u'CIMSYS Inc', + 0x001123: u'Appointech, Inc.', + 0x001124: u'Apple Computer', + 0x001125: u'IBM Corporation', + 0x001126: u'Venstar Inc.', + 0x001127: u'TASI, Inc', + 0x001128: u'Streamit', + 0x001129: u'Paradise Datacom Ltd.', + 0x00112A: u'Niko NV', + 0x00112B: u'NetModule', + 0x00112C: u'IZT GmbH', + 0x00112D: u'Guys Without Ties', + 0x00112E: u'CEICOM', + 0x00112F: u'ASUSTek Computer Inc.', + 0x001130: u'Allied Telesis (Hong Kong) Ltd.', + 0x001131: u'UNATECH. CO.,LTD', + 0x001132: u'Synology Incorporated', + 0x001133: u'Siemens Austria SIMEA', + 0x001134: u'MediaCell, Inc.', + 0x001135: u'Grandeye Ltd', + 0x001136: u'Goodrich Sensor Systems', + 0x001137: u'AICHI ELECTRIC CO., LTD.', + 0x001138: u'TAISHIN CO., LTD.', + 0x001139: u'STOEBER ANTRIEBSTECHNIK GmbH + Co. 
KG.', + 0x00113A: u'SHINBORAM', + 0x00113B: u'Micronet Communications Inc.', + 0x00113C: u'Micronas GmbH', + 0x00113D: u'KN SOLTEC CO.,LTD.', + 0x00113E: u'JL Corporation', + 0x00113F: u'Alcatel DI', + 0x001140: u'Nanometrics Inc.', + 0x001141: u'GoodMan Corporation', + 0x001142: u'e-SMARTCOM INC.', + 0x001143: u'DELL INC.', + 0x001144: u'Assurance Technology Corp', + 0x001145: u'ValuePoint Networks', + 0x001146: u'Telecard-Pribor Ltd', + 0x001147: u'Secom-Industry co.LTD.', + 0x001148: u'Prolon Control Systems', + 0x001149: u'Proliphix LLC', + 0x00114A: u'KAYABA INDUSTRY Co,.Ltd.', + 0x00114B: u'Francotyp-Postalia AG & Co. KG', + 0x00114C: u'caffeina applied research ltd.', + 0x00114D: u'Atsumi Electric Co.,LTD.', + 0x00114E: u'690885 Ontario Inc.', + 0x00114F: u'US Digital Television, Inc', + 0x001150: u'Belkin Corporation', + 0x001151: u'Mykotronx', + 0x001152: u'Eidsvoll Electronics AS', + 0x001153: u'Trident Tek, Inc.', + 0x001154: u'Webpro Technologies Inc.', + 0x001155: u'Sevis Systems', + 0x001156: u'Pharos Systems NZ', + 0x001157: u'OF Networks Co., Ltd.', + 0x001158: u'Nortel Networks', + 0x001159: u'MATISSE NETWORKS INC', + 0x00115A: u'Ivoclar Vivadent AG', + 0x00115B: u'Elitegroup Computer System Co. (ECS)', + 0x00115C: u'Cisco', + 0x00115D: u'Cisco', + 0x00115E: u'ProMinent Dosiertechnik GmbH', + 0x00115F: u'Intellix Co., Ltd.', + 0x001160: u'ARTDIO Company Co., LTD', + 0x001161: u'NetStreams, LLC', + 0x001162: u'STAR MICRONICS CO.,LTD.', + 0x001163: u'SYSTEM SPA DEPT. 
ELECTRONICS', + 0x001164: u'ACARD Technology Corp.', + 0x001165: u'Znyx Networks', + 0x001166: u'Taelim Electronics Co., Ltd.', + 0x001167: u'Integrated System Solution Corp.', + 0x001168: u'HomeLogic LLC', + 0x001169: u'EMS Satcom', + 0x00116A: u'Domo Ltd', + 0x00116B: u'Digital Data Communications Asia Co.,Ltd', + 0x00116C: u'Nanwang Multimedia Inc.,Ltd', + 0x00116D: u'American Time and Signal', + 0x00116E: u'PePLink Ltd.', + 0x00116F: u'Netforyou Co., LTD.', + 0x001170: u'GSC SRL', + 0x001171: u'DEXTER Communications, Inc.', + 0x001172: u'COTRON CORPORATION', + 0x001173: u'Adtron Corporation', + 0x001174: u'Wibhu Technologies, Inc.', + 0x001175: u'PathScale, Inc.', + 0x001176: u'Intellambda Systems, Inc.', + 0x001177: u'COAXIAL NETWORKS, INC.', + 0x001178: u'Chiron Technology Ltd', + 0x001179: u'Singular Technology Co. Ltd.', + 0x00117A: u'Singim International Corp.', + 0x00117B: u'Büchi Labortechnik AG', + 0x00117C: u'e-zy.net', + 0x00117D: u'ZMD America, Inc.', + 0x00117E: u'Progeny Inc.', + 0x00117F: u'Neotune Information Technology Corporation,.LTD', + 0x001180: u'Motorola BCS', + 0x001181: u'InterEnergy Co.Ltd,', + 0x001182: u'IMI Norgren Ltd', + 0x001183: u'PSC Scanning, Inc', + 0x001184: u'Humo Laboratory,Ltd.', + 0x001185: u'Hewlett Packard', + 0x001186: u'Prime Systems, Inc.', + 0x001187: u'Category Solutions, Inc', + 0x001188: u'Enterasys', + 0x001189: u'Aerotech Inc', + 0x00118A: u'Viewtran Technology Limited', + 0x00118B: u'NetDevices Inc.', + 0x00118C: u'Missouri Department of Transportation', + 0x00118D: u'Hanchang System Corp.', + 0x00118E: u'Halytech Mace', + 0x00118F: u'EUTECH INSTRUMENTS PTE. 
LTD.', + 0x001190: u'Digital Design Corporation', + 0x001191: u'CTS-Clima Temperatur Systeme GmbH', + 0x001192: u'Cisco Systems', + 0x001193: u'Cisco Systems', + 0x001194: u'Chi Mei Communication Systems, Inc.', + 0x001195: u'D-Link Corporation', + 0x001196: u'Actuality Systems, Inc.', + 0x001197: u'Monitoring Technologies Limited', + 0x001198: u'Prism Media Products Limited', + 0x001199: u'2wcom GmbH', + 0x00119A: u'Alkeria srl', + 0x00119B: u'Telesynergy Research Inc.', + 0x00119C: u'EP&T Energy', + 0x00119D: u'Diginfo Technology Corporation', + 0x00119E: u'Solectron Brazil', + 0x00119F: u'Nokia Danmark A/S', + 0x0011A0: u'Vtech Engineering Canada Ltd', + 0x0011A1: u'VISION NETWARE CO.,LTD', + 0x0011A2: u'Manufacturing Technology Inc', + 0x0011A3: u'LanReady Technologies Inc.', + 0x0011A4: u'JStream Technologies Inc.', + 0x0011A5: u'Fortuna Electronic Corp.', + 0x0011A6: u'Sypixx Networks', + 0x0011A7: u'Infilco Degremont Inc.', + 0x0011A8: u'Quest Technologies', + 0x0011A9: u'MOIMSTONE Co., LTD', + 0x0011AA: u'Uniclass Technology, Co., LTD', + 0x0011AB: u'TRUSTABLE TECHNOLOGY CO.,LTD.', + 0x0011AC: u'Simtec Electronics', + 0x0011AD: u'Shanghai Ruijie Technology', + 0x0011AE: u'Motorola BCS', + 0x0011AF: u'Medialink-i,Inc', + 0x0011B0: u'Fortelink Inc.', + 0x0011B1: u'BlueExpert Technology Corp.', + 0x0011B2: u'2001 Technology Inc.', + 0x0011B3: u'YOSHIMIYA CO.,LTD.', + 0x0011B4: u'Westermo Teleindustri AB', + 0x0011B5: u'Shenzhen Powercom Co.,Ltd', + 0x0011B6: u'Open Systems International', + 0x0011B7: u'Melexis Nederland B.V.', + 0x0011B8: u'Liebherr - Elektronik GmbH', + 0x0011B9: u'Inner Range Pty. Ltd.', + 0x0011BA: u'Elexol Pty Ltd', + 0x0011BB: u'Cisco Systems', + 0x0011BC: u'Cisco Systems', + 0x0011BD: u'Bombardier Transportation', + 0x0011BE: u'AGP Telecom Co. 
Ltd', + 0x0011BF: u'AESYS S.p.A.', + 0x0011C0: u'Aday Technology Inc', + 0x0011C1: u'4P MOBILE DATA PROCESSING', + 0x0011C2: u'United Fiber Optic Communication', + 0x0011C3: u'Transceiving System Technology Corporation', + 0x0011C4: u'Terminales de Telecomunicacion Terrestre, S.L.', + 0x0011C5: u'TEN Technology', + 0x0011C6: u'Seagate Technology LLC', + 0x0011C7: u'RAYMARINE Group Ltd.', + 0x0011C8: u'Powercom Co., Ltd.', + 0x0011C9: u'MTT Corporation', + 0x0011CA: u'Long Range Systems, Inc.', + 0x0011CB: u'Jacobsons RKH AB', + 0x0011CC: u'Guangzhou Jinpeng Group Co.,Ltd.', + 0x0011CD: u'Axsun Technologies', + 0x0011CE: u'Ubisense Limited', + 0x0011CF: u'Thrane & Thrane A/S', + 0x0011D0: u'Tandberg Data ASA', + 0x0011D1: u'Soft Imaging System GmbH', + 0x0011D2: u'Perception Digital Ltd', + 0x0011D3: u'NextGenTel Holding ASA', + 0x0011D4: u'NetEnrich, Inc', + 0x0011D5: u'Hangzhou Sunyard System Engineering Co.,Ltd.', + 0x0011D6: u'HandEra, Inc.', + 0x0011D7: u'eWerks Inc', + 0x0011D8: u'ASUSTek Computer Inc.', + 0x0011D9: u'TiVo', + 0x0011DA: u'Vivaas Technology Inc.', + 0x0011DB: u'Land-Cellular Corporation', + 0x0011DC: u'Glunz & Jensen', + 0x0011DD: u'FROMUS TEC. 
Co., Ltd.', + 0x0011DE: u'EURILOGIC', + 0x0011DF: u'Arecont Systems', + 0x0011E0: u'U-MEDIA Communications, Inc.', + 0x0011E1: u'BEKO Electronics Co.', + 0x0011E2: u'Hua Jung Components Co., Ltd.', + 0x0011E3: u'Thomson, Inc.', + 0x0011E4: u'Danelec Electronics A/S', + 0x0011E5: u'KCodes Corporation', + 0x0011E6: u'Scientific Atlanta', + 0x0011E7: u'WORLDSAT - Texas de France', + 0x0011E8: u'Tixi.Com', + 0x0011E9: u'STARNEX CO., LTD.', + 0x0011EA: u'IWICS Inc.', + 0x0011EB: u'Innovative Integration', + 0x0011EC: u'AVIX INC.', + 0x0011ED: u'802 Global', + 0x0011EE: u'Estari, Inc.', + 0x0011EF: u'Conitec Datensysteme GmbH', + 0x0011F0: u'Wideful Limited', + 0x0011F1: u'QinetiQ Ltd', + 0x0011F2: u'Institute of Network Technologies', + 0x0011F3: u'Gavitec AG- mobile digit', + 0x0011F4: u'woori-net', + 0x0011F5: u'ASKEY COMPUTER CORP.', + 0x0011F6: u'Asia Pacific Microsystems , Inc.', + 0x0011F7: u'Shenzhen Forward Industry Co., Ltd', + 0x0011F8: u'AIRAYA Corp', + 0x0011F9: u'Nortel Networks', + 0x0011FA: u'Rane Corporation', + 0x0011FB: u'Heidelberg Engineering GmbH', + 0x0011FC: u'HARTING Electric Gmbh & Co.KG', + 0x0011FD: u'KORG INC.', + 0x0011FE: u'Keiyo System Research, Inc.', + 0x0011FF: u'Digitro Tecnologia Ltda', + 0x001200: u'Cisco', + 0x001201: u'Cisco', + 0x001202: u'Audio International Inc.', + 0x001203: u'Activ Networks', + 0x001204: u'u10 Networks, Inc.', + 0x001205: u'Terrasat Communications, Inc.', + 0x001206: u'iQuest (NZ) Ltd', + 0x001207: u'Head Strong International Limited', + 0x001208: u'Gantner Electronic GmbH', + 0x001209: u'Fastrax Ltd', + 0x00120A: u'Emerson Electric GmbH & Co. OHG', + 0x00120B: u'Chinasys Technologies Limited', + 0x00120C: u'CE-Infosys Pte Ltd', + 0x00120D: u'Advanced Telecommunication Technologies, Inc.', + 0x00120E: u'AboCom', + 0x00120F: u'IEEE 802.3', + 0x001210: u'WideRay Corp', + 0x001211: u'Protechna Herbst GmbH & Co. 
KG', + 0x001212: u'PLUS Vision Corporation', + 0x001213: u'Metrohm AG', + 0x001214: u'Koenig & Bauer AG', + 0x001215: u'iStor Networks, Inc.', + 0x001216: u'ICP Internet Communication Payment AG', + 0x001217: u'Cisco-Linksys, LLC', + 0x001218: u'ARUZE Corporation', + 0x001219: u'Ahead Communication Systems Inc', + 0x00121A: u'Techno Soft Systemnics Inc.', + 0x00121B: u'Sound Devices, LLC', + 0x00121C: u'PARROT S.A.', + 0x00121D: u'Netfabric Corporation', + 0x00121E: u'Juniper Networks, Inc.', + 0x00121F: u'Harding Intruments', + 0x001220: u'Cadco Systems', + 0x001221: u'B.Braun Melsungen AG', + 0x001222: u'Skardin (UK) Ltd', + 0x001223: u'Pixim', + 0x001224: u'NexQL Corporation', + 0x001225: u'Motorola BCS', + 0x001226: u'Japan Direx Corporation', + 0x001227: u'Franklin Electric Co., Inc.', + 0x001228: u'Data Ltd.', + 0x001229: u'BroadEasy Technologies Co.,Ltd', + 0x00122A: u'VTech Telecommunications Ltd.', + 0x00122B: u'Virbiage Pty Ltd', + 0x00122C: u'Soenen Controls N.V.', + 0x00122D: u'SiNett Corporation', + 0x00122E: u'Signal Technology - AISD', + 0x00122F: u'Sanei Electric Inc.', + 0x001230: u'Picaso Infocommunication CO., LTD.', + 0x001231: u'Motion Control Systems, Inc.', + 0x001232: u'LeWiz Communications Inc.', + 0x001233: u'JRC TOKKI Co.,Ltd.', + 0x001234: u'Camille Bauer', + 0x001235: u'Andrew Corporation', + 0x001236: u'ConSentry Networks', + 0x001237: u'Texas Instruments', + 0x001238: u'SetaBox Technology Co., Ltd.', + 0x001239: u'S Net Systems Inc.', + 0x00123A: u'Posystech Inc., Co.', + 0x00123B: u'KeRo Systems ApS', + 0x00123C: u'IP3 Networks, Inc.', + 0x00123D: u'GES', + 0x00123E: u'ERUNE technology Co., Ltd.', + 0x00123F: u'Dell Inc', + 0x001240: u'AMOI ELECTRONICS CO.,LTD', + 0x001241: u'a2i marketing center', + 0x001242: u'Millennial Net', + 0x001243: u'Cisco', + 0x001244: u'Cisco', + 0x001245: u'Zellweger Analytics, Inc.', + 0x001246: u'T.O.M TECHNOLOGY INC..', + 0x001247: u'Samsung Electronics Co., Ltd.', + 0x001248: u'Kashya Inc.', + 
0x001249: u'Delta Elettronica S.p.A.', + 0x00124A: u'Dedicated Devices, Inc.', + 0x00124B: u'Chipcon AS', + 0x00124C: u'BBWM Corporation', + 0x00124D: u'Inducon BV', + 0x00124E: u'XAC AUTOMATION CORP.', + 0x00124F: u'Tyco Thermal Controls LLC.', + 0x001250: u'Tokyo Aircaft Instrument Co., Ltd.', + 0x001251: u'SILINK', + 0x001252: u'Citronix, LLC', + 0x001253: u'AudioDev AB', + 0x001254: u'Spectra Technologies Holdings Company Ltd', + 0x001255: u'NetEffect Incorporated', + 0x001256: u'LG INFORMATION & COMM.', + 0x001257: u'LeapComm Communication Technologies Inc.', + 0x001258: u'Activis Polska', + 0x001259: u'THERMO ELECTRON KARLSRUHE', + 0x00125A: u'Microsoft Corporation', + 0x00125B: u'KAIMEI ELECTRONI', + 0x00125C: u'Green Hills Software, Inc.', + 0x00125D: u'CyberNet Inc.', + 0x00125E: u'CAEN', + 0x00125F: u'AWIND Inc.', + 0x001260: u'Stanton Magnetics,inc.', + 0x001261: u'Adaptix, Inc', + 0x001262: u'Nokia Danmark A/S', + 0x001263: u'Data Voice Technologies GmbH', + 0x001264: u'daum electronic gmbh', + 0x001265: u'Enerdyne Technologies, Inc.', + 0x001266: u'PRIVATE', + 0x001267: u'Matsushita Electronic Components Co., Ltd.', + 0x001268: u'IPS d.o.o.', + 0x001269: u'Value Electronics', + 0x00126A: u'OPTOELECTRONICS Co., Ltd.', + 0x00126B: u'Ascalade Communications Limited', + 0x00126C: u'Visonic Ltd.', + 0x00126D: u'University of California, Berkeley', + 0x00126E: u'Seidel Elektronik GmbH Nfg.KG', + 0x00126F: u'Rayson Technology Co., Ltd.', + 0x001270: u'NGES Denro Systems', + 0x001271: u'Measurement Computing Corp', + 0x001272: u'Redux Communications Ltd.', + 0x001273: u'Stoke Inc', + 0x001274: u'NIT lab', + 0x001275: u'Moteiv Corporation', + 0x001276: u'Microsol Holdings Ltd.', + 0x001277: u'Korenix Technologies Co., Ltd.', + 0x001278: u'International Bar Code', + 0x001279: u'Hewlett Packard', + 0x00127A: u'Sanyu Industry Co.,Ltd.', + 0x00127B: u'VIA Networking Technologies, Inc.', + 0x00127C: u'SWEGON AB', + 0x00127D: u'MobileAria', + 0x00127E: u'Digital 
Lifestyles Group, Inc.', + 0x00127F: u'Cisco', + 0x001280: u'Cisco', + 0x001281: u'CIEFFE srl', + 0x001282: u'Qovia', + 0x001283: u'Nortel Networks', + 0x001284: u'Lab33 Srl', + 0x001285: u'Gizmondo Europe Ltd', + 0x001286: u'ENDEVCO CORP', + 0x001287: u'Digital Everywhere Unterhaltungselektronik GmbH', + 0x001288: u'2Wire, Inc', + 0x001289: u'Advance Sterilization Products', + 0x00128A: u'Motorola PCS', + 0x00128B: u'Sensory Networks Inc', + 0x00128C: u'Woodward Governor', + 0x00128D: u'STB Datenservice GmbH', + 0x00128E: u'Q-Free ASA', + 0x00128F: u'Montilio', + 0x001290: u'KYOWA Electric & Machinery Corp.', + 0x001291: u'KWS Computersysteme GmbH', + 0x001292: u'Griffin Technology', + 0x001293: u'GE Energy', + 0x001294: u'Eudyna Devices Inc.', + 0x001295: u'Aiware Inc.', + 0x001296: u'Addlogix', + 0x001297: u'O2Micro, Inc.', + 0x001298: u'MICO ELECTRIC(SHENZHEN) LIMITED', + 0x001299: u'Ktech Telecommunications Inc', + 0x00129A: u'IRT Electronics Pty Ltd', + 0x00129B: u'E2S Electronic Engineering Solutions, S.L.', + 0x00129C: u'Yulinet', + 0x00129D: u'FIRST INTERNATIONAL COMPUTER DO BRASIL LTDA', + 0x00129E: u'Surf Communications Inc.', + 0x00129F: u'RAE Systems, Inc.', + 0x0012A0: u'NeoMeridian Sdn Bhd', + 0x0012A1: u'BluePacket Communications Co., Ltd.', + 0x0012A2: u'VITA', + 0x0012A3: u'Trust International B.V.', + 0x0012A4: u'ThingMagic, LLC', + 0x0012A5: u'Stargen, Inc.', + 0x0012A6: u'Lake Technology Ltd', + 0x0012A7: u'ISR TECHNOLOGIES Inc', + 0x0012A8: u'intec GmbH', + 0x0012A9: u'3COM EUROPE LTD', + 0x0012AA: u'IEE, Inc.', + 0x0012AB: u'WiLife, Inc.', + 0x0012AC: u'ONTIMETEK INC.', + 0x0012AD: u'IDS GmbH', + 0x0012AE: u'HLS HARD-LINE Solutions Inc.', + 0x0012AF: u'ELPRO Technologies', + 0x0012B0: u'Efore Oyj (Plc)', + 0x0012B1: u'Dai Nippon Printing Co., Ltd', + 0x0012B2: u'AVOLITES LTD.', + 0x0012B3: u'Advance Wireless Technology Corp.', + 0x0012B4: u'Work GmbH', + 0x0012B5: u'Vialta, Inc.', + 0x0012B6: u'Santa Barbara Infrared, Inc.', + 0x0012B7: u'PTW 
Freiburg', + 0x0012B8: u'G2 Microsystems', + 0x0012B9: u'Fusion Digital Technology', + 0x0012BA: u'FSI Systems, Inc.', + 0x0012BB: u'Telecommunications Industry Association TR-41 Committee', + 0x0012BC: u'Echolab LLC', + 0x0012BD: u'Avantec Manufacturing Limited', + 0x0012BE: u'Astek Corporation', + 0x0012BF: u'Arcadyan Technology Corporation', + 0x0012C0: u'HotLava Systems, Inc.', + 0x0012C1: u'Check Point Software Technologies', + 0x0012C2: u'Apex Electronics Factory', + 0x0012C3: u'WIT S.A.', + 0x0012C4: u'Viseon, Inc.', + 0x0012C5: u'V-Show Technology Co.Ltd', + 0x0012C6: u'TGC America, Inc', + 0x0012C7: u'SECURAY Technologies Ltd.Co.', + 0x0012C8: u'Perfect tech', + 0x0012C9: u'Motorola BCS', + 0x0012CA: u'Hansen Telecom', + 0x0012CB: u'CSS Inc.', + 0x0012CC: u'Bitatek CO., LTD', + 0x0012CD: u'ASEM SpA', + 0x0012CE: u'Advanced Cybernetics Group', + 0x0012CF: u'Accton Technology Corporation', + 0x0012D0: u'Gossen-Metrawatt-GmbH', + 0x0012D1: u'Texas Instruments Inc', + 0x0012D2: u'Texas Instruments', + 0x0012D3: u'Zetta Systems, Inc.', + 0x0012D4: u'Princeton Technology, Ltd', + 0x0012D5: u'Motion Reality Inc.', + 0x0012D6: u'Jiangsu Yitong High-Tech Co.,Ltd', + 0x0012D7: u'Invento Networks, Inc.', + 0x0012D8: u'International Games System Co., Ltd.', + 0x0012D9: u'Cisco Systems', + 0x0012DA: u'Cisco Systems', + 0x0012DB: u'ZIEHL industrie-elektronik GmbH + Co KG', + 0x0012DC: u'SunCorp Industrial Limited', + 0x0012DD: u'Shengqu Information Technology (Shanghai) Co., Ltd.', + 0x0012DE: u'Radio Components Sweden AB', + 0x0012DF: u'Novomatic AG', + 0x0012E0: u'Codan Limited', + 0x0012E1: u'Alliant Networks, Inc', + 0x0012E2: u'ALAXALA Networks Corporation', + 0x0012E3: u'Agat-RT, Ltd.', + 0x0012E4: u'ZIEHL industrie-electronik GmbH + Co KG', + 0x0012E5: u'Time America, Inc.', + 0x0012E6: u'SPECTEC COMPUTER CO., LTD.', + 0x0012E7: u'Projectek Networking Electronics Corp.', + 0x0012E8: u'Fraunhofer IMS', + 0x0012E9: u'Abbey Systems Ltd', + 0x0012EA: u'Trane', + 
0x0012EB: u'R2DI, LLC', + 0x0012EC: u'Movacolor b.v.', + 0x0012ED: u'AVG Advanced Technologies', + 0x0012EE: u'Sony Ericsson Mobile Communications AB', + 0x0012EF: u'OneAccess SA', + 0x0012F0: u'Intel Corporate', + 0x0012F1: u'IFOTEC', + 0x0012F2: u'Foundry Networks', + 0x0012F3: u'connectBlue AB', + 0x0012F4: u'Belco International Co.,Ltd.', + 0x0012F5: u'Prolificx Ltd', + 0x0012F6: u'MDK CO.,LTD.', + 0x0012F7: u'Xiamen Xinglian Electronics Co., Ltd.', + 0x0012F8: u'WNI Resources, LLC', + 0x0012F9: u'URYU SEISAKU, LTD.', + 0x0012FA: u'THX LTD', + 0x0012FB: u'Samsung Electronics', + 0x0012FC: u'PLANET System Co.,LTD', + 0x0012FD: u'OPTIMUS IC S.A.', + 0x0012FE: u'Lenovo Mobile Communication Technology Ltd.', + 0x0012FF: u'Lely Industries N.V.', + 0x001300: u'IT-FACTORY, INC.', + 0x001301: u'IronGate S.L.', + 0x001302: u'Intel Corporate', + 0x001303: u'GateConnect Technologies GmbH', + 0x001304: u'Flaircomm Technologies Co. LTD', + 0x001305: u'Epicom, Inc.', + 0x001306: u'Always On Wireless', + 0x001307: u'Paravirtual Corporation', + 0x001308: u'Nuvera Fuel Cells', + 0x001309: u'Ocean Broadband Networks', + 0x00130A: u'Nortel', + 0x00130B: u'Mextal B.V.', + 0x00130C: u'HF System Corporation', + 0x00130D: u'GALILEO AVIONICA', + 0x00130E: u'Focusrite Audio Engineering Limited', + 0x00130F: u'EGEMEN Bilgisayar Muh San ve Tic LTD STI', + 0x001310: u'Cisco-Linksys, LLC', + 0x001311: u'ARRIS International', + 0x001312: u'Amedia Networks Inc.', + 0x001313: u'GuangZhou Post & Telecom Equipment ltd', + 0x001314: u'Asiamajor Inc.', + 0x001315: u'SONY Computer Entertainment inc,', + 0x001316: u'L-S-B GmbH', + 0x001317: u'GN Netcom as', + 0x001318: u'DGSTATION Co., Ltd.', + 0x001319: u'Cisco Systems', + 0x00131A: u'Cisco Systems', + 0x00131B: u'BeCell Innovations Corp.', + 0x00131C: u'LiteTouch, Inc.', + 0x00131D: u'Scanvaegt International A/S', + 0x00131E: u'Peiker acustic GmbH & Co. 
KG', + 0x00131F: u'NxtPhase T&D, Corp.', + 0x001320: u'Intel Corporate', + 0x001321: u'Hewlett Packard', + 0x001322: u'DAQ Electronics, Inc.', + 0x001323: u'Cap Co., Ltd.', + 0x001324: u'Schneider Electric Ultra Terminal', + 0x001325: u'ImmenStar Inc.', + 0x001326: u'ECM Systems Ltd', + 0x001327: u'Data Acquisitions limited', + 0x001328: u'Westech Korea Inc.,', + 0x001329: u'VSST Co., LTD', + 0x00132A: u'STROM telecom, s. r. o.', + 0x00132B: u'Phoenix Digital', + 0x00132C: u'MAZ Brandenburg GmbH', + 0x00132D: u'iWise Communications Pty Ltd', + 0x00132E: u'ITian Coporation', + 0x00132F: u'Interactek', + 0x001330: u'EURO PROTECTION SURVEILLANCE', + 0x001331: u'CellPoint Connect', + 0x001332: u'Beijing Topsec Network Security Technology Co., Ltd.', + 0x001333: u'Baud Technology Inc.', + 0x001334: u'Arkados, Inc.', + 0x001335: u'VS Industry Berhad', + 0x001336: u'Tianjin 712 Communication Broadcasting co., ltd.', + 0x001337: u'Orient Power Home Network Ltd.', + 0x001338: u'FRESENIUS-VIAL', + 0x001339: u'EL-ME AG', + 0x00133A: u'VadaTech Inc.', + 0x00133B: u'Speed Dragon Multimedia Limited', + 0x00133C: u'QUINTRON SYSTEMS INC.', + 0x00133D: u'Micro Memory LLC', + 0x00133E: u'MetaSwitch', + 0x00133F: u'Eppendorf Instrumente GmbH', + 0x001340: u'AD.EL s.r.l.', + 0x001341: u'Shandong New Beiyang Information Technology Co.,Ltd', + 0x001342: u'Vision Research, Inc.', + 0x001343: u'Matsushita Electronic Components (Europe) GmbH', + 0x001344: u'Fargo Electronics Inc.', + 0x001345: u'Eaton Corporation', + 0x001346: u'D-Link Corporation', + 0x001347: u'BlueTree Wireless Data Inc.', + 0x001348: u'Artila Electronics Co., Ltd.', + 0x001349: u'ZyXEL Communications Corporation', + 0x00134A: u'Engim, Inc.', + 0x00134B: u'ToGoldenNet Technology Inc.', + 0x00134C: u'YDT Technology International', + 0x00134D: u'IPC systems', + 0x00134E: u'Valox Systems, Inc.', + 0x00134F: u'Tranzeo Wireless Technologies Inc.', + 0x001350: u'Silver Spring Networks, Inc', + 0x001351: u'Niles Audio 
Corporation', + 0x001352: u'Naztec, Inc.', + 0x001353: u'HYDAC Filtertechnik GMBH', + 0x001354: u'Zcomax Technologies, Inc.', + 0x001355: u'TOMEN Cyber-business Solutions, Inc.', + 0x001356: u'target systemelectronic gmbh', + 0x001357: u'Soyal Technology Co., Ltd.', + 0x001358: u'Realm Systems, Inc.', + 0x001359: u'ProTelevision Technologies A/S', + 0x00135A: u'Project T&E Limited', + 0x00135B: u'PanelLink Cinema, LLC', + 0x00135C: u'OnSite Systems, Inc.', + 0x00135D: u'NTTPC Communications, Inc.', + 0x00135E: u'EAB/RWI/K', + 0x00135F: u'Cisco Systems', + 0x001360: u'Cisco Systems', + 0x001361: u'Biospace Co., Ltd.', + 0x001362: u'ShinHeung Precision Co., Ltd.', + 0x001363: u'Verascape, Inc.', + 0x001364: u'Paradigm Technology Inc..', + 0x001365: u'Nortel', + 0x001366: u'Neturity Technologies Inc.', + 0x001367: u'Narayon. Co., Ltd.', + 0x001368: u'Maersk Data Defence', + 0x001369: u'Honda Electron Co., LED.', + 0x00136A: u'Hach Ultra Analytics', + 0x00136B: u'E-TEC', + 0x00136C: u'PRIVATE', + 0x00136D: u'Tentaculus AB', + 0x00136E: u'Techmetro Corp.', + 0x00136F: u'PacketMotion, Inc.', + 0x001370: u'Nokia Danmark A/S', + 0x001371: u'Motorola CHS', + 0x001372: u'Dell Inc.', + 0x001373: u'BLwave Electronics Co., Ltd', + 0x001374: u'Attansic Technology Corp.', + 0x001375: u'American Security Products Co.', + 0x001376: u'Tabor Electronics Ltd.', + 0x001377: u'Samsung Electronics CO., LTD', + 0x001378: u'QSAN Technology, Inc.', + 0x001379: u'PONDER INFORMATION INDUSTRIES LTD.', + 0x00137A: u'Netvox Technology Co., Ltd.', + 0x00137B: u'Movon Corporation', + 0x00137C: u'Kaicom co., Ltd.', + 0x00137D: u'Dynalab, Inc.', + 0x00137E: u'CorEdge Networks, Inc.', + 0x00137F: u'Cisco Systems', + 0x001380: u'Cisco Systems', + 0x001381: u'CHIPS & Systems, Inc.', + 0x001382: u'Cetacea Networks Corporation', + 0x001383: u'Application Technologies and Engineering Research Laboratory', + 0x001384: u'Advanced Motion Controls', + 0x001385: u'Add-On Technology Co., LTD.', + 0x001386: 
u'ABB Inc./Totalflow', + 0x001387: u'27M Technologies AB', + 0x001388: u'WiMedia Alliance', + 0x001389: u'Redes de Telefonía Móvil S.A.', + 0x00138A: u'QINGDAO GOERTEK ELECTRONICS CO.,LTD.', + 0x00138B: u'Phantom Technologies LLC', + 0x00138C: u'Kumyoung.Co.Ltd', + 0x00138D: u'Kinghold', + 0x00138E: u'FOAB Elektronik AB', + 0x00138F: u'Asiarock Incorporation', + 0x001390: u'Termtek Computer Co., Ltd', + 0x001391: u'OUEN CO.,LTD.', + 0x001392: u'Ruckus Wireless', + 0x001393: u'Panta Systems, Inc.', + 0x001394: u'Infohand Co.,Ltd', + 0x001395: u'congatec AG', + 0x001396: u'Acbel Polytech Inc.', + 0x001397: u'Xsigo Systems, Inc.', + 0x001398: u'TrafficSim Co.,Ltd', + 0x001399: u'STAC Corporation.', + 0x00139A: u'K-ubique ID Corp.', + 0x00139B: u'ioIMAGE Ltd.', + 0x00139C: u'Exavera Technologies, Inc.', + 0x00139D: u'Design of Systems on Silicon S.A.', + 0x00139E: u'Ciara Technologies Inc.', + 0x00139F: u'Electronics Design Services, Co., Ltd.', + 0x0013A0: u'ALGOSYSTEM Co., Ltd.', + 0x0013A1: u'Crow Electronic Engeneering', + 0x0013A2: u'MaxStream, Inc', + 0x0013A3: u'Siemens Com CPE Devices', + 0x0013A4: u'KeyEye Communications', + 0x0013A5: u'General Solutions, LTD.', + 0x0013A6: u'Extricom Ltd', + 0x0013A7: u'BATTELLE MEMORIAL INSTITUTE', + 0x0013A8: u'Tanisys Technology', + 0x0013A9: u'Sony Corporation', + 0x0013AA: u'ALS & TEC Ltd.', + 0x0013AB: u'Telemotive AG', + 0x0013AC: u'Sunmyung Electronics Co., LTD', + 0x0013AD: u'Sendo Ltd', + 0x0013AE: u'Radiance Technologies', + 0x0013AF: u'NUMA Technology,Inc.', + 0x0013B0: u'Jablotron', + 0x0013B1: u'Intelligent Control Systems (Asia) Pte Ltd', + 0x0013B2: u'Carallon Limited', + 0x0013B3: u'Beijing Ecom Communications Technology Co., Ltd.', + 0x0013B4: u'Appear TV', + 0x0013B5: u'Wavesat', + 0x0013B6: u'Sling Media, Inc.', + 0x0013B7: u'Scantech ID', + 0x0013B8: u'RyCo Electronic Systems Limited', + 0x0013B9: u'BM SPA', + 0x0013BA: u'ReadyLinks Inc', + 0x0013BB: u'PRIVATE', + 0x0013BC: u'Artimi Ltd', + 0x0013BD: 
u'HYMATOM SA', + 0x0013BE: u'Virtual Conexions', + 0x0013BF: u'Media System Planning Corp.', + 0x0013C0: u'Trix Tecnologia Ltda.', + 0x0013C1: u'Asoka USA Corporation', + 0x0013C2: u'WACOM Co.,Ltd', + 0x0013C3: u'Cisco Systems', + 0x0013C4: u'Cisco Systems', + 0x0013C5: u'LIGHTRON FIBER-OPTIC DEVICES INC.', + 0x0013C6: u'OpenGear, Inc', + 0x0013C7: u'IONOS Co.,Ltd.', + 0x0013C8: u'PIRELLI BROADBAND SOLUTIONS S.P.A.', + 0x0013C9: u'Beyond Achieve Enterprises Ltd.', + 0x0013CA: u'X-Digital Systems, Inc.', + 0x0013CB: u'Zenitel Norway AS', + 0x0013CC: u'Tall Maple Systems', + 0x0013CD: u'MTI co. LTD', + 0x0013CE: u'Intel Corporate', + 0x0013CF: u'4Access Communications', + 0x0013D0: u'e-San Limited', + 0x0013D1: u'KIRK telecom A/S', + 0x0013D2: u'PAGE IBERICA, S.A.', + 0x0013D3: u'MICRO-STAR INTERNATIONAL CO., LTD.', + 0x0013D4: u'ASUSTek COMPUTER INC.', + 0x0013D5: u'WiNetworks LTD', + 0x0013D6: u'TII NETWORK TECHNOLOGIES, INC.', + 0x0013D7: u'SPIDCOM Technologies SA', + 0x0013D8: u'Princeton Instruments', + 0x0013D9: u'Matrix Product Development, Inc.', + 0x0013DA: u'Diskware Co., Ltd', + 0x0013DB: u'SHOEI Electric Co.,Ltd', + 0x0013DC: u'IBTEK INC.', + 0x0013DD: u'Abbott Diagnostics', + 0x0013DE: u'Adapt4', + 0x0013DF: u'Ryvor Corp.', + 0x0013E0: u'Murata Manufacturing Co., Ltd.', + 0x0013E1: u'Iprobe', + 0x0013E2: u'GeoVision Inc.', + 0x0013E3: u'CoVi Technologies, Inc.', + 0x0013E4: u'YANGJAE SYSTEMS CORP.', + 0x0013E5: u'TENOSYS, INC.', + 0x0013E6: u'Technolution', + 0x0013E7: u'Minelab Electronics Pty Limited', + 0x0013E8: u'Intel Corporate', + 0x0013E9: u'VeriWave, Inc.', + 0x0013EA: u'Kamstrup A/S', + 0x0013EB: u'Sysmaster Corporation', + 0x0013EC: u'Sunbay Software AG', + 0x0013ED: u'PSIA', + 0x0013EE: u'JBX Designs Inc.', + 0x0013EF: u'Kingjon Digital Technology Co.,Ltd', + 0x0013F0: u'Wavefront Semiconductor', + 0x0013F1: u'AMOD Technology Co., Ltd.', + 0x0013F2: u'Klas Ltd', + 0x0013F3: u'Giga-byte Communications Inc.', + 0x0013F4: u'Psitek (Pty) Ltd', + 
0x0013F5: u'Akimbi Systems', + 0x0013F6: u'Cintech', + 0x0013F7: u'SMC Networks, Inc.', + 0x0013F8: u'Dex Security Solutions', + 0x0013F9: u'Cavera Systems', + 0x0013FA: u'LifeSize Communications, Inc', + 0x0013FB: u'RKC INSTRUMENT INC.', + 0x0013FC: u'SiCortex, Inc', + 0x0013FD: u'Nokia Danmark A/S', + 0x0013FE: u'GRANDTEC ELECTRONIC CORP.', + 0x0013FF: u'Dage-MTI of MC, Inc.', + 0x001400: u'MINERVA KOREA CO., LTD', + 0x001401: u'Rivertree Networks Corp.', + 0x001402: u'kk-electronic a/s', + 0x001403: u'Renasis, LLC', + 0x001404: u'Motorola CHS', + 0x001405: u'OpenIB, Inc.', + 0x001406: u'Go Networks', + 0x001407: u'Biosystems', + 0x001408: u'Eka Systems Inc.', + 0x001409: u'MAGNETI MARELLI S.E. S.p.A.', + 0x00140A: u'WEPIO Co., Ltd.', + 0x00140B: u'FIRST INTERNATIONAL COMPUTER, INC.', + 0x00140C: u'GKB CCTV CO., LTD.', + 0x00140D: u'Nortel', + 0x00140E: u'Nortel', + 0x00140F: u'Federal State Unitary Enterprise Leningrad R&D Institute of', + 0x001410: u'Suzhou Keda Technology CO.,Ltd', + 0x001411: u'Deutschmann Automation GmbH & Co. KG', + 0x001412: u'S-TEC electronics AG', + 0x001413: u'Trebing & Himstedt Prozessautomation GmbH & Co. KG', + 0x001414: u'Jumpnode Systems LLC.', + 0x001415: u'Intec Automation Inc.', + 0x001416: u'Scosche Industries, Inc.', + 0x001417: u'RSE Informations Technologie GmbH', + 0x001418: u'C4Line', + 0x001419: u'SIDSA', + 0x00141A: u'DEICY CORPORATION', + 0x00141B: u'Cisco Systems', + 0x00141C: u'Cisco Systems', + 0x00141D: u'Lust Antriebstechnik GmbH', + 0x00141E: u'P.A. Semi, Inc.', + 0x00141F: u'SunKwang Electronics Co., Ltd', + 0x001420: u'G-Links networking company', + 0x001421: u'Total Wireless Technologies Pte. Ltd.', + 0x001422: u'Dell Inc.', + 0x001423: u'J-S Co. 
NEUROCOM', + 0x001424: u'Merry Electrics CO., LTD.', + 0x001425: u'Galactic Computing Corp.', + 0x001426: u'NL Technology', + 0x001427: u'JazzMutant', + 0x001428: u'Vocollect, Inc', + 0x001429: u'V Center Technologies Co., Ltd.', + 0x00142A: u'Elitegroup Computer System Co., Ltd', + 0x00142B: u'Edata Technologies Inc.', + 0x00142C: u'Koncept International, Inc.', + 0x00142D: u'Toradex AG', + 0x00142E: u'77 Elektronika Kft.', + 0x00142F: u'WildPackets', + 0x001430: u'ViPowER, Inc', + 0x001431: u'PDL Electronics Ltd', + 0x001432: u'Tarallax Wireless, Inc.', + 0x001433: u'Empower Technologies(Canada) Inc.', + 0x001434: u'Keri Systems, Inc', + 0x001435: u'CityCom Corp.', + 0x001436: u'Qwerty Elektronik AB', + 0x001437: u'GSTeletech Co.,Ltd.', + 0x001438: u'Hewlett Packard', + 0x001439: u'Blonder Tongue Laboratories, Inc.', + 0x00143A: u'RAYTALK INTERNATIONAL SRL', + 0x00143B: u'Sensovation AG', + 0x00143C: u'Oerlikon Contraves Inc.', + 0x00143D: u'Aevoe Inc.', + 0x00143E: u'AirLink Communications, Inc.', + 0x00143F: u'Hotway Technology Corporation', + 0x001440: u'ATOMIC Corporation', + 0x001441: u'Innovation Sound Technology Co., LTD.', + 0x001442: u'ATTO CORPORATION', + 0x001443: u'Consultronics Europe Ltd', + 0x001444: u'Grundfos Electronics', + 0x001445: u'Telefon-Gradnja d.o.o.', + 0x001446: u'KidMapper, Inc.', + 0x001447: u'BOAZ Inc.', + 0x001448: u'Inventec Multimedia & Telecom Corporation', + 0x001449: u'Sichuan Changhong Electric Ltd.', + 0x00144A: u'Taiwan Thick-Film Ind. 
Corp.', + 0x00144B: u'Hifn, Inc.', + 0x00144C: u'General Meters Corp.', + 0x00144D: u'Intelligent Systems', + 0x00144E: u'SRISA', + 0x00144F: u'Sun Microsystems, Inc.', + 0x001450: u'Heim Systems GmbH', + 0x001451: u'Apple Computer Inc.', + 0x001452: u'CALCULEX,INC.', + 0x001453: u'ADVANTECH TECHNOLOGIES CO.,LTD', + 0x001454: u'Symwave', + 0x001455: u'Coder Electronics Corporation', + 0x001456: u'Edge Products', + 0x001457: u'T-VIPS AS', + 0x001458: u'HS Automatic ApS', + 0x001459: u'Moram Co., Ltd.', + 0x00145A: u'Elektrobit AG', + 0x00145B: u'SeekerNet Inc.', + 0x00145C: u'Intronics B.V.', + 0x00145D: u'WJ Communications, Inc.', + 0x00145E: u'IBM', + 0x00145F: u'ADITEC CO. LTD', + 0x001460: u'Kyocera Wireless Corp.', + 0x001461: u'CORONA CORPORATION', + 0x001462: u'Digiwell Technology, inc', + 0x001463: u'IDCS N.V.', + 0x001464: u'Cryptosoft', + 0x001465: u'Novo Nordisk A/S', + 0x001466: u'Kleinhenz Elektronik GmbH', + 0x001467: u'ArrowSpan Inc.', + 0x001468: u'CelPlan International, Inc.', + 0x001469: u'Cisco Systems', + 0x00146A: u'Cisco Systems', + 0x00146B: u'Anagran, Inc.', + 0x00146C: u'Netgear Inc.', + 0x00146D: u'RF Technologies', + 0x00146E: u'H. Stoll GmbH & Co. 
KG', + 0x00146F: u'Kohler Co', + 0x001470: u'Prokom Software SA', + 0x001471: u'Eastern Asia Technology Limited', + 0x001472: u'China Broadband Wireless IP Standard Group', + 0x001473: u'Bookham Inc', + 0x001474: u'K40 Electronics', + 0x001475: u'Wiline Networks, Inc.', + 0x001476: u'MultiCom Industries Limited', + 0x001477: u'Nertec Inc.', + 0x001478: u'ShenZhen TP-LINK Technologies Co., Ltd.', + 0x001479: u'NEC Magnus Communications,Ltd.', + 0x00147A: u'Eubus GmbH', + 0x00147B: u'Iteris, Inc.', + 0x00147C: u'3Com Europe Ltd', + 0x00147D: u'Aeon Digital International', + 0x00147E: u'PanGo Networks, Inc.', + 0x00147F: u'Thomson Telecom Belgium', + 0x001480: u'Hitachi-LG Data Storage Korea, Inc', + 0x001481: u'Multilink Inc', + 0x001482: u'GoBackTV, Inc', + 0x001483: u'eXS Inc.', + 0x001484: u'CERMATE TECHNOLOGIES INC', + 0x001485: u'Giga-Byte', + 0x001486: u'Echo Digital Audio Corporation', + 0x001487: u'American Technology Integrators', + 0x001488: u'Akorri Networks', + 0x001489: u'B15402100 - JANDEI, S.L.', + 0x00148A: u'Elin Ebg Traction Gmbh', + 0x00148B: u'Globo Electronic GmbH & Co. 
KG', + 0x00148C: u'Fortress Technologies', + 0x00148D: u'Cubic Defense Simulation Systems', + 0x00148E: u'Tele Power Inc.', + 0x00148F: u'Protronic (Far East) Ltd.', + 0x001490: u'ASP Corporation', + 0x001491: u'Daniels Electronics Ltd.', + 0x001492: u'Liteon, Mobile Media Solution SBU', + 0x001493: u'Systimax Solutions', + 0x001494: u'ESU AG', + 0x001495: u'2Wire, Inc.', + 0x001496: u'Phonic Corp.', + 0x001497: u'ZHIYUAN Eletronics co.,ltd.', + 0x001498: u'Viking Design Technology', + 0x001499: u'Helicomm Inc', + 0x00149A: u'Motorola Mobile Devices Business', + 0x00149B: u'Nokota Communications, LLC', + 0x00149C: u'HF Company', + 0x00149D: u'Sound ID Inc.', + 0x00149E: u'UbONE Co., Ltd', + 0x00149F: u'System and Chips, Inc.', + 0x0014A0: u'RFID Asset Track, Inc.', + 0x0014A1: u'Synchronous Communication Corp', + 0x0014A2: u'Core Micro Systems Inc.', + 0x0014A3: u'Vitelec BV', + 0x0014A4: u'Hon Hai Precision Ind. Co., Ltd.', + 0x0014A5: u'Gemtek Technology Co., Ltd.', + 0x0014A6: u'Teranetics, Inc.', + 0x0014A7: u'Nokia Danmark A/S', + 0x0014A8: u'Cisco Systems', + 0x0014A9: u'Cisco Systems', + 0x0014AA: u'Ashly Audio, Inc.', + 0x0014AB: u'Senhai Electronic Technology Co., Ltd.', + 0x0014AC: u'Bountiful WiFi', + 0x0014AD: u'Gassner Wiege- u. Meßtechnik GmbH', + 0x0014AE: u'Wizlogics Co., Ltd.', + 0x0014AF: u'Datasym Inc.', + 0x0014B0: u'Naeil Community', + 0x0014B1: u'Avitec AB', + 0x0014B2: u'mCubelogics Corporation', + 0x0014B3: u'CoreStar International Corp', + 0x0014B4: u'General Dynamics United Kingdom Ltd', + 0x0014B5: u'PRIVATE', + 0x0014B6: u'Enswer Technology Inc.', + 0x0014B7: u'AR Infotek Inc.', + 0x0014B8: u'Hill-Rom', + 0x0014B9: u'STEPMIND', + 0x0014BA: u'Carvers SA de CV', + 0x0014BB: u'Open Interface North America', + 0x0014BC: u'SYNECTIC TELECOM EXPORTS PVT. LTD.', + 0x0014BD: u'incNETWORKS, Inc', + 0x0014BE: u'Wink communication technology CO.LTD', + 0x0014BF: u'Cisco-Linksys LLC', + 0x0014C0: u'Symstream Technology Group Ltd', + 0x0014C1: u'U.S. 
Robotics Corporation', + 0x0014C2: u'Hewlett Packard', + 0x0014C3: u'Seagate Technology LLC', + 0x0014C4: u'Vitelcom Mobile Technology', + 0x0014C5: u'Alive Technologies Pty Ltd', + 0x0014C6: u'Quixant Ltd', + 0x0014C7: u'Nortel', + 0x0014C8: u'Contemporary Research Corp', + 0x0014C9: u'Silverback Systems, Inc.', + 0x0014CA: u'Key Radio Systems Limited', + 0x0014CB: u'LifeSync Corporation', + 0x0014CC: u'Zetec, Inc.', + 0x0014CD: u'DigitalZone Co., Ltd.', + 0x0014CE: u'NF CORPORATION', + 0x0014CF: u'Nextlink.to A/S', + 0x0014D0: u'BTI Photonics', + 0x0014D1: u'TRENDware International, Inc.', + 0x0014D2: u'KYUKI CORPORATION', + 0x0014D3: u'SEPSA', + 0x0014D4: u'K Technology Corporation', + 0x0014D5: u'Datang Telecom Technology CO. , LCD,Optical Communication Br', + 0x0014D6: u'Jeongmin Electronics Co.,Ltd.', + 0x0014D7: u'DataStor Technology Inc.', + 0x0014D8: u'bio-logic SA', + 0x0014D9: u'IP Fabrics, Inc.', + 0x0014DA: u'Huntleigh Healthcare', + 0x0014DB: u'Elma Trenew Electronic GmbH', + 0x0014DC: u'Communication System Design & Manufacturing (CSDM)', + 0x0014DD: u'Covergence Inc.', + 0x0014DE: u'Sage Instruments Inc.', + 0x0014DF: u'HI-P Tech Corporation', + 0x0014E0: u'LET\'S Corporation', + 0x0014E1: u'Data Display AG', + 0x0014E2: u'datacom systems inc.', + 0x0014E3: u'mm-lab GmbH', + 0x0014E4: u'Integral Technologies', + 0x0014E5: u'Alticast', + 0x0014E6: u'AIM Infrarotmodule GmbH', + 0x0014E7: u'Stolinx,. Inc', + 0x0014E8: u'Motorola CHS', + 0x0014E9: u'Nortech International', + 0x0014EA: u'S Digm Inc. 
(Safe Paradigm Inc.)', + 0x0014EB: u'AwarePoint Corporation', + 0x0014EC: u'Acro Telecom', + 0x0014ED: u'Airak, Inc.', + 0x0014EE: u'Western Digital Technologies, Inc.', + 0x0014EF: u'TZero Technologies, Inc.', + 0x0014F0: u'Business Security OL AB', + 0x0014F1: u'Cisco Systems', + 0x0014F2: u'Cisco Systems', + 0x0014F3: u'ViXS Systems Inc', + 0x0014F4: u'DekTec Digital Video B.V.', + 0x0014F5: u'OSI Security Devices', + 0x0014F6: u'Juniper Networks, Inc.', + 0x0014F7: u'Crevis', + 0x0014F8: u'Scientific Atlanta', + 0x0014F9: u'Vantage Controls', + 0x0014FA: u'AsGa S.A.', + 0x0014FB: u'Technical Solutions Inc.', + 0x0014FC: u'Extandon, Inc.', + 0x0014FD: u'Thecus Technology Corp.', + 0x0014FE: u'Artech Electronics', + 0x0014FF: u'Precise Automation, LLC', + 0x001500: u'Intel Corporate', + 0x001501: u'LexBox', + 0x001502: u'BETA tech', + 0x001503: u'PROFIcomms s.r.o.', + 0x001504: u'GAME PLUS CO., LTD.', + 0x001505: u'Actiontec Electronics, Inc', + 0x001506: u'BeamExpress, Inc', + 0x001507: u'Renaissance Learning Inc', + 0x001508: u'Global Target Enterprise Inc', + 0x001509: u'Plus Technology Co., Ltd', + 0x00150A: u'Sonoa Systems, Inc', + 0x00150B: u'SAGE INFOTECH LTD.', + 0x00150C: u'AVM GmbH', + 0x00150D: u'Hoana Medical, Inc.', + 0x00150E: u'OPENBRAIN TECHNOLOGIES CO., LTD.', + 0x00150F: u'mingjong', + 0x001510: u'Techsphere Co., Ltd', + 0x001511: u'Data Center Systems', + 0x001512: u'Zurich University of Applied Sciences', + 0x001513: u'EFS sas', + 0x001514: u'Hu Zhou NAVA Networks&Electronics Ltd.', + 0x001515: u'Leipold+Co.GmbH', + 0x001516: u'URIEL SYSTEMS INC.', + 0x001517: u'Intel Corporate', + 0x001518: u'Shenzhen 10MOONS Technology Development CO.,Ltd', + 0x001519: u'StoreAge Networking Technologies', + 0x00151A: u'Hunter Engineering Company', + 0x00151B: u'Isilon Systems Inc.', + 0x00151C: u'LENECO', + 0x00151D: u'M2I CORPORATION', + 0x00151E: u'Metaware Co., Ltd.', + 0x00151F: u'Multivision Intelligent Surveillance (Hong Kong) Ltd', + 0x001520: 
u'Radiocrafts AS', + 0x001521: u'Horoquartz', + 0x001522: u'Dea Security', + 0x001523: u'Meteor Communications Corporation', + 0x001524: u'Numatics, Inc.', + 0x001525: u'PTI Integrated Systems, Inc.', + 0x001526: u'Remote Technologies Inc', + 0x001527: u'Balboa Instruments', + 0x001528: u'Beacon Medical Products LLC d.b.a. BeaconMedaes', + 0x001529: u'N3 Corporation', + 0x00152A: u'Nokia GmbH', + 0x00152B: u'Cisco Systems', + 0x00152C: u'Cisco Systems', + 0x00152D: u'TenX Networks, LLC', + 0x00152E: u'PacketHop, Inc.', + 0x00152F: u'Motorola CHS', + 0x001530: u'Bus-Tech, Inc.', + 0x001531: u'KOCOM', + 0x001532: u'Consumer Technologies Group, LLC', + 0x001533: u'NADAM.CO.,LTD', + 0x001534: u'A BELTRÓNICA, Companhia de Comunicações, Lda', + 0x001535: u'OTE Spa', + 0x001536: u'Powertech co.,Ltd', + 0x001537: u'Ventus Networks', + 0x001538: u'RFID, Inc.', + 0x001539: u'Technodrive SRL', + 0x00153A: u'Shenzhen Syscan Technology Co.,Ltd.', + 0x00153B: u'EMH Elektrizitätszähler GmbH & CoKG', + 0x00153C: u'Kprotech Co., Ltd.', + 0x00153D: u'ELIM PRODUCT CO.', + 0x00153E: u'Q-Matic Sweden AB', + 0x00153F: u'Alcatel Alenia Space Italia', + 0x001540: u'Nortel', + 0x001541: u'StrataLight Communications, Inc.', + 0x001542: u'MICROHARD S.R.L.', + 0x001543: u'Aberdeen Test Center', + 0x001544: u'coM.s.a.t. AG', + 0x001545: u'SEECODE Co., Ltd.', + 0x001546: u'ITG Worldwide Sdn Bhd', + 0x001547: u'AiZen Solutions Inc.', + 0x001548: u'CUBE TECHNOLOGIES', + 0x001549: u'Dixtal Biomedica Ind. Com. 
Ltda', + 0x00154A: u'WANSHIH ELECTRONIC CO., LTD', + 0x00154B: u'Wonde Proud Technology Co., Ltd', + 0x00154C: u'Saunders Electronics', + 0x00154D: u'Netronome Systems, Inc.', + 0x00154E: u'Hirschmann Automation and Control GmbH', + 0x00154F: u'one RF Technology', + 0x001550: u'Nits Technology Inc', + 0x001551: u'RadioPulse Inc.', + 0x001552: u'Wi-Gear Inc.', + 0x001553: u'Cytyc Corporation', + 0x001554: u'Atalum Wireless S.A.', + 0x001555: u'DFM GmbH', + 0x001556: u'SAGEM SA', + 0x001557: u'Olivetti', + 0x001558: u'FOXCONN', + 0x001559: u'Securaplane Technologies, Inc.', + 0x00155A: u'DAINIPPON PHARMACEUTICAL CO., LTD.', + 0x00155B: u'Sampo Corporation', + 0x00155C: u'Dresser Wayne', + 0x00155D: u'Microsoft Corporation', + 0x00155E: u'Morgan Stanley', + 0x00155F: u'Ubiwave', + 0x001560: u'Hewlett Packard', + 0x001561: u'JJPlus Corporation', + 0x001562: u'Cisco Systems', + 0x001563: u'Cisco Systems', + 0x001564: u'BEHRINGER Spezielle Studiotechnik GmbH', + 0x001565: u'XIAMEN YEALINK NETWORK TECHNOLOGY CO.,LTD', + 0x001566: u'A-First Technology Co., Ltd.', + 0x001567: u'RADWIN Inc.', + 0x001568: u'Dilithium Networks', + 0x001569: u'PECO II, Inc.', + 0x00156A: u'DG2L Technologies Pvt. Ltd.', + 0x00156B: u'Perfisans Networks Corp.', + 0x00156C: u'SANE SYSTEM CO., LTD', + 0x00156D: u'Ubiquiti Networks', + 0x00156E: u'A. W. Communication Systems Ltd', + 0x00156F: u'Xiranet Communications GmbH', + 0x001570: u'Symbol Technologies', + 0x001571: u'Nolan Systems', + 0x001572: u'Red-Lemon', + 0x001573: u'NewSoft Technology Corporation', + 0x001574: u'Horizon Semiconductors Ltd.', + 0x001575: u'Nevis Networks Inc.', + 0x001576: u'scil animal care company GmbH', + 0x001577: u'Allied Telesyn, Inc.', + 0x001578: u'Audio / Video Innovations', + 0x001579: u'Lunatone Industrielle Elektronik GmbH', + 0x00157A: u'Telefin S.p.A.', + 0x00157B: u'Leuze electronic GmbH + Co. 
KG', + 0x00157C: u'Dave Networks, Inc.', + 0x00157D: u'POSDATA CO., LTD.', + 0x00157E: u'HEYFRA ELECTRONIC gmbH', + 0x00157F: u'ChuanG International Holding CO.,LTD.', + 0x001580: u'U-WAY CORPORATION', + 0x001581: u'MAKUS Inc.', + 0x001582: u'TVonics Ltd', + 0x001583: u'IVT corporation', + 0x001584: u'Schenck Process GmbH', + 0x001585: u'Aonvision Technolopy Corp.', + 0x001586: u'Xiamen Overseas Chinese Electronic Co., Ltd.', + 0x001587: u'Takenaka Seisakusho Co.,Ltd', + 0x001588: u'Balda-Thong Fook Solutions Sdn. Bhd.', + 0x001589: u'D-MAX Technology Co.,Ltd', + 0x00158A: u'SURECOM Technology Corp.', + 0x00158B: u'Park Air Systems Ltd', + 0x00158C: u'Liab ApS', + 0x00158D: u'Jennic Ltd', + 0x00158E: u'Plustek.INC', + 0x00158F: u'NTT Advanced Technology Corporation', + 0x001590: u'Hectronic GmbH', + 0x001591: u'RLW Inc.', + 0x001592: u'Facom UK Ltd (Melksham)', + 0x001593: u'U4EA Technologies Inc.', + 0x001594: u'BIXOLON CO.,LTD', + 0x001595: u'Quester Tangent Corporation', + 0x001596: u'ARRIS International', + 0x001597: u'AETA AUDIO SYSTEMS', + 0x001598: u'Kolektor group', + 0x001599: u'Samsung Electronics Co., LTD', + 0x00159A: u'Motorola CHS', + 0x00159B: u'Nortel', + 0x00159C: u'B-KYUNG SYSTEM Co.,Ltd.', + 0x00159D: u'Minicom Advanced Systems ltd', + 0x00159E: u'Saitek plc', + 0x00159F: u'Terascala, Inc.', + 0x0015A0: u'Nokia Danmark A/S', + 0x0015A1: u'SINTERS SAS', + 0x0015A2: u'ARRIS International', + 0x0015A3: u'ARRIS International', + 0x0015A4: u'ARRIS International', + 0x0015A5: u'DCI Co., Ltd.', + 0x0015A6: u'Digital Electronics Products Ltd.', + 0x0015A7: u'Robatech AG', + 0x0015A8: u'Motorola Mobile Devices', + 0x0015A9: u'KWANG WOO I&C CO.,LTD', + 0x0015AA: u'Rextechnik International Co.,', + 0x0015AB: u'PRO CO SOUND INC', + 0x0015AC: u'Capelon AB', + 0x0015AD: u'Accedian Networks', + 0x0015AE: u'kyung il', + 0x0015AF: u'AzureWave Technologies, Inc.', + 0x0015B0: u'AUTOTELENET CO.,LTD', + 0x0015B1: u'Ambient Corporation', + 0x0015B2: u'Advanced 
Industrial Computer, Inc.', + 0x0015B3: u'Caretech AB', + 0x0015B4: u'Polymap Wireless LLC', + 0x0015B5: u'CI Network Corp.', + 0x0015B6: u'ShinMaywa Industries, Ltd.', + 0x0015B7: u'Toshiba', + 0x0015B8: u'Tahoe', + 0x0015B9: u'Samsung Electronics Co., Ltd.', + 0x0015BA: u'iba AG', + 0x0015BB: u'SMA Technologie AG', + 0x0015BC: u'Develco', + 0x0015BD: u'Group 4 Technology Ltd', + 0x0015BE: u'Iqua Ltd.', + 0x0015BF: u'technicob', + 0x0015C0: u'DIGITAL TELEMEDIA CO.,LTD.', + 0x0015C1: u'SONY Computer Entertainment inc,', + 0x0015C2: u'3M Germany', + 0x0015C3: u'Ruf Telematik AG', + 0x0015C4: u'FLOVEL CO., LTD.', + 0x0015C5: u'Dell Inc', + 0x0015C6: u'Cisco Systems', + 0x0015C7: u'Cisco Systems', + 0x0015C8: u'FlexiPanel Ltd', + 0x0015C9: u'Gumstix, Inc', + 0x0015CA: u'TeraRecon, Inc.', + 0x0015CB: u'Surf Communication Solutions Ltd.', + 0x0015CC: u'TEPCO UQUEST, LTD.', + 0x0015CD: u'Exartech International Corp.', + 0x0015CE: u'ARRIS International', + 0x0015CF: u'ARRIS International', + 0x0015D0: u'ARRIS International', + 0x0015D1: u'ARRIS International', + 0x0015D2: u'Xantech Corporation', + 0x0015D3: u'Pantech&Curitel Communications, Inc.', + 0x0015D4: u'Emitor AB', + 0x0015D5: u'NICEVT', + 0x0015D6: u'OSLiNK Sp. z o.o.', + 0x0015D7: u'Reti Corporation', + 0x0015D8: u'Interlink Electronics', + 0x0015D9: u'PKC Electronics Oy', + 0x0015DA: u'IRITEL A.D.', + 0x0015DB: u'Canesta Inc.', + 0x0015DC: u'KT&C Co., Ltd.', + 0x0015DD: u'IP Control Systems Ltd.', + 0x0015DE: u'Nokia Danmark A/S', + 0x0015DF: u'Clivet S.p.A.', + 0x0015E0: u'Ericsson Mobile Platforms', + 0x0015E1: u'picoChip Designs Ltd', + 0x0015E2: u'Wissenschaftliche Geraetebau Dr. Ing. H. 
Knauer GmbH', + 0x0015E3: u'Dream Technologies Corporation', + 0x0015E4: u'Zimmer Elektromedizin', + 0x0015E5: u'Cheertek Inc.', + 0x0015E6: u'MOBILE TECHNIKA Inc.', + 0x0015E7: u'Quantec ProAudio', + 0x0015E8: u'Nortel', + 0x0015E9: u'D-Link Corporation', + 0x0015EA: u'Tellumat (Pty) Ltd', + 0x0015EB: u'ZTE CORPORATION', + 0x0015EC: u'Boca Devices LLC', + 0x0015ED: u'Fulcrum Microsystems, Inc.', + 0x0015EE: u'Omnex Control Systems', + 0x0015EF: u'NEC TOKIN Corporation', + 0x0015F0: u'EGO BV', + 0x0015F1: u'KYLINK Communications Corp.', + 0x0015F2: u'ASUSTek COMPUTER INC.', + 0x0015F3: u'PELTOR AB', + 0x0015F4: u'Eventide', + 0x0015F5: u'Sustainable Energy Systems', + 0x0015F6: u'SCIENCE AND ENGINEERING SERVICES, INC.', + 0x0015F7: u'Wintecronics Ltd.', + 0x0015F8: u'Kingtronics Industrial Co. Ltd.', + 0x0015F9: u'Cisco Systems', + 0x0015FA: u'Cisco Systems', + 0x0015FB: u'setex schermuly textile computer gmbh', + 0x0015FC: u'Startco Engineering Ltd.', + 0x0015FD: u'Complete Media Systems', + 0x0015FE: u'SCHILLING ROBOTICS LLC', + 0x0015FF: u'Novatel Wireless, Inc.', + 0x001600: u'CelleBrite Mobile Synchronization', + 0x001601: u'Buffalo Inc.', + 0x001602: u'CEYON TECHNOLOGY CO.,LTD.', + 0x001603: u'PRIVATE', + 0x001604: u'Sigpro', + 0x001605: u'YORKVILLE SOUND INC.', + 0x001606: u'Ideal Industries', + 0x001607: u'Curves International Inc.', + 0x001608: u'Sequans Communications', + 0x001609: u'Unitech electronics co., ltd.', + 0x00160A: u'SWEEX Europe BV', + 0x00160B: u'TVWorks LLC', + 0x00160C: u'LPL DEVELOPMENT S.A. 
DE C.V', + 0x00160D: u'Be Here Corporation', + 0x00160E: u'Optica Technologies Inc.', + 0x00160F: u'BADGER METER INC', + 0x001610: u'Carina Technology', + 0x001611: u'Altecon Srl', + 0x001612: u'Otsuka Electronics Co., Ltd.', + 0x001613: u'LibreStream Technologies Inc.', + 0x001614: u'Picosecond Pulse Labs', + 0x001615: u'Nittan Company, Limited', + 0x001616: u'BROWAN COMMUNICATION INC.', + 0x001617: u'MSI', + 0x001618: u'HIVION Co., Ltd.', + 0x001619: u'La Factoría de Comunicaciones Aplicadas,S.L.', + 0x00161A: u'Dametric AB', + 0x00161B: u'Micronet Corporation', + 0x00161C: u'e:cue', + 0x00161D: u'Innovative Wireless Technologies, Inc.', + 0x00161E: u'Woojinnet', + 0x00161F: u'SUNWAVETEC Co., Ltd.', + 0x001620: u'Sony Ericsson Mobile Communications AB', + 0x001621: u'Colorado Vnet', + 0x001622: u'BBH SYSTEMS GMBH', + 0x001623: u'Interval Media', + 0x001624: u'PRIVATE', + 0x001625: u'Impinj, Inc.', + 0x001626: u'Motorola CHS', + 0x001627: u'embedded-logic DESIGN AND MORE GmbH', + 0x001628: u'Ultra Electronics Manufacturing and Card Systems', + 0x001629: u'Nivus GmbH', + 0x00162A: u'Antik computers & communications s.r.o.', + 0x00162B: u'Togami Electric Mfg.co.,Ltd.', + 0x00162C: u'Xanboo', + 0x00162D: u'STNet Co., Ltd.', + 0x00162E: u'Space Shuttle Hi-Tech Co., Ltd.', + 0x00162F: u'Geutebrück GmbH', + 0x001630: u'Vativ Technologies', + 0x001631: u'Xteam', + 0x001632: u'SAMSUNG ELECTRONICS CO., LTD.', + 0x001633: u'Oxford Diagnostics Ltd.', + 0x001634: u'Mathtech, Inc.', + 0x001635: u'Hewlett Packard', + 0x001636: u'Quanta Computer Inc.', + 0x001637: u'Citel Srl', + 0x001638: u'TECOM Co., Ltd.', + 0x001639: u'UBIQUAM Co.,Ltd', + 0x00163A: u'YVES TECHNOLOGY CO., LTD.', + 0x00163B: u'VertexRSI/General Dynamics', + 0x00163C: u'Rebox B.V.', + 0x00163D: u'Tsinghua Tongfang Legend Silicon Tech. 
Co., Ltd.', + 0x00163E: u'Xensource, Inc.', + 0x00163F: u'CReTE SYSTEMS Inc.', + 0x001640: u'Asmobile Communication Inc.', + 0x001641: u'USI', + 0x001642: u'Pangolin', + 0x001643: u'Sunhillo Corproation', + 0x001644: u'LITE-ON Technology Corp.', + 0x001645: u'Power Distribution, Inc.', + 0x001646: u'Cisco Systems', + 0x001647: u'Cisco Systems', + 0x001648: u'SSD Company Limited', + 0x001649: u'SetOne GmbH', + 0x00164A: u'Vibration Technology Limited', + 0x00164B: u'Quorion Data Systems GmbH', + 0x00164C: u'PLANET INT Co., Ltd', + 0x00164D: u'Alcatel North America IP Division', + 0x00164E: u'Nokia Danmark A/S', + 0x00164F: u'World Ethnic Broadcastin Inc.', + 0x001650: u'EYAL MICROWAVE', + 0x001651: u'PRIVATE', + 0x001652: u'Hoatech Technologies, Inc.', + 0x001653: u'LEGO System A/S IE Electronics Division', + 0x001654: u'Flex-P Industries Sdn. Bhd.', + 0x001655: u'FUHO TECHNOLOGY Co., LTD', + 0x001656: u'Nintendo Co., Ltd.', + 0x001657: u'Aegate Ltd', + 0x001658: u'Fusiontech Technologies Inc.', + 0x001659: u'Z.M.P. 
RADWAG', + 0x00165A: u'Harman Specialty Group', + 0x00165B: u'Grip Audio', + 0x00165C: u'Trackflow Ltd', + 0x00165D: u'AirDefense, Inc.', + 0x00165E: u'Precision I/O', + 0x00165F: u'Fairmount Automation', + 0x001660: u'Nortel', + 0x001661: u'Novatium Solutions (P) Ltd', + 0x001662: u'Liyuh Technology Ltd.', + 0x001663: u'KBT Mobile', + 0x001664: u'Prod-El SpA', + 0x001665: u'Cellon France', + 0x001666: u'Quantier Communication Inc.', + 0x001667: u'A-TEC Subsystem INC.', + 0x001668: u'Eishin Electronics', + 0x001669: u'MRV Communication (Networks) LTD', + 0x00166A: u'TPS', + 0x00166B: u'Samsung Electronics', + 0x00166C: u'Samsung Electonics Digital Video System Division', + 0x00166D: u'Yulong Computer Telecommunication Scientific(shenzhen)Co.,Lt', + 0x00166E: u'Arbitron Inc.', + 0x00166F: u'Intel Corporation', + 0x001670: u'SKNET Corporation', + 0x001671: u'Symphox Information Co.', + 0x001672: u'Zenway enterprise ltd', + 0x001673: u'PRIVATE', + 0x001674: u'EuroCB (Phils.), Inc.', + 0x001675: u'Motorola MDb', + 0x001676: u'Intel Corporation', + 0x001677: u'Bihl+Wiedemann GmbH', + 0x001678: u'SHENZHEN BAOAN GAOKE ELECTRONICS CO., LTD', + 0x001679: u'eOn Communications', + 0x00167A: u'Skyworth Overseas Dvelopment Ltd.', + 0x00167B: u'Haver&Boecker', + 0x00167C: u'iRex Technologies BV', + 0x00167D: u'Sky-Line', + 0x00167E: u'DIBOSS.CO.,LTD', + 0x00167F: u'Bluebird Soft Inc.', + 0x001680: u'Bally Gaming + Systems', + 0x001681: u'Vector Informatik GmbH', + 0x001682: u'Pro Dex, Inc', + 0x001683: u'WEBIO International Co.,.Ltd.', + 0x001684: u'Donjin Co.,Ltd.', + 0x001685: u'FRWD Technologies Ltd.', + 0x001686: u'Karl Storz Imaging', + 0x001687: u'Chubb CSC-Vendor AP', + 0x001688: u'ServerEngines LLC', + 0x001689: u'Pilkor Electronics Co., Ltd', + 0x00168A: u'id-Confirm Inc', + 0x00168B: u'Paralan Corporation', + 0x00168C: u'DSL Partner AS', + 0x00168D: u'KORWIN CO., Ltd.', + 0x00168E: u'Vimicro corporation', + 0x00168F: u'GN Netcom as', + 0x001690: u'J-TEK INCORPORATION', 
+ 0x001691: u'Moser-Baer AG', + 0x001692: u'Scientific-Atlanta, Inc.', + 0x001693: u'PowerLink Technology Inc.', + 0x001694: u'Sennheiser Communications A/S', + 0x001695: u'AVC Technology Limited', + 0x001696: u'QDI Technology (H.K.) Limited', + 0x001697: u'NEC Corporation', + 0x001698: u'T&A Mobile Phones SAS', + 0x001699: u'PRIVATE', + 0x00169A: u'Quadrics Ltd', + 0x00169B: u'Alstom Transport', + 0x00169C: u'Cisco Systems', + 0x00169D: u'Cisco Systems', + 0x00169E: u'TV One Ltd', + 0x00169F: u'Vimtron Electronics Co., Ltd.', + 0x0016A0: u'Auto-Maskin', + 0x0016A1: u'3Leaf Networks', + 0x0016A2: u'CentraLite Systems, Inc.', + 0x0016A3: u'TEAM ARTECHE, S.A.', + 0x0016A4: u'Ezurio Ltd', + 0x0016A5: u'Tandberg Storage ASA', + 0x0016A6: u'Dovado FZ-LLC', + 0x0016A7: u'AWETA G&P', + 0x0016A8: u'CWT CO., LTD.', + 0x0016A9: u'2EI', + 0x0016AA: u'Kei Communication Technology Inc.', + 0x0016AB: u'PBI-Dansensor A/S', + 0x0016AC: u'Toho Technology Corp.', + 0x0016AD: u'BT-Links Company Limited', + 0x0016AE: u'INVENTEL', + 0x0016AF: u'Shenzhen Union Networks Equipment Co.,Ltd.', + 0x0016B0: u'VK Corporation', + 0x0016B1: u'KBS', + 0x0016B2: u'DriveCam Inc', + 0x0016B3: u'Photonicbridges (China) Co., Ltd.', + 0x0016B4: u'PRIVATE', + 0x0016B5: u'Motorola CHS', + 0x0016B6: u'Cisco-Linksys', + 0x0016B7: u'Seoul Commtech', + 0x0016B8: u'Sony Ericsson Mobile Communications', + 0x0016B9: u'ProCurve Networking', + 0x0016BA: u'WEATHERNEWS INC.', + 0x0016BB: u'Law-Chain Computer Technology Co Ltd', + 0x0016BC: u'Nokia Danmark A/S', + 0x0016BD: u'ATI Industrial Automation', + 0x0016BE: u'INFRANET, Inc.', + 0x0016BF: u'PaloDEx Group Oy', + 0x0016C0: u'Semtech Corporation', + 0x0016C1: u'Eleksen Ltd', + 0x0016C2: u'Avtec Systems Inc', + 0x0016C3: u'BA Systems Inc', + 0x0016C4: u'SiRF Technology, Inc.', + 0x0016C5: u'Shenzhen Xing Feng Industry Co.,Ltd', + 0x0016C6: u'North Atlantic Industries', + 0x0016C7: u'Cisco Systems', + 0x0016C8: u'Cisco Systems', + 0x0016C9: u'NAT Seattle, Inc.', + 
0x0016CA: u'Nortel', + 0x0016CB: u'Apple Computer', + 0x0016CC: u'Xcute Mobile Corp.', + 0x0016CD: u'HIJI HIGH-TECH CO., LTD.', + 0x0016CE: u'Hon Hai Precision Ind. Co., Ltd.', + 0x0016CF: u'Hon Hai Precision Ind. Co., Ltd.', + 0x0016D0: u'ATech elektronika d.o.o.', + 0x0016D1: u'ZAT a.s.', + 0x0016D2: u'Caspian', + 0x0016D3: u'Wistron Corporation', + 0x0016D4: u'Compal Communications, Inc.', + 0x0016D5: u'Synccom Co., Ltd', + 0x0016D6: u'TDA Tech Pty Ltd', + 0x0016D7: u'Sunways AG', + 0x0016D8: u'Senea AB', + 0x0016D9: u'NINGBO BIRD CO.,LTD.', + 0x0016DA: u'Futronic Technology Co. Ltd.', + 0x0016DB: u'Samsung Electronics Co., Ltd.', + 0x0016DC: u'ARCHOS', + 0x0016DD: u'Gigabeam Corporation', + 0x0016DE: u'FAST Inc', + 0x0016DF: u'Lundinova AB', + 0x0016E0: u'3Com Europe Ltd', + 0x0016E1: u'SiliconStor, Inc.', + 0x0016E2: u'American Fibertek, Inc.', + 0x0016E3: u'ASKEY COMPUTER CORP.', + 0x0016E4: u'VANGUARD SECURITY ENGINEERING CORP.', + 0x0016E5: u'FORDLEY DEVELOPMENT LIMITED', + 0x0016E6: u'GIGA-BYTE TECHNOLOGY CO.,LTD.', + 0x0016E7: u'Dynamix Promotions Limited', + 0x0016E8: u'Sigma Designs, Inc.', + 0x0016E9: u'Tiba Medical Inc', + 0x0016EA: u'Intel Corporation', + 0x0016EB: u'Intel Corporation', + 0x0016EC: u'Elitegroup Computer Systems Co., Ltd.', + 0x0016ED: u'Integrian, Inc.', + 0x0016EE: u'RoyalDigital Inc.', + 0x0016EF: u'Koko Fitness, Inc.', + 0x0016F0: u'Zermatt Systems, Inc', + 0x0016F1: u'OmniSense, LLC', + 0x0016F2: u'Dmobile System Co., Ltd.', + 0x0016F3: u'CAST Information Co., Ltd', + 0x0016F4: u'Eidicom Co., Ltd.', + 0x0016F5: u'Dalian Golden Hualu Digital Technology Co.,Ltd', + 0x0016F6: u'Video Products Group', + 0x0016F7: u'L-3 Communications, Electrodynamics, Inc.', + 0x0016F8: u'AVIQTECH TECHNOLOGY CO., LTD.', + 0x0016F9: u'CETRTA POT, d.o.o., Kranj', + 0x0016FA: u'ECI Telecom Ltd.', + 0x0016FB: u'SHENZHEN MTC CO.,LTD.', + 0x0016FC: u'TOHKEN CO.,LTD.', + 0x0016FD: u'Jaty Electronics', + 0x0016FE: u'Alps Electric Co., Ltd', + 0x0016FF: 
u'Wamin Optocomm Mfg Corp', + 0x001700: u'Motorola MDb', + 0x001701: u'KDE, Inc.', + 0x001702: u'Osung Midicom Co., Ltd', + 0x001703: u'MOSDAN Internation Co.,Ltd', + 0x001704: u'Shinco Electronics Group Co.,Ltd', + 0x001705: u'Methode Electronics', + 0x001706: u'Techfaith Wireless Communication Technology Limited.', + 0x001707: u'InGrid, Inc', + 0x001708: u'Hewlett Packard', + 0x001709: u'Exalt Communications', + 0x00170A: u'INEW DIGITAL COMPANY', + 0x00170B: u'Contela, Inc.', + 0x00170C: u'Benefon Oyj', + 0x00170D: u'Dust Networks Inc.', + 0x00170E: u'Cisco Systems', + 0x00170F: u'Cisco Systems', + 0x001710: u'Casa Systems Inc.', + 0x001711: u'GE Healthcare Bio-Sciences AB', + 0x001712: u'ISCO International', + 0x001713: u'Tiger NetCom', + 0x001714: u'BR Controls Nederland bv', + 0x001715: u'Qstik', + 0x001716: u'Qno Technology Inc.', + 0x001717: u'Leica Geosystems AG', + 0x001718: u'Vansco Electronics Oy', + 0x001719: u'AudioCodes USA, Inc', + 0x00171A: u'Winegard Company', + 0x00171B: u'Innovation Lab Corp.', + 0x00171C: u'NT MicroSystems, Inc.', + 0x00171D: u'DIGIT', + 0x00171E: u'Theo Benning GmbH & Co. 
KG', + 0x00171F: u'IMV Corporation', + 0x001720: u'Image Sensing Systems, Inc.', + 0x001721: u'FITRE S.p.A.', + 0x001722: u'Hanazeder Electronic GmbH', + 0x001723: u'Summit Data Communications', + 0x001724: u'Studer Professional Audio GmbH', + 0x001725: u'Liquid Computing', + 0x001726: u'm2c Electronic Technology Ltd.', + 0x001727: u'Thermo Ramsey Italia s.r.l.', + 0x001728: u'Selex Communications', + 0x001729: u'Ubicod Co.LTD', + 0x00172A: u'Proware Technology Corp.', + 0x00172B: u'Global Technologies Inc.', + 0x00172C: u'TAEJIN INFOTECH', + 0x00172D: u'Axcen Photonics Corporation', + 0x00172E: u'FXC Inc.', + 0x00172F: u'NeuLion Incorporated', + 0x001730: u'Automation Electronics', + 0x001731: u'ASUSTek COMPUTER INC.', + 0x001732: u'Science-Technical Center "RISSA"', + 0x001733: u'neuf cegetel', + 0x001734: u'LGC Wireless Inc.', + 0x001735: u'PRIVATE', + 0x001736: u'iiTron Inc.', + 0x001737: u'Industrie Dial Face S.p.A.', + 0x001738: u'XIV', + 0x001739: u'Bright Headphone Electronics Company', + 0x00173A: u'Edge Integration Systems Inc.', + 0x00173B: u'Arched Rock Corporation', + 0x00173C: u'Extreme Engineering Solutions', + 0x00173D: u'Neology', + 0x00173E: u'LeucotronEquipamentos Ltda.', + 0x00173F: u'Belkin Corporation', + 0x001740: u'Technologies Labtronix', + 0x001741: u'DEFIDEV', + 0x001742: u'FUJITSU LIMITED', + 0x001743: u'Deck Srl', + 0x001744: u'Araneo Ltd.', + 0x001745: u'INNOTZ CO., Ltd', + 0x001746: u'Freedom9 Inc.', + 0x001747: u'Trimble', + 0x001748: u'Neokoros Brasil Ltda', + 0x001749: u'HYUNDAE YONG-O-SA CO.,LTD', + 0x00174A: u'SOCOMEC', + 0x00174B: u'Nokia Danmark A/S', + 0x00174C: u'Millipore', + 0x00174D: u'DYNAMIC NETWORK FACTORY, INC.', + 0x00174E: u'Parama-tech Co.,Ltd.', + 0x00174F: u'iCatch Inc.', + 0x001750: u'GSI Group, MicroE Systems', + 0x001751: u'Online Corporation', + 0x001752: u'DAGS, Inc', + 0x001753: u'nFore Technology Inc.', + 0x001754: u'Arkino Corporation., Ltd', + 0x001755: u'GE Security', + 0x001756: u'Vinci Labs Oy', + 
0x001757: u'RIX TECHNOLOGY LIMITED', + 0x001758: u'ThruVision Ltd', + 0x001759: u'Cisco Systems', + 0x00175A: u'Cisco Systems', + 0x00175B: u'ACS Solutions Switzerland Ltd.', + 0x00175C: u'SHARP CORPORATION', + 0x00175D: u'Dongseo system.', + 0x00175E: u'Anta Systems, Inc.', + 0x00175F: u'XENOLINK Communications Co., Ltd.', + 0x001760: u'Naito Densei Machida MFG.CO.,LTD', + 0x001761: u'ZKSoftware Inc.', + 0x001762: u'Solar Technology, Inc.', + 0x001763: u'Essentia S.p.A.', + 0x001764: u'ATMedia GmbH', + 0x001765: u'Nortel', + 0x001766: u'Accense Technology, Inc.', + 0x001767: u'Earforce AS', + 0x001768: u'Zinwave Ltd', + 0x001769: u'Cymphonix Corp', + 0x00176A: u'Avago Technologies', + 0x00176B: u'Kiyon, Inc.', + 0x00176C: u'Pivot3, Inc.', + 0x00176D: u'CORE CORPORATION', + 0x00176E: u'DUCATI SISTEMI', + 0x00176F: u'PAX Computer Technology(Shenzhen) Ltd.', + 0x001770: u'Arti Industrial Electronics Ltd.', + 0x001771: u'APD Communications Ltd', + 0x001772: u'ASTRO Strobel Kommunikationssysteme GmbH', + 0x001773: u'Laketune Technologies Co. Ltd', + 0x001774: u'Elesta GmbH', + 0x001775: u'TTE Germany GmbH', + 0x001776: u'Meso Scale Diagnostics, LLC', + 0x001777: u'Obsidian Research Corporation', + 0x001778: u'Central Music Co.', + 0x001779: u'QuickTel', + 0x00177A: u'ASSA ABLOY AB', + 0x00177B: u'Azalea Networks inc', + 0x00177C: u'D-Link India Ltd', + 0x00177D: u'IDT International Limited', + 0x00177E: u'Meshcom Technologies Inc.', + 0x00177F: u'Worldsmart Retech', + 0x001780: u'Applera Holding B.V. 
Singapore Operations', + 0x001781: u'Greystone Data System, Inc.', + 0x001782: u'LoBenn Inc.', + 0x001783: u'Texas Instruments', + 0x001784: u'Motorola Mobile Devices', + 0x001785: u'Sparr Electronics Ltd', + 0x001786: u'wisembed', + 0x001787: u'Brother, Brother & Sons ApS', + 0x001788: u'Philips Lighting BV', + 0x001789: u'Zenitron Corporation', + 0x00178A: u'DARTS TECHNOLOGIES CORP.', + 0x00178B: u'Teledyne Technologies Incorporated', + 0x00178C: u'Independent Witness, Inc', + 0x00178D: u'Checkpoint Systems, Inc.', + 0x00178E: u'Gunnebo Cash Automation AB', + 0x00178F: u'NINGBO YIDONG ELECTRONIC CO.,LTD.', + 0x001790: u'HYUNDAI DIGITECH Co, Ltd.', + 0x001791: u'LinTech GmbH', + 0x001792: u'Falcom Wireless Comunications Gmbh', + 0x001793: u'Tigi Corporation', + 0x001794: u'Cisco Systems', + 0x001795: u'Cisco Systems', + 0x001796: u'Rittmeyer AG', + 0x001797: u'Telsy Elettronica S.p.A.', + 0x001798: u'Azonic Technology Co., LTD', + 0x001799: u'SmarTire Systems Inc.', + 0x00179A: u'D-Link Corporation', + 0x00179B: u'Chant Sincere CO., LTD.', + 0x00179C: u'DEPRAG SCHULZ GMBH u. 
CO.', + 0x00179D: u'Kelman Limited', + 0x00179E: u'Sirit Inc', + 0x00179F: u'Apricorn', + 0x0017A0: u'RoboTech srl', + 0x0017A1: u'3soft inc.', + 0x0017A2: u'Camrivox Ltd.', + 0x0017A3: u'MIX s.r.l.', + 0x0017A4: u'Global Data Services', + 0x0017A5: u'TrendChip Technologies Corp.', + 0x0017A6: u'YOSIN ELECTRONICS CO., LTD.', + 0x0017A7: u'Mobile Computing Promotion Consortium', + 0x0017A8: u'EDM Corporation', + 0x0017A9: u'Sentivision', + 0x0017AA: u'elab-experience inc.', + 0x0017AB: u'Nintendo Co., Ltd.', + 0x0017AC: u'O\'Neil Product Development Inc.', + 0x0017AD: u'AceNet Corporation', + 0x0017AE: u'GAI-Tronics', + 0x0017AF: u'Enermet', + 0x0017B0: u'Nokia Danmark A/S', + 0x0017B1: u'ACIST Medical Systems, Inc.', + 0x0017B2: u'SK Telesys', + 0x0017B3: u'Aftek Infosys Limited', + 0x0017B4: u'Remote Security Systems, LLC', + 0x0017B5: u'Peerless Systems Corporation', + 0x0017B6: u'Aquantia', + 0x0017B7: u'Tonze Technology Co.', + 0x0017B8: u'NOVATRON CO., LTD.', + 0x0017B9: u'Gambro Lundia AB', + 0x0017BA: u'SEDO CO., LTD.', + 0x0017BB: u'Syrinx Industrial Electronics', + 0x0017BC: u'Touchtunes Music Corporation', + 0x0017BD: u'Tibetsystem', + 0x0017BE: u'Tratec Telecom B.V.', + 0x0017BF: u'Coherent Research Limited', + 0x0017C0: u'PureTech Systems, Inc.', + 0x0017C1: u'CM Precision Technology LTD.', + 0x0017C2: u'Pirelli Broadband Solutions', + 0x0017C3: u'KTF Technologies Inc.', + 0x0017C4: u'Quanta Microsystems, INC.', + 0x0017C5: u'SonicWALL', + 0x0017C6: u'Labcal Technologies', + 0x0017C7: u'MARA Systems Consulting AB', + 0x0017C8: u'Kyocera Mita Corporation', + 0x0017C9: u'Samsung Electronics Co., Ltd.', + 0x0017CA: u'BenQ Corporation', + 0x0017CB: u'Juniper Networks', + 0x0017CC: u'Alcatel USA Sourcing LP', + 0x0017CD: u'CEC Wireless R&D Ltd.', + 0x0017CE: u'MB International Telecom Labs srl', + 0x0017CF: u'iMCA-GmbH', + 0x0017D0: u'Opticom Communications, LLC', + 0x0017D1: u'Nortel', + 0x0017D2: u'THINLINX PTY LTD', + 0x0017D3: u'Etymotic Research, Inc.', 
+ 0x0017D4: u'Monsoon Multimedia, Inc', + 0x0017D5: u'Samsung Electronics Co., Ltd.', + 0x0017D6: u'Bluechips Microhouse Co.,Ltd.', + 0x0017D7: u'Input/Output Inc.', + 0x0017D8: u'Magnum Semiconductor, Inc.', + 0x0017D9: u'AAI Corporation', + 0x0017DA: u'Spans Logic', + 0x0017DB: u'PRIVATE', + 0x0017DC: u'DAEMYUNG ZERO1', + 0x0017DD: u'Clipsal Australia', + 0x0017DE: u'Advantage Six Ltd', + 0x0017DF: u'Cisco Systems', + 0x0017E0: u'Cisco Systems', + 0x0017E1: u'DACOS Technologies Co., Ltd.', + 0x0017E2: u'Motorola Mobile Devices', + 0x0017E3: u'Texas Instruments', + 0x0017E4: u'Texas Instruments', + 0x0017E5: u'Texas Instruments', + 0x0017E6: u'Texas Instruments', + 0x0017E7: u'Texas Instruments', + 0x0017E8: u'Texas Instruments', + 0x0017E9: u'Texas Instruments', + 0x0017EA: u'Texas Instruments', + 0x0017EB: u'Texas Instruments', + 0x0017EC: u'Texas Instruments', + 0x0017ED: u'WooJooIT Ltd.', + 0x0017EE: u'Motorola CHS', + 0x0017EF: u'Blade Network Technologies, Inc.', + 0x0017F0: u'SZCOM Broadband Network Technology Co.,Ltd', + 0x0017F1: u'Renu Electronics Pvt Ltd', + 0x0017F2: u'Apple Computer', + 0x0017F3: u'M/A-COM Wireless Systems', + 0x0017F4: u'ZERON ALLIANCE', + 0x0017F5: u'NEOPTEK', + 0x0017F6: u'Pyramid Meriden Inc.', + 0x0017F7: u'CEM Solutions Pvt Ltd', + 0x0017F8: u'Motech Industries Inc.', + 0x0017F9: u'Forcom Sp. z o.o.', + 0x0017FA: u'Microsoft Corporation', + 0x0017FB: u'FA', + 0x0017FC: u'Suprema Inc.', + 0x0017FD: u'Amulet Hotkey', + 0x0017FE: u'TALOS SYSTEM INC.', + 0x0017FF: u'PLAYLINE Co.,Ltd.', + 0x001800: u'UNIGRAND LTD', + 0x001801: u'Actiontec Electronics, Inc', + 0x001802: u'Alpha Networks Inc.', + 0x001803: u'ArcSoft Shanghai Co. 
LTD', + 0x001804: u'E-TEK DIGITAL TECHNOLOGY LIMITED', + 0x001805: u'Beijing InHand Networking', + 0x001806: u'Hokkei Industries Co., Ltd.', + 0x001807: u'Fanstel Corp.', + 0x001808: u'SightLogix, Inc.', + 0x001809: u'CRESYN', + 0x00180A: u'Meraki Networks, Inc.', + 0x00180B: u'Brilliant Telecommunications', + 0x00180C: u'Optelian Access Networks Corporation', + 0x00180D: u'Terabytes Server Storage Tech Corp', + 0x00180E: u'Avega Systems', + 0x00180F: u'Nokia Danmark A/S', + 0x001810: u'IPTrade S.A.', + 0x001811: u'Neuros Technology International, LLC.', + 0x001812: u'Beijing Xinwei Telecom Technology Co., Ltd.', + 0x001813: u'Sony Ericsson Mobile Communications', + 0x001814: u'Mitutoyo Corporation', + 0x001815: u'GZ Technologies, Inc.', + 0x001816: u'Ubixon Co., Ltd.', + 0x001817: u'D. E. Shaw Research, LLC', + 0x001818: u'Cisco Systems', + 0x001819: u'Cisco Systems', + 0x00181A: u'AVerMedia Technologies Inc.', + 0x00181B: u'TaiJin Metal Co., Ltd.', + 0x00181C: u'Exterity Limited', + 0x00181D: u'ASIA ELECTRONICS CO.,LTD', + 0x00181E: u'GDX Technologies Ltd.', + 0x00181F: u'Palmmicro Communications', + 0x001820: u'w5networks', + 0x001821: u'SINDORICOH', + 0x001822: u'CEC TELECOM CO.,LTD.', + 0x001823: u'Delta Electronics, Inc.', + 0x001824: u'Kimaldi Electronics, S.L.', + 0x001825: u'Wavion LTD', + 0x001826: u'Cale Access AB', + 0x001827: u'NEC PHILIPS UNIFIED SOLUTIONS NEDERLAND BV', + 0x001828: u'e2v technologies (UK) ltd.', + 0x001829: u'Gatsometer', + 0x00182A: u'Taiwan Video & Monitor', + 0x00182B: u'Softier', + 0x00182C: u'Ascend Networks, Inc.', + 0x00182D: u'Artec Group OÜ', + 0x00182E: u'Wireless Ventures USA', + 0x00182F: u'Texas Instruments', + 0x001830: u'Texas Instruments', + 0x001831: u'Texas Instruments', + 0x001832: u'Texas Instruments', + 0x001833: u'Texas Instruments', + 0x001834: u'Texas Instruments', + 0x001835: u'ITC', + 0x001836: u'Reliance Electric Limited', + 0x001837: u'Universal ABIT Co., Ltd.', + 0x001838: u'PanAccess 
Communications,Inc.', + 0x001839: u'Cisco-Linksys LLC', + 0x00183A: u'Westell Technologies', + 0x00183B: u'CENITS Co., Ltd.', + 0x00183C: u'Encore Software Limited', + 0x00183D: u'Vertex Link Corporation', + 0x00183E: u'Digilent, Inc', + 0x00183F: u'2Wire, Inc', + 0x001840: u'3 Phoenix, Inc.', + 0x001841: u'High Tech Computer Corp', + 0x001842: u'Nokia Danmark A/S', + 0x001843: u'Dawevision Ltd', + 0x001844: u'Heads Up Technologies, Inc.', + 0x001845: u'NPL Pulsar Ltd.', + 0x001846: u'Crypto S.A.', + 0x001847: u'AceNet Technology Inc.', + 0x001848: u'Vecima Networks Inc.', + 0x001849: u'Pigeon Point Systems', + 0x00184A: u'Catcher, Inc.', + 0x00184B: u'Las Vegas Gaming, Inc.', + 0x00184C: u'Bogen Communications', + 0x00184D: u'Netgear Inc.', + 0x00184E: u'Lianhe Technologies, Inc.', + 0x00184F: u'8 Ways Technology Corp.', + 0x001850: u'Secfone Kft', + 0x001851: u'SWsoft', + 0x001852: u'StorLink Semiconductors, Inc.', + 0x001853: u'Atera Networks LTD.', + 0x001854: u'Argard Co., Ltd', + 0x001855: u'Aeromaritime Systembau GmbH', + 0x001856: u'EyeFi, Inc', + 0x001857: u'Unilever R&D', + 0x001858: u'TagMaster AB', + 0x001859: u'Strawberry Linux Co.,Ltd.', + 0x00185A: u'uControl, Inc.', + 0x00185B: u'Network Chemistry, Inc', + 0x00185C: u'EDS Lab Pte Ltd', + 0x00185D: u'TAIGUEN TECHNOLOGY (SHEN-ZHEN) CO., LTD.', + 0x00185E: u'Nexterm Inc.', + 0x00185F: u'TAC Inc.', + 0x001860: u'SIM Technology Group Shanghai Simcom Ltd.,', + 0x001861: u'Ooma, Inc.', + 0x001862: u'Seagate Technology', + 0x001863: u'Veritech Electronics Limited', + 0x001864: u'Cybectec Inc.', + 0x001865: u'Bayer Diagnostics Sudbury Ltd', + 0x001866: u'Leutron Vision', + 0x001867: u'Evolution Robotics Retail', + 0x001868: u'Scientific Atlanta, A Cisco Company', + 0x001869: u'KINGJIM', + 0x00186A: u'Global Link Digital Technology Co,.LTD', + 0x00186B: u'Sambu Communics CO., LTD.', + 0x00186C: u'Neonode AB', + 0x00186D: u'Zhenjiang Sapphire Electronic Industry CO.', + 0x00186E: u'3COM Europe Ltd', + 
0x00186F: u'Setha Industria Eletronica LTDA', + 0x001870: u'E28 Shanghai Limited', + 0x001871: u'Global Data Services', + 0x001872: u'Expertise Engineering', + 0x001873: u'Cisco Systems', + 0x001874: u'Cisco Systems', + 0x001875: u'AnaCise Testnology Pte Ltd', + 0x001876: u'WowWee Ltd.', + 0x001877: u'Amplex A/S', + 0x001878: u'Mackware GmbH', + 0x001879: u'dSys', + 0x00187A: u'Wiremold', + 0x00187B: u'4NSYS Co. Ltd.', + 0x00187C: u'INTERCROSS, LLC', + 0x00187D: u'Armorlink shanghai Co. Ltd', + 0x00187E: u'RGB Spectrum', + 0x00187F: u'ZODIANET', + 0x001880: u'Mobilygen', + 0x001881: u'Buyang Electronics Industrial Co., Ltd', + 0x001882: u'Huawei Technologies Co., Ltd.', + 0x001883: u'FORMOSA21 INC.', + 0x001884: u'FON', + 0x001885: u'Avigilon Corporation', + 0x001886: u'EL-TECH, INC.', + 0x001887: u'Metasystem SpA', + 0x001888: u'GOTIVE a.s.', + 0x001889: u'WinNet Solutions Limited', + 0x00188A: u'Infinova LLC', + 0x00188B: u'Dell', + 0x00188C: u'Mobile Action Technology Inc.', + 0x00188D: u'Nokia Danmark A/S', + 0x00188E: u'Ekahau, Inc.', + 0x00188F: u'Montgomery Technology, Inc.', + 0x001890: u'RadioCOM, s.r.o.', + 0x001891: u'Zhongshan General K-mate Electronics Co., Ltd', + 0x001892: u'ads-tec GmbH', + 0x001893: u'SHENZHEN PHOTON BROADBAND TECHNOLOGY CO.,LTD', + 0x001894: u'zimocom', + 0x001895: u'Hansun Technologies Inc.', + 0x001896: u'Great Well Electronic LTD', + 0x001897: u'JESS-LINK PRODUCTS Co., LTD', + 0x001898: u'KINGSTATE ELECTRONICS CORPORATION', + 0x001899: u'ShenZhen jieshun Science&Technology Industry CO,LTD.', + 0x00189A: u'HANA Micron Inc.', + 0x00189B: u'Thomson Inc.', + 0x00189C: u'Weldex Corporation', + 0x00189D: u'Navcast Inc.', + 0x00189E: u'OMNIKEY GmbH.', + 0x00189F: u'Lenntek Corporation', + 0x0018A0: u'Cierma Ascenseurs', + 0x0018A1: u'Tiqit Computers, Inc.', + 0x0018A2: u'XIP Technology AB', + 0x0018A3: u'ZIPPY TECHNOLOGY CORP.', + 0x0018A4: u'Motorola Mobile Devices', + 0x0018A5: u'ADigit Technologies Corp.', + 0x0018A6: u'Persistent 
Systems, LLC', + 0x0018A7: u'Yoggie Security Systems LTD.', + 0x0018A8: u'AnNeal Technology Inc.', + 0x0018A9: u'Ethernet Direct Corporation', + 0x0018AA: u'PRIVATE', + 0x0018AB: u'BEIJING LHWT MICROELECTRONICS INC.', + 0x0018AC: u'Shanghai Jiao Da HISYS Technology Co. Ltd.', + 0x0018AD: u'NIDEC SANKYO CORPORATION', + 0x0018AE: u'Tongwei Video Technology CO.,LTD', + 0x0018AF: u'Samsung Electronics Co., Ltd.', + 0x0018B0: u'Nortel', + 0x0018B1: u'Blade Network Technologies', + 0x0018B2: u'ADEUNIS RF', + 0x0018B3: u'TEC WizHome Co., Ltd.', + 0x0018B4: u'Dawon Media Inc.', + 0x0018B5: u'Magna Carta', + 0x0018B6: u'S3C, Inc.', + 0x0018B7: u'D3 LED, LLC', + 0x0018B8: u'New Voice International AG', + 0x0018B9: u'Cisco Systems', + 0x0018BA: u'Cisco Systems', + 0x0018BB: u'Eliwell Controls srl', + 0x0018BC: u'ZAO NVP Bolid', + 0x0018BD: u'SHENZHEN DVBWORLD TECHNOLOGY CO., LTD.', + 0x0018BE: u'ANSA Corporation', + 0x0018BF: u'Essence Technology Solution, Inc.', + 0x0018C0: u'Motorola CHS', + 0x0018C1: u'Almitec Informática e Comércio Ltda.', + 0x0018C2: u'Firetide, Inc', + 0x0018C3: u'C&S Microwave', + 0x0018C4: u'Raba Technologies LLC', + 0x0018C5: u'Nokia Danmark A/S', + 0x0018C6: u'OPW Fuel Management Systems', + 0x0018C7: u'Real Time Automation', + 0x0018C8: u'ISONAS Inc.', + 0x0018C9: u'EOps Technology Limited', + 0x0018CA: u'Viprinet GmbH', + 0x0018CB: u'Tecobest Technology Limited', + 0x0018CC: u'AXIOHM SAS', + 0x0018CD: u'Erae Electronics Industry Co., Ltd', + 0x0018CE: u'Dreamtech Co., Ltd', + 0x0018CF: u'Baldor Electric Company', + 0x0018D0: u'@ROAD Inc', + 0x0018D1: u'Siemens Home & Office Comm. 
Devices', + 0x0018D2: u'High-Gain Antennas LLC', + 0x0018D3: u'TEAMCAST', + 0x0018D4: u'Unified Display Interface SIG', + 0x0018D5: u'REIGNCOM', + 0x0018D6: u'Swirlnet A/S', + 0x0018D7: u'Javad Navigation Systems Inc.', + 0x0018D8: u'ARCH METER Corporation', + 0x0018D9: u'Santosha Internatonal, Inc', + 0x0018DA: u'AMBER wireless GmbH', + 0x0018DB: u'EPL Technology Ltd', + 0x0018DC: u'Prostar Co., Ltd.', + 0x0018DD: u'Silicondust Engineering Ltd', + 0x0018DE: u'Intel Corporation', + 0x0018DF: u'The Morey Corporation', + 0x0018E0: u'ANAVEO', + 0x0018E1: u'Verkerk Service Systemen', + 0x0018E2: u'Topdata Sistemas de Automacao Ltda', + 0x0018E3: u'Visualgate Systems, Inc.', + 0x0018E4: u'YIGUANG', + 0x0018E5: u'Adhoco AG', + 0x0018E6: u'Computer Hardware Design SIA', + 0x0018E7: u'Cameo Communications, INC.', + 0x0018E8: u'Hacetron Corporation', + 0x0018E9: u'Numata Corporation', + 0x0018EA: u'Alltec GmbH', + 0x0018EB: u'BroVis Wireless Networks', + 0x0018EC: u'Welding Technology Corporation', + 0x0018ED: u'ACCUTECH INTERNATIONAL CO., LTD.', + 0x0018EE: u'Videology Imaging Solutions, Inc.', + 0x0018EF: u'Escape Communications, Inc.', + 0x0018F0: u'JOYTOTO Co., Ltd.', + 0x0018F1: u'Chunichi Denshi Co.,LTD.', + 0x0018F2: u'Beijing Tianyu Communication Equipment Co., Ltd', + 0x0018F3: u'ASUSTek COMPUTER INC.', + 0x0018F4: u'EO TECHNICS Co., Ltd.', + 0x0018F5: u'Shenzhen Streaming Video Technology Company Limited', + 0x0018F6: u'Thomson Telecom Belgium', + 0x0018F7: u'Kameleon Technologies', + 0x0018F8: u'Cisco-Linksys LLC', + 0x0018F9: u'VVOND, Inc.', + 0x0018FA: u'Yushin Precision Equipment Co.,Ltd.', + 0x0018FB: u'Compro Technology', + 0x0018FC: u'Altec Electronic AG', + 0x0018FD: u'Optimal Technologies International Inc.', + 0x0018FE: u'Hewlett Packard', + 0x0018FF: u'PowerQuattro Co.', + 0x001900: u'Intelliverese - DBA Voicecom', + 0x001901: u'F1MEDIA', + 0x001902: u'Cambridge Consultants Ltd', + 0x001903: u'Bigfoot Networks Inc', + 0x001904: u'WB Electronics Sp. 
z o.o.', + 0x001905: u'SCHRACK Seconet AG', + 0x001906: u'Cisco Systems', + 0x001907: u'Cisco Systems', + 0x001908: u'Duaxes Corporation', + 0x001909: u'Devi A/S', + 0x00190A: u'HASWARE INC.', + 0x00190B: u'Southern Vision Systems, Inc.', + 0x00190C: u'Encore Electronics, Inc.', + 0x00190D: u'IEEE 1394c', + 0x00190E: u'Atech Technology Co., Ltd.', + 0x00190F: u'Advansus Corp.', + 0x001910: u'Knick Elektronische Messgeraete GmbH & Co. KG', + 0x001911: u'Just In Mobile Information Technologies (Shanghai) Co., Ltd.', + 0x001912: u'Welcat Inc', + 0x001913: u'Chuang-Yi Network Equipment Co.Ltd.', + 0x001914: u'Winix Co., Ltd', + 0x001915: u'TECOM Co., Ltd.', + 0x001916: u'PayTec AG', + 0x001917: u'Posiflex Inc.', + 0x001918: u'Interactive Wear AG', + 0x001919: u'ASTEL Inc.', + 0x00191A: u'IRLINK', + 0x00191B: u'Sputnik Engineering AG', + 0x00191C: u'Sensicast Systems', + 0x00191D: u'Nintendo Co.,Ltd.', + 0x00191E: u'Beyondwiz Co., Ltd.', + 0x00191F: u'Microlink communications Inc.', + 0x001920: u'KUME electric Co.,Ltd.', + 0x001921: u'Elitegroup Computer System Co.', + 0x001922: u'CM Comandos Lineares', + 0x001923: u'Phonex Korea Co., LTD.', + 0x001924: u'LBNL Engineering', + 0x001925: u'Intelicis Corporation', + 0x001926: u'BitsGen Co., Ltd.', + 0x001927: u'ImCoSys Ltd', + 0x001928: u'Siemens AG, Transportation Systems', + 0x001929: u'2M2B Montadora de Maquinas Bahia Brasil LTDA', + 0x00192A: u'Antiope Associates', + 0x00192B: u'Hexagram, Inc.', + 0x00192C: u'Motorola Mobile Devices', + 0x00192D: u'Nokia Corporation', + 0x00192E: u'Spectral Instruments, Inc.', + 0x00192F: u'Cisco Systems', + 0x001930: u'Cisco Systems', + 0x001931: u'Balluff GmbH', + 0x001932: u'Gude Analog- und Digialsysteme GmbH', + 0x001933: u'Strix Systems, Inc.', + 0x001934: u'TRENDON TOUCH TECHNOLOGY CORP.', + 0x001935: u'Duerr Dental GmbH & Co. 
KG', + 0x001936: u'STERLITE OPTICAL TECHNOLOGIES LIMITED', + 0x001937: u'CommerceGuard AB', + 0x001938: u'UMB Communications Co., Ltd.', + 0x001939: u'Gigamips', + 0x00193A: u'OESOLUTIONS', + 0x00193B: u'Deliberant LLC', + 0x00193C: u'HighPoint Technologies Incorporated', + 0x00193D: u'GMC Guardian Mobility Corp.', + 0x00193E: u'PIRELLI BROADBAND SOLUTIONS', + 0x00193F: u'RDI technology(Shenzhen) Co.,LTD', + 0x001940: u'Rackable Systems', + 0x001941: u'Pitney Bowes, Inc', + 0x001942: u'ON SOFTWARE INTERNATIONAL LIMITED', + 0x001943: u'Belden', + 0x001944: u'Fossil Partners, L.P.', + 0x001945: u'Ten-Tec Inc.', + 0x001946: u'Cianet Industria e Comercio S/A', + 0x001947: u'Scientific Atlanta, A Cisco Company', + 0x001948: u'AireSpider Networks', + 0x001949: u'TENTEL COMTECH CO., LTD.', + 0x00194A: u'TESTO AG', + 0x00194B: u'SAGEM COMMUNICATION', + 0x00194C: u'Fujian Stelcom information & Technology CO.,Ltd', + 0x00194D: u'Avago Technologies Sdn Bhd', + 0x00194E: u'Ultra Electronics - TCS (Tactical Communication Systems)', + 0x00194F: u'Nokia Danmark A/S', + 0x001950: u'Harman Multimedia', + 0x001951: u'NETCONS, s.r.o.', + 0x001952: u'ACOGITO Co., Ltd', + 0x001953: u'Chainleader Communications Corp.', + 0x001954: u'Leaf Corporation.', + 0x001955: u'Cisco Systems', + 0x001956: u'Cisco Systems', + 0x001957: u'Saafnet Canada Inc.', + 0x001958: u'Bluetooth SIG, Inc.', + 0x001959: u'Staccato Communications Inc.', + 0x00195A: u'Jenaer Antriebstechnik GmbH', + 0x00195B: u'D-Link Corporation', + 0x00195C: u'Innotech Corporation', + 0x00195D: u'ShenZhen XinHuaTong Opto Electronics Co.,Ltd', + 0x00195E: u'Motorola CHS', + 0x00195F: u'Valemount Networks Corporation', + 0x001960: u'DoCoMo Systems, Inc.', + 0x001961: u'Blaupunkt GmbH', + 0x001962: u'Commerciant, LP', + 0x001963: u'Sony Ericsson Mobile Communications AB', + 0x001964: u'Doorking Inc.', + 0x001965: u'YuHua TelTech (ShangHai) Co., Ltd.', + 0x001966: u'Asiarock Technology Limited', + 0x001967: u'TELDAT Sp.J.', + 
0x001968: u'Digital Video Networks(Shanghai) CO. LTD.', + 0x001969: u'Nortel', + 0x00196A: u'MikroM GmbH', + 0x00196B: u'Danpex Corporation', + 0x00196C: u'ETROVISION TECHNOLOGY', + 0x00196D: u'Raybit Systems Korea, Inc', + 0x00196E: u'Metacom (Pty) Ltd.', + 0x00196F: u'SensoPart GmbH', + 0x001970: u'Z-Com, Inc.', + 0x001971: u'Guangzhou Unicomp Technology Co.,Ltd', + 0x001972: u'Plexus (Xiamen) Co.,ltd', + 0x001973: u'Zeugma Systems', + 0x001974: u'AboCom Systems, Inc.', + 0x001975: u'Beijing Huisen networks technology Inc', + 0x001976: u'Xipher Technologies, LLC', + 0x001977: u'Aerohive Networks, Inc.', + 0x001978: u'Datum Systems, Inc.', + 0x001979: u'Nokia Danmark A/S', + 0x00197A: u'MAZeT GmbH', + 0x00197B: u'Picotest Corp.', + 0x00197C: u'Riedel Communications GmbH', + 0x00197D: u'Hon Hai Precision Ind. Co., Ltd', + 0x00197E: u'Hon Hai Precision Ind. Co., Ltd', + 0x00197F: u'PLANTRONICS, INC.', + 0x001980: u'Gridpoint Systems', + 0x001981: u'Vivox Inc', + 0x001982: u'SmarDTV', + 0x001983: u'CCT R&D Limited', + 0x001984: u'ESTIC Corporation', + 0x001985: u'IT Watchdogs, Inc', + 0x001986: u'Cheng Hongjian', + 0x001987: u'Panasonic Mobile Communications Co., Ltd.', + 0x001988: u'Wi2Wi, Inc', + 0x001989: u'Sonitrol Corporation', + 0x00198A: u'Northrop Grumman Systems Corp.', + 0x00198B: u'Novera Optics Korea, Inc.', + 0x00198C: u'iXSea', + 0x00198D: u'Ocean Optics, Inc.', + 0x00198E: u'Oticon A/S', + 0x00198F: u'Alcatel Bell N.V.', + 0x001990: u'ELM DATA Co., Ltd.', + 0x001991: u'avinfo', + 0x001992: u'Bluesocket, Inc', + 0x001993: u'Changshu Switchgear MFG. Co.,Ltd. 
(Former Changshu Switchgea', + 0x001994: u'Jorjin technologies inc.', + 0x001995: u'Jurong Hi-Tech (Suzhou)Co.ltd', + 0x001996: u'TurboChef Technologies Inc.', + 0x001997: u'Soft Device Sdn Bhd', + 0x001998: u'SATO CORPORATION', + 0x001999: u'Fujitsu Siemens Computers', + 0x00199A: u'EDO-EVI', + 0x00199B: u'Diversified Technical Systems, Inc.', + 0x00199C: u'CTRING', + 0x00199D: u'V, Inc.', + 0x00199E: u'SHOWADENSHI ELECTRONICS,INC.', + 0x00199F: u'DKT A/S', + 0x0019A0: u'NIHON DATA SYSTENS, INC.', + 0x0019A1: u'LG INFORMATION & COMM.', + 0x0019A2: u'ORION TELE-EQUIPMENTS PVT LTD', + 0x0019A3: u'asteel electronique atlantique', + 0x0019A4: u'Austar Technology (hang zhou) Co.,Ltd', + 0x0019A5: u'RadarFind Corporation', + 0x0019A6: u'Motorola CHS', + 0x0019A7: u'ITU-T', + 0x0019A8: u'WiQuest Communications, Inc', + 0x0019A9: u'Cisco Systems', + 0x0019AA: u'Cisco Systems', + 0x0019AB: u'Raycom CO ., LTD', + 0x0019AC: u'GSP SYSTEMS Inc.', + 0x0019AD: u'BOBST SA', + 0x0019AE: u'Hopling Technologies b.v.', + 0x0019AF: u'Rigol Technologies, Inc.', + 0x0019B0: u'HanYang System', + 0x0019B1: u'Arrow7 Corporation', + 0x0019B2: u'XYnetsoft Co.,Ltd', + 0x0019B3: u'Stanford Research Systems', + 0x0019B4: u'VideoCast Ltd.', + 0x0019B5: u'Famar Fueguina S.A.', + 0x0019B6: u'Euro Emme s.r.l.', + 0x0019B7: u'Nokia Danmark A/S', + 0x0019B8: u'Boundary Devices', + 0x0019B9: u'Dell Inc.', + 0x0019BA: u'Paradox Security Systems Ltd', + 0x0019BB: u'Hewlett Packard', + 0x0019BC: u'ELECTRO CHANCE SRL', + 0x0019BD: u'New Media Life', + 0x0019BE: u'Altai Technologies Limited', + 0x0019BF: u'Citiway technology Co.,ltd', + 0x0019C0: u'Motorola Mobile Devices', + 0x0019C1: u'Alps Electric Co., Ltd', + 0x0019C2: u'Equustek Solutions, Inc.', + 0x0019C3: u'Qualitrol', + 0x0019C4: u'Infocrypt Inc.', + 0x0019C5: u'SONY Computer Entertainment inc,', + 0x0019C6: u'ZTE Corporation', + 0x0019C7: u'Cambridge Industries(Group) Co.,Ltd.', + 0x0019C8: u'AnyDATA Corporation', + 0x0019C9: u'S&C ELECTRIC 
COMPANY', + 0x0019CA: u'Broadata Communications, Inc', + 0x0019CB: u'ZyXEL Communications Corporation', + 0x0019CC: u'RCG (HK) Ltd', + 0x0019CD: u'Chengdu ethercom information technology Ltd.', + 0x0019CE: u'Progressive Gaming International', + 0x0019CF: u'SALICRU, S.A.', + 0x0019D0: u'Cathexis', + 0x0019D1: u'Intel Corporation', + 0x0019D2: u'Intel Corporation', + 0x0019D3: u'TRAK Microwave', + 0x0019D4: u'ICX Technologies', + 0x0019D5: u'IP Innovations, Inc.', + 0x0019D6: u'LS Cable Ltd.', + 0x0019D7: u'FORTUNETEK CO., LTD', + 0x0019D8: u'MAXFOR', + 0x0019D9: u'Zeutschel GmbH', + 0x0019DA: u'Welltrans O&E Technology Co. , Ltd.', + 0x0019DB: u'MICRO-STAR INTERNATIONAL CO., LTD.', + 0x0019DC: u'ENENSYS Technologies', + 0x0019DD: u'FEI-Zyfer, Inc.', + 0x0019DE: u'MOBITEK', + 0x0019DF: u'THOMSON APDG', + 0x0019E0: u'TP-LINK Technologies Co., Ltd.', + 0x0019E1: u'Nortel', + 0x0019E2: u'Juniper Networks', + 0x0019E3: u'Apple Computers', + 0x0019E4: u'2Wire, Inc', + 0x0019E5: u'Lynx Studio Technology, Inc.', + 0x0019E6: u'TOYO MEDIC CO.,LTD.', + 0x0019E7: u'Cisco Systems', + 0x0019E8: u'Cisco Systems', + 0x0019E9: u'S-Information Technolgy, Co., Ltd.', + 0x0019EA: u'TeraMage Technologies Co., Ltd.', + 0x0019EB: u'Pyronix Ltd', + 0x0019EC: u'Sagamore Systems, Inc.', + 0x0019ED: u'Axesstel Inc.', + 0x0019EE: u'CARLO GAVAZZI CONTROLS SPA-Controls Division', + 0x0019EF: u'SHENZHEN LINNKING ELECTRONICS CO.,LTD', + 0x0019F0: u'UNIONMAN TECHNOLOGY CO.,LTD', + 0x0019F1: u'Star Communication Network Technology Co.,Ltd', + 0x0019F2: u'Teradyne K.K.', + 0x0019F3: u'Telematrix, Inc', + 0x0019F4: u'Convergens Oy Ltd', + 0x0019F5: u'Imagination Technologies Ltd', + 0x0019F6: u'Acconet (PTE) Ltd', + 0x0019F7: u'Onset Computer Corporation', + 0x0019F8: u'Embedded Systems Design, Inc.', + 0x0019F9: u'Lambda', + 0x0019FA: u'Cable Vision Electronics CO., LTD.', + 0x0019FB: u'AMSTRAD PLC', + 0x0019FC: u'PT. 
Ufoakses Sukses Luarbiasa', + 0x0019FD: u'Nintendo Co., Ltd.', + 0x0019FE: u'SHENZHEN SEECOMM TECHNOLOGY CO.,LTD.', + 0x0019FF: u'Finnzymes', + 0x001A00: u'MATRIX INC.', + 0x001A01: u'Smiths Medical', + 0x001A02: u'SECURE CARE PRODUCTS, INC', + 0x001A03: u'Angel Electronics Co., Ltd.', + 0x001A04: u'Interay Solutions BV', + 0x001A05: u'OPTIBASE LTD', + 0x001A06: u'OpVista, Inc.', + 0x001A07: u'Arecont Vision', + 0x001A08: u'Dalman Technical Services', + 0x001A09: u'Wayfarer Transit Systems Ltd', + 0x001A0A: u'Adaptive Micro-Ware Inc.', + 0x001A0B: u'BONA TECHNOLOGY INC.', + 0x001A0C: u'Swe-Dish Satellite Systems AB', + 0x001A0D: u'HandHeld entertainment, Inc.', + 0x001A0E: u'Cheng Uei Precision Industry Co.,Ltd', + 0x001A0F: u'Sistemas Avanzados de Control, S.A.', + 0x001A10: u'LUCENT TRANS ELECTRONICS CO.,LTD', + 0x001A11: u'Google Inc.', + 0x001A12: u'PRIVATE', + 0x001A13: u'Wanlida Group Co., LTD', + 0x001A14: u'Xin Hua Control Engineering Co.,Ltd.', + 0x001A15: u'gemalto e-Payment', + 0x001A16: u'Nokia Danmark A/S', + 0x001A17: u'Teak Technologies, Inc.', + 0x001A18: u'Advanced Simulation Technology inc.', + 0x001A19: u'Computer Engineering Limited', + 0x001A1A: u'Gentex Corporation/Electro-Acoustic Products', + 0x001A1B: u'Motorola Mobile Devices', + 0x001A1C: u'GT&T Engineering Pte Ltd', + 0x001A1D: u'PChome Online Inc.', + 0x001A1E: u'Aruba Networks', + 0x001A1F: u'Coastal Environmental Systems', + 0x001A20: u'CMOTECH Co. Ltd.', + 0x001A21: u'Indac B.V.', + 0x001A22: u'eq-3 GmbH', + 0x001A23: u'Ice Qube, Inc', + 0x001A24: u'Galaxy Telecom Technologies Ltd', + 0x001A25: u'DELTA DORE', + 0x001A26: u'Deltanode Solutions AB', + 0x001A27: u'Ubistar', + 0x001A28: u'ASWT Co., LTD. 
Taiwan Branch H.K.', + 0x001A29: u'Techsonic Industries d/b/a Humminbird', + 0x001A2A: u'Arcadyan Technology Corporation', + 0x001A2B: u'Ayecom Technology Co., Ltd.', + 0x001A2C: u'SATEC Co.,LTD', + 0x001A2D: u'The Navvo Group', + 0x001A2E: u'Ziova Coporation', + 0x001A2F: u'Cisco Systems', + 0x001A30: u'Cisco Systems', + 0x001A31: u'SCAN COIN Industries AB', + 0x001A32: u'ACTIVA MULTIMEDIA', + 0x001A33: u'ASI Communications, Inc.', + 0x001A34: u'Konka Group Co., Ltd.', + 0x001A35: u'BARTEC GmbH', + 0x001A36: u'Actimon GmbH & Co. KG', + 0x001A37: u'Lear Corporation', + 0x001A38: u'SCI Technology', + 0x001A39: u'Merten GmbH&CoKG', + 0x001A3A: u'Dongahelecomm', + 0x001A3B: u'Doah Elecom Inc.', + 0x001A3C: u'Technowave Ltd.', + 0x001A3D: u'Ajin Vision Co.,Ltd', + 0x001A3E: u'Faster Technology LLC', + 0x001A3F: u'intelbras', + 0x001A40: u'A-FOUR TECH CO., LTD.', + 0x001A41: u'INOCOVA Co.,Ltd', + 0x001A42: u'Techcity Technology co., Ltd.', + 0x001A43: u'Logical Link Communications', + 0x001A44: u'JWTrading Co., Ltd', + 0x001A45: u'GN Netcom as', + 0x001A46: u'Digital Multimedia Technology Co., Ltd', + 0x001A47: u'Agami Systems, Inc.', + 0x001A48: u'Takacom Corporation', + 0x001A49: u'Micro Vision Co.,LTD', + 0x001A4A: u'Qumranet Inc.', + 0x001A4B: u'Hewlett Packard', + 0x001A4C: u'Crossbow Technology, Inc', + 0x001A4D: u'GIGABYTE TECHNOLOGY CO.,LTD.', + 0x001A4E: u'NTI AG / LinMot', + 0x001A4F: u'AVM GmbH', + 0x001A50: u'PheeNet Technology Corp.', + 0x001A51: u'Alfred Mann Foundation', + 0x001A52: u'Meshlinx Wireless Inc.', + 0x001A53: u'Zylaya', + 0x001A54: u'Hip Shing Electronics Ltd.', + 0x001A55: u'ACA-Digital Corporation', + 0x001A56: u'ViewTel Co,. Ltd.', + 0x001A57: u'Matrix Design Group, LLC', + 0x001A58: u'Celectronic GmbH', + 0x001A59: u'Ircona', + 0x001A5A: u'Korea Electric Power Data Network (KDN) Co., Ltd', + 0x001A5B: u'NetCare Service Co., Ltd.', + 0x001A5C: u'Euchner GmbH+Co. 
KG', + 0x001A5D: u'Mobinnova Corp.', + 0x001A5E: u'Thincom Technology Co.,Ltd', + 0x001A5F: u'KitWorks.fi Ltd.', + 0x001A60: u'Wave Electronics Co.,Ltd.', + 0x001A61: u'PacStar Corp.', + 0x001A62: u'trusted data', + 0x001A63: u'Elster Electricity, LLC', + 0x001A64: u'IBM Corp.', + 0x001A65: u'Seluxit', + 0x001A66: u'Motorola CHS', + 0x001A67: u'Infinite QL Sdn Bhd', + 0x001A68: u'Weltec Enterprise Co., Ltd.', + 0x001A69: u'Wuhan Yangtze Optical Technology CO.,Ltd.', + 0x001A6A: u'Tranzas, Inc.', + 0x001A6B: u'USI', + 0x001A6C: u'Cisco Systems', + 0x001A6D: u'Cisco Systems', + 0x001A6E: u'Impro Technologies', + 0x001A6F: u'MI.TEL s.r.l.', + 0x001A70: u'Cisco-Linksys, LLC', + 0x001A71: u'Diostech Co., Ltd.', + 0x001A72: u'Mosart Semiconductor Corp.', + 0x001A73: u'Gemtek Technology Co., Ltd.', + 0x001A74: u'Procare International Co', + 0x001A75: u'Sony Ericsson Mobile Communications', + 0x001A76: u'SDT information Technology Co.,LTD.', + 0x001A77: u'Motorola Mobile Devices', + 0x001A78: u'ubtos', + 0x001A79: u'TELECOMUNICATION TECHNOLOGIES LTD.', + 0x001A7A: u'Lismore Instruments Limited', + 0x001A7B: u'Teleco, Inc.', + 0x001A7C: u'Hirschmann Automation and Control B.V.', + 0x001A7D: u'cyber-blue(HK)Ltd', + 0x001A7E: u'LN Srithai Comm Ltd.', + 0x001A7F: u'GCI Science&Technology Co.,Ltd.', + 0x001A80: u'Sony Corporation', + 0x001A81: u'Zelax', + 0x001A82: u'PROBA Building Automation Co.,LTD', + 0x001A83: u'Pegasus Technologies Inc.', + 0x001A84: u'V One Multimedia Pte Ltd', + 0x001A85: u'NV Michel Van de Wiele', + 0x001A86: u'AdvancedIO Systems Inc', + 0x001A87: u'Canhold International Limited', + 0x001A88: u'Venergy,Co,Ltd', + 0x001A89: u'Nokia Danmark A/S', + 0x001A8A: u'Samsung Electronics Co., Ltd.', + 0x001A8B: u'CHUNIL ELECTRIC IND., CO.', + 0x001A8C: u'Astaro AG', + 0x001A8D: u'AVECS Bergen GmbH', + 0x001A8E: u'3Way Networks Ltd', + 0x001A8F: u'Nortel', + 0x001A90: u'Trópico Sistemas e Telecomunicações da Amazônia LTDA.', + 0x001A91: u'FusionDynamic Ltd.', + 
0x001A92: u'ASUSTek COMPUTER INC.', + 0x001A93: u'ERCO Leuchten GmbH', + 0x001A94: u'Votronic GmbH', + 0x001A95: u'Hisense Mobile Communications Technoligy Co.,Ltd.', + 0x001A96: u'ECLER S.A.', + 0x001A97: u'fitivision technology Inc.', + 0x001A98: u'Asotel Communication Limited Taiwan Branch', + 0x001A99: u'Smarty (HZ) Information Electronics Co., Ltd', + 0x001A9A: u'Skyworth Digital technology(shenzhen)co.ltd.', + 0x001A9B: u'ADEC & Parter AG', + 0x001A9C: u'RightHand Technologies, Inc.', + 0x001A9D: u'Skipper Wireless, Inc.', + 0x001A9E: u'ICON Digital International Limited', + 0x001A9F: u'A-Link Europe Ltd', + 0x001AA0: u'Dell Inc', + 0x001AA1: u'Cisco Systems', + 0x001AA2: u'Cisco Systems', + 0x001AA3: u'DELORME', + 0x001AA4: u'Future University-Hakodate', + 0x001AA5: u'BRN Phoenix', + 0x001AA6: u'Telefunken Radio Communication Systems GmbH &CO.KG', + 0x001AA7: u'Torian Wireless', + 0x001AA8: u'Mamiya Digital Imaging Co., Ltd.', + 0x001AA9: u'FUJIAN STAR-NET COMMUNICATION CO.,LTD', + 0x001AAA: u'Analogic Corp.', + 0x001AAB: u'eWings s.r.l.', + 0x001AAC: u'Corelatus AB', + 0x001AAD: u'Motorola CHS', + 0x001AAE: u'Savant Systems LLC', + 0x001AAF: u'BLUSENS TECHNOLOGY', + 0x001AB0: u'Signal Networks Pvt. 
Ltd.,', + 0x001AB1: u'Asia Pacific Satellite Industries Co., Ltd.', + 0x001AB2: u'Cyber Solutions Inc.', + 0x001AB3: u'VISIONITE INC.', + 0x001AB4: u'FFEI Ltd.', + 0x001AB5: u'Home Network System', + 0x001AB6: u'Luminary Micro Inc', + 0x001AB7: u'Ethos Networks LTD.', + 0x001AB8: u'Anseri Corporation', + 0x001AB9: u'PMC', + 0x001ABA: u'Caton Overseas Limited', + 0x001ABB: u'Fontal Technology Incorporation', + 0x001ABC: u'U4EA Technologies Ltd', + 0x001ABD: u'Impatica Inc.', + 0x001ABE: u'COMPUTER HI-TECH INC.', + 0x001ABF: u'TRUMPF Laser Marking Systems AG', + 0x001AC0: u'JOYBIEN TECHNOLOGIES CO., LTD.', + 0x001AC1: u'3COM EUROPE', + 0x001AC2: u'YEC Co.,Ltd.', + 0x001AC3: u'Scientific-Atlanta, Inc', + 0x001AC4: u'2Wire, Inc', + 0x001AC5: u'BreakingPoint Systems, Inc.', + 0x001AC6: u'Micro Control Designs', + 0x001AC7: u'UNIPOINT', + 0x001AC8: u'ISL (Instrumentation Scientifique de Laboratoire)', + 0x001AC9: u'SUZUKEN CO.,LTD', + 0x001ACA: u'Tilera Corporation', + 0x001ACB: u'Autocom Products Ltd', + 0x001ACC: u'Celestial Semiconductor, Ltd', + 0x001ACD: u'Tidel Engineering LP', + 0x001ACE: u'YUPITERU INDUSTRIES CO., LTD.', + 0x001ACF: u'C.T. 
ELETTRONICA', + 0x001AD0: u'Siemens Schweiz AG', + 0x001AD1: u'FARGO CO., LTD.', + 0x001AD2: u'Eletronica Nitron Ltda', + 0x001AD3: u'Vamp Ltd.', + 0x001AD4: u'iPOX Technology Co., Ltd.', + 0x001AD5: u'KMC CHAIN INDUSTRIAL CO., LTD.', + 0x001AD6: u'JIAGNSU AETNA ELECTRIC CO.,LTD', + 0x001AD7: u'Christie Digital Systems, Inc.', + 0x001AD8: u'AlsterAero GmbH', + 0x001AD9: u'International Broadband Electric Communications, Inc.', + 0x001ADA: u'Biz-2-Me Inc.', + 0x001ADB: u'Motorola Mobile Devices', + 0x001ADC: u'Nokia Danmark A/S', + 0x001ADD: u'PePWave Ltd', + 0x001ADE: u'Motorola CHS', + 0x001ADF: u'Interactivetv Pty Limited', + 0x001AE0: u'Mythology Tech Express Inc.', + 0x001AE1: u'EDGE ACCESS INC', + 0x001AE2: u'Cisco Systems', + 0x001AE3: u'Cisco Systems', + 0x001AE4: u'Liposonix Inc,', + 0x001AE5: u'Mvox Technologies Inc.', + 0x001AE6: u'Atlanta Advanced Communications Holdings Limited', + 0x001AE7: u'Aztek Networks, Inc.', + 0x001AE8: u'Siemens Enterprise Communications GmbH & Co. KG', + 0x001AE9: u'Nintendo Co., Ltd.', + 0x001AEA: u'Radio Terminal Systems Pty Ltd', + 0x001AEB: u'Allied Telesis K.K.', + 0x001AEC: u'Keumbee Electronics Co.,Ltd.', + 0x001AED: u'INCOTEC GmbH', + 0x001AEE: u'Shenztech Ltd', + 0x001AEF: u'Loopcomm Technology, Inc.', + 0x001AF0: u'Alcatel - IPD', + 0x001AF1: u'Embedded Artists AB', + 0x001AF2: u'Dynavisions GmbH', + 0x001AF3: u'Samyoung Electronics', + 0x001AF4: u'Handreamnet', + 0x001AF5: u'PENTAONE. 
CO., LTD.', + 0x001AF6: u'Woven Systems, Inc.', + 0x001AF7: u'dataschalt e+a GmbH', + 0x001AF8: u'Copley Controls Corporation', + 0x001AF9: u'AeroVIronment (AV Inc)', + 0x001AFA: u'Welch Allyn, Inc.', + 0x001AFB: u'Joby Inc.', + 0x001AFC: u'ModusLink Corporation', + 0x001AFD: u'EVOLIS', + 0x001AFE: u'SOFACREAL', + 0x001AFF: u'Wizyoung Tech.', + 0x001B00: u'Neopost Technologies', + 0x001B01: u'Applied Radio Technologies', + 0x001B02: u'ED Co.Ltd', + 0x001B03: u'Action Technology (SZ) Co., Ltd', + 0x001B04: u'Affinity International S.p.a', + 0x001B05: u'Young Media Concepts GmbH', + 0x001B06: u'Ateliers R. LAUMONIER', + 0x001B07: u'Mendocino Software', + 0x001B08: u'Danfoss Drives A/S', + 0x001B09: u'Matrix Telecom Pvt. Ltd.', + 0x001B0A: u'Intelligent Distributed Controls Ltd', + 0x001B0B: u'Phidgets Inc.', + 0x001B0C: u'Cisco Systems', + 0x001B0D: u'Cisco Systems', + 0x001B0E: u'InoTec GmbH Organisationssysteme', + 0x001B0F: u'Petratec', + 0x001B10: u'ShenZhen Kang Hui Technology Co.,ltd', + 0x001B11: u'D-Link Corporation', + 0x001B12: u'Apprion', + 0x001B13: u'Icron Technologies Corporation', + 0x001B14: u'Carex Lighting Equipment Factory', + 0x001B15: u'Voxtel, Inc.', + 0x001B16: u'Celtro Ltd.', + 0x001B17: u'Palo Alto Networks', + 0x001B18: u'Tsuken Electric Ind. Co.,Ltd', + 0x001B19: u'IEEE 1588 Standard', + 0x001B1A: u'e-trees Japan, Inc.', + 0x001B1B: u'Siemens AG, A&D AS EWK PU1', + 0x001B1C: u'Coherent', + 0x001B1D: u'Phoenix International Co., Ltd', + 0x001B1E: u'HART Communication Foundation', + 0x001B1F: u'DELTA - Danish Electronics, Light & Acoustics', + 0x001B20: u'TPine Technology', + 0x001B21: u'Intel Corporate', + 0x001B22: u'Palit Microsystems ( H.K.) 
Ltd.', + 0x001B23: u'SimpleComTools', + 0x001B24: u'Quanta Computer Inc.', + 0x001B25: u'Nortel', + 0x001B26: u'RON-Telecom ZAO', + 0x001B27: u'Merlin CSI', + 0x001B28: u'POLYGON, JSC', + 0x001B29: u'Avantis.Co.,Ltd', + 0x001B2A: u'Cisco Systems', + 0x001B2B: u'Cisco Systems', + 0x001B2C: u'ATRON electronic GmbH', + 0x001B2D: u'PRIVATE', + 0x001B2E: u'Sinkyo Electron Inc', + 0x001B2F: u'NETGEAR Inc.', + 0x001B30: u'Solitech Inc.', + 0x001B31: u'Neural Image. Co. Ltd.', + 0x001B32: u'QLogic Corporation', + 0x001B33: u'Nokia Danmark A/S', + 0x001B34: u'Focus System Inc.', + 0x001B35: u'ChongQing JINOU Science & Technology Development CO.,Ltd', + 0x001B36: u'Tsubata Engineering Co.,Ltd. (Head Office)', + 0x001B37: u'Computec Oy', + 0x001B38: u'COMPAL ELECTRONICS TECHNOLOGIC CO., LTD.', + 0x001B39: u'Proxicast', + 0x001B3A: u'SIMS Corp.', + 0x001B3B: u'Yi-Qing CO., LTD', + 0x001B3C: u'Software Technologies Group,Inc.', + 0x001B3D: u'EuroTel Spa', + 0x001B3E: u'Curtis, Inc.', + 0x001B3F: u'ProCurve Networking by HP', + 0x001B40: u'Network Automation mxc AB', + 0x001B41: u'General Infinity Co.,Ltd.', + 0x001B42: u'Wise & Blue', + 0x001B43: u'Beijing DG Telecommunications equipment Co.,Ltd', + 0x001B44: u'SanDisk Corporation', + 0x001B45: u'ABB AS, Division Automation Products', + 0x001B46: u'Blueone Technology Co.,Ltd', + 0x001B47: u'Futarque A/S', + 0x001B48: u'Shenzhen Lantech Electronics Co., Ltd.', + 0x001B49: u'Roberts Radio limited', + 0x001B4A: u'W&W Communications, Inc.', + 0x001B4B: u'SANION Co., Ltd.', + 0x001B4C: u'Signtech', + 0x001B4D: u'Areca Technology Corporation', + 0x001B4E: u'Navman New Zealand', + 0x001B4F: u'Avaya Inc.', + 0x001B50: u'Nizhny Novgorod Factory named after M.Frunze, FSUE (NZiF)', + 0x001B51: u'Vector Technology Corp.', + 0x001B52: u'Motorola Mobile Devices', + 0x001B53: u'Cisco Systems', + 0x001B54: u'Cisco Systems', + 0x001B55: u'Hurco Automation Ltd.', + 0x001B56: u'Tehuti Networks Ltd.', + 0x001B57: u'SEMINDIA SYSTEMS PRIVATE 
LIMITED', + 0x001B58: u'PRIVATE', + 0x001B59: u'Sony Ericsson Mobile Communications AB', + 0x001B5A: u'Apollo Imaging Technologies, Inc.', + 0x001B5B: u'2Wire, Inc.', + 0x001B5C: u'Azuretec Co., Ltd.', + 0x001B5D: u'Vololink Pty Ltd', + 0x001B5E: u'BPL Limited', + 0x001B5F: u'Alien Technology', + 0x001B60: u'NAVIGON AG', + 0x001B61: u'Digital Acoustics, LLC', + 0x001B62: u'JHT Optoelectronics Co.,Ltd.', + 0x001B63: u'Apple Inc.', + 0x001B64: u'IsaacLandKorea', + 0x001B65: u'China Gridcom Co., Ltd', + 0x001B66: u'Sennheiser electronic GmbH & Co. KG', + 0x001B67: u'Ubiquisys Ltd', + 0x001B68: u'Modnnet Co., Ltd', + 0x001B69: u'Equaline Corporation', + 0x001B6A: u'Powerwave UK Ltd', + 0x001B6B: u'Swyx Solutions AG', + 0x001B6C: u'LookX Digital Media BV', + 0x001B6D: u'Midtronics, Inc.', + 0x001B6E: u'Anue Systems, Inc.', + 0x001B6F: u'Teletrak Ltd', + 0x001B70: u'IRI Ubiteq, INC.', + 0x001B71: u'Telular Corp.', + 0x001B72: u'Sicep s.p.a.', + 0x001B73: u'DTL Broadcast Ltd', + 0x001B74: u'MiraLink Corporation', + 0x001B75: u'Hypermedia Systems', + 0x001B76: u'Ripcode, Inc.', + 0x001B77: u'Intel Corporate', + 0x001B78: u'Hewlett Packard', + 0x001B79: u'FAIVELEY TRANSPORT', + 0x001B7A: u'Nintendo Co., Ltd.', + 0x001B7B: u'The Tintometer Ltd', + 0x001B7C: u'A & R Cambridge', + 0x001B7D: u'CXR Anderson Jacobson', + 0x001B7E: u'Beckmann GmbH', + 0x001B7F: u'TMN Technologies Telecomunicacoes Ltda', + 0x001B80: u'LORD Corporation', + 0x001B81: u'DATAQ Instruments, Inc.', + 0x001B82: u'Taiwan Semiconductor Co., Ltd.', + 0x001B83: u'Finsoft Ltd', + 0x001B84: u'Scan Engineering Telecom', + 0x001B85: u'MAN Diesel A/S', + 0x001B86: u'Bosch Access Systems GmbH', + 0x001B87: u'Deepsound Tech. 
Co., Ltd', + 0x001B88: u'Divinet Access Technologies Ltd', + 0x001B89: u'EMZA Visual Sense Ltd.', + 0x001B8A: u'2M Electronic A/S', + 0x001B8B: u'NEC AccessTechnica,Ltd.', + 0x001B8C: u'JMicron Technology Corp.', + 0x001B8D: u'Electronic Computer Systems, Inc.', + 0x001B8E: u'Hulu Sweden AB', + 0x001B8F: u'Cisco Systems', + 0x001B90: u'Cisco Systems', + 0x001B91: u'EFKON AG', + 0x001B92: u'l-acoustics', + 0x001B93: u'JC Decaux SA DNT', + 0x001B94: u'T.E.M.A. S.p.A.', + 0x001B95: u'VIDEO SYSTEMS SRL', + 0x001B96: u'Snif Labs, Inc.', + 0x001B97: u'Violin Technologies', + 0x001B98: u'Samsung Electronics Co., Ltd.', + 0x001B99: u'KS System GmbH', + 0x001B9A: u'Apollo Fire Detectors Ltd', + 0x001B9B: u'Hose-McCann Communications', + 0x001B9C: u'SATEL sp. z o.o.', + 0x001B9D: u'Novus Security Sp. z o.o.', + 0x001B9E: u'ASKEY COMPUTER CORP', + 0x001B9F: u'Calyptech Pty Ltd', + 0x001BA0: u'Awox', + 0x001BA1: u'Ã…mic AB', + 0x001BA2: u'IDS Imaging Development Systems GmbH', + 0x001BA3: u'Flexit Group GmbH', + 0x001BA4: u'S.A.E Afikim', + 0x001BA5: u'MyungMin Systems, Inc.', + 0x001BA6: u'intotech inc.', + 0x001BA7: u'Lorica Solutions', + 0x001BA8: u'UBI&MOBI,.Inc', + 0x001BA9: u'BROTHER INDUSTRIES, LTD. 
Printing & Solutions Company', + 0x001BAA: u'XenICs nv', + 0x001BAB: u'Telchemy, Incorporated', + 0x001BAC: u'Curtiss Wright Controls Embedded Computing', + 0x001BAD: u'iControl Incorporated', + 0x001BAE: u'Micro Control Systems, Inc', + 0x001BAF: u'Nokia Danmark A/S', + 0x001BB0: u'BHARAT ELECTRONICS', + 0x001BB1: u'Wistron Neweb Corp.', + 0x001BB2: u'Intellect International NV', + 0x001BB3: u'Condalo GmbH', + 0x001BB4: u'Airvod Limited', + 0x001BB5: u'Cherry GmbH', + 0x001BB6: u'Bird Electronic Corp.', + 0x001BB7: u'Alta Heights Technology Corp.', + 0x001BB8: u'BLUEWAY ELECTRONIC CO;LTD', + 0x001BB9: u'Elitegroup Computer System Co.', + 0x001C7C: u'PERQ SYSTEMS CORPORATION', + 0x002000: u'LEXMARK INTERNATIONAL, INC.', + 0x002001: u'DSP SOLUTIONS, INC.', + 0x002002: u'SERITECH ENTERPRISE CO., LTD.', + 0x002003: u'PIXEL POWER LTD.', + 0x002004: u'YAMATAKE-HONEYWELL CO., LTD.', + 0x002005: u'SIMPLE TECHNOLOGY', + 0x002006: u'GARRETT COMMUNICATIONS, INC.', + 0x002007: u'SFA, INC.', + 0x002008: u'CABLE & COMPUTER TECHNOLOGY', + 0x002009: u'PACKARD BELL ELEC., INC.', + 0x00200A: u'SOURCE-COMM CORP.', + 0x00200B: u'OCTAGON SYSTEMS CORP.', + 0x00200C: u'ADASTRA SYSTEMS CORP.', + 0x00200D: u'CARL ZEISS', + 0x00200E: u'SATELLITE TECHNOLOGY MGMT, INC', + 0x00200F: u'TANBAC CO., LTD.', + 0x002010: u'JEOL SYSTEM TECHNOLOGY CO. LTD', + 0x002011: u'CANOPUS CO., LTD.', + 0x002012: u'CAMTRONICS MEDICAL SYSTEMS', + 0x002013: u'DIVERSIFIED TECHNOLOGY, INC.', + 0x002014: u'GLOBAL VIEW CO., LTD.', + 0x002015: u'ACTIS COMPUTER SA', + 0x002016: u'SHOWA ELECTRIC WIRE & CABLE CO', + 0x002017: u'ORBOTECH', + 0x002018: u'CIS TECHNOLOGY INC.', + 0x002019: u'OHLER GmbH', + 0x00201A: u'MRV Communications, Inc.', + 0x00201B: u'NORTHERN TELECOM/NETWORK', + 0x00201C: u'EXCEL, INC.', + 0x00201D: u'KATANA PRODUCTS', + 0x00201E: u'NETQUEST CORPORATION', + 0x00201F: u'BEST POWER TECHNOLOGY, INC.', + 0x002020: u'MEGATRON COMPUTER INDUSTRIES PTY, LTD.', + 0x002021: u'ALGORITHMS SOFTWARE PVT. 
LTD.', + 0x002022: u'NMS Communications', + 0x002023: u'T.C. TECHNOLOGIES PTY. LTD', + 0x002024: u'PACIFIC COMMUNICATION SCIENCES', + 0x002025: u'CONTROL TECHNOLOGY, INC.', + 0x002026: u'AMKLY SYSTEMS, INC.', + 0x002027: u'MING FORTUNE INDUSTRY CO., LTD', + 0x002028: u'WEST EGG SYSTEMS, INC.', + 0x002029: u'TELEPROCESSING PRODUCTS, INC.', + 0x00202A: u'N.V. DZINE', + 0x00202B: u'ADVANCED TELECOMMUNICATIONS MODULES, LTD.', + 0x00202C: u'WELLTRONIX CO., LTD.', + 0x00202D: u'TAIYO CORPORATION', + 0x00202E: u'DAYSTAR DIGITAL', + 0x00202F: u'ZETA COMMUNICATIONS, LTD.', + 0x002030: u'ANALOG & DIGITAL SYSTEMS', + 0x002031: u'ERTEC GmbH', + 0x002032: u'ALCATEL TAISEL', + 0x002033: u'SYNAPSE TECHNOLOGIES, INC.', + 0x002034: u'ROTEC INDUSTRIEAUTOMATION GMBH', + 0x002035: u'IBM CORPORATION', + 0x002036: u'BMC SOFTWARE', + 0x002037: u'SEAGATE TECHNOLOGY', + 0x002038: u'VME MICROSYSTEMS INTERNATIONAL CORPORATION', + 0x002039: u'SCINETS', + 0x00203A: u'DIGITAL BI0METRICS INC.', + 0x00203B: u'WISDM LTD.', + 0x00203C: u'EUROTIME AB', + 0x00203D: u'NOVAR ELECTRONICS CORPORATION', + 0x00203E: u'LogiCan Technologies, Inc.', + 0x00203F: u'JUKI CORPORATION', + 0x002040: u'Motorola Broadband Communications Sector', + 0x002041: u'DATA NET', + 0x002042: u'DATAMETRICS CORP.', + 0x002043: u'NEURON COMPANY LIMITED', + 0x002044: u'GENITECH PTY LTD', + 0x002045: u'ION Networks, Inc.', + 0x002046: u'CIPRICO, INC.', + 0x002047: u'STEINBRECHER CORP.', + 0x002048: u'Marconi Communications', + 0x002049: u'COMTRON, INC.', + 0x00204A: u'PRONET GMBH', + 0x00204B: u'AUTOCOMPUTER CO., LTD.', + 0x00204C: u'MITRON COMPUTER PTE LTD.', + 0x00204D: u'INOVIS GMBH', + 0x00204E: u'NETWORK SECURITY SYSTEMS, INC.', + 0x00204F: u'DEUTSCHE AEROSPACE AG', + 0x002050: u'KOREA COMPUTER INC.', + 0x002051: u'Verilink Corporation', + 0x002052: u'RAGULA SYSTEMS', + 0x002053: u'HUNTSVILLE MICROSYSTEMS, INC.', + 0x002054: u'EASTERN RESEARCH, INC.', + 0x002055: u'ALTECH CO., LTD.', + 0x002056: u'NEOPRODUCTS', + 0x002057: 
u'TITZE DATENTECHNIK GmbH', + 0x002058: u'ALLIED SIGNAL INC.', + 0x002059: u'MIRO COMPUTER PRODUCTS AG', + 0x00205A: u'COMPUTER IDENTICS', + 0x00205B: u'Kentrox, LLC', + 0x00205C: u'InterNet Systems of Florida, Inc.', + 0x00205D: u'NANOMATIC OY', + 0x00205E: u'CASTLE ROCK, INC.', + 0x00205F: u'GAMMADATA COMPUTER GMBH', + 0x002060: u'ALCATEL ITALIA S.p.A.', + 0x002061: u'DYNATECH COMMUNICATIONS, INC.', + 0x002062: u'SCORPION LOGIC, LTD.', + 0x002063: u'WIPRO INFOTECH LTD.', + 0x002064: u'PROTEC MICROSYSTEMS, INC.', + 0x002065: u'SUPERNET NETWORKING INC.', + 0x002066: u'GENERAL MAGIC, INC.', + 0x002067: u'PRIVATE', + 0x002068: u'ISDYNE', + 0x002069: u'ISDN SYSTEMS CORPORATION', + 0x00206A: u'OSAKA COMPUTER CORP.', + 0x00206B: u'KONICA MINOLTA HOLDINGS, INC.', + 0x00206C: u'EVERGREEN TECHNOLOGY CORP.', + 0x00206D: u'DATA RACE, INC.', + 0x00206E: u'XACT, INC.', + 0x00206F: u'FLOWPOINT CORPORATION', + 0x002070: u'HYNET, LTD.', + 0x002071: u'IBR GMBH', + 0x002072: u'WORKLINK INNOVATIONS', + 0x002073: u'FUSION SYSTEMS CORPORATION', + 0x002074: u'SUNGWOON SYSTEMS', + 0x002075: u'MOTOROLA COMMUNICATION ISRAEL', + 0x002076: u'REUDO CORPORATION', + 0x002077: u'KARDIOS SYSTEMS CORP.', + 0x002078: u'RUNTOP, INC.', + 0x002079: u'MIKRON GMBH', + 0x00207A: u'WiSE Communications, Inc.', + 0x00207B: u'Intel Corporation', + 0x00207C: u'AUTEC GmbH', + 0x00207D: u'ADVANCED COMPUTER APPLICATIONS', + 0x00207E: u'FINECOM Co., Ltd.', + 0x00207F: u'KYOEI SANGYO CO., LTD.', + 0x002080: u'SYNERGY (UK) LTD.', + 0x002081: u'TITAN ELECTRONICS', + 0x002082: u'ONEAC CORPORATION', + 0x002083: u'PRESTICOM INCORPORATED', + 0x002084: u'OCE PRINTING SYSTEMS, GMBH', + 0x002085: u'EXIDE ELECTRONICS', + 0x002086: u'MICROTECH ELECTRONICS LIMITED', + 0x002087: u'MEMOTEC COMMUNICATIONS CORP.', + 0x002088: u'GLOBAL VILLAGE COMMUNICATION', + 0x002089: u'T3PLUS NETWORKING, INC.', + 0x00208A: u'SONIX COMMUNICATIONS, LTD.', + 0x00208B: u'LAPIS TECHNOLOGIES, INC.', + 0x00208C: u'GALAXY NETWORKS, INC.', + 0x00208D: 
u'CMD TECHNOLOGY', + 0x00208E: u'CHEVIN SOFTWARE ENG. LTD.', + 0x00208F: u'ECI TELECOM LTD.', + 0x002090: u'ADVANCED COMPRESSION TECHNOLOGY, INC.', + 0x002091: u'J125, NATIONAL SECURITY AGENCY', + 0x002092: u'CHESS ENGINEERING B.V.', + 0x002093: u'LANDINGS TECHNOLOGY CORP.', + 0x002094: u'CUBIX CORPORATION', + 0x002095: u'RIVA ELECTRONICS', + 0x002096: u'Invensys', + 0x002097: u'APPLIED SIGNAL TECHNOLOGY', + 0x002098: u'HECTRONIC AB', + 0x002099: u'BON ELECTRIC CO., LTD.', + 0x00209A: u'THE 3DO COMPANY', + 0x00209B: u'ERSAT ELECTRONIC GMBH', + 0x00209C: u'PRIMARY ACCESS CORP.', + 0x00209D: u'LIPPERT AUTOMATIONSTECHNIK', + 0x00209E: u'BROWN\'S OPERATING SYSTEM SERVICES, LTD.', + 0x00209F: u'MERCURY COMPUTER SYSTEMS, INC.', + 0x0020A0: u'OA LABORATORY CO., LTD.', + 0x0020A1: u'DOVATRON', + 0x0020A2: u'GALCOM NETWORKING LTD.', + 0x0020A3: u'DIVICOM INC.', + 0x0020A4: u'MULTIPOINT NETWORKS', + 0x0020A5: u'API ENGINEERING', + 0x0020A6: u'PROXIM, INC.', + 0x0020A7: u'PAIRGAIN TECHNOLOGIES, INC.', + 0x0020A8: u'SAST TECHNOLOGY CORP.', + 0x0020A9: u'WHITE HORSE INDUSTRIAL', + 0x0020AA: u'DIGIMEDIA VISION LTD.', + 0x0020AB: u'MICRO INDUSTRIES CORP.', + 0x0020AC: u'INTERFLEX DATENSYSTEME GMBH', + 0x0020AD: u'LINQ SYSTEMS', + 0x0020AE: u'ORNET DATA COMMUNICATION TECH.', + 0x0020AF: u'3COM CORPORATION', + 0x0020B0: u'GATEWAY DEVICES, INC.', + 0x0020B1: u'COMTECH RESEARCH INC.', + 0x0020B2: u'GKD Gesellschaft Fur Kommunikation Und Datentechnik', + 0x0020B3: u'SCLTEC COMMUNICATIONS SYSTEMS', + 0x0020B4: u'TERMA ELEKTRONIK AS', + 0x0020B5: u'YASKAWA ELECTRIC CORPORATION', + 0x0020B6: u'AGILE NETWORKS, INC.', + 0x0020B7: u'NAMAQUA COMPUTERWARE', + 0x0020B8: u'PRIME OPTION, INC.', + 0x0020B9: u'METRICOM, INC.', + 0x0020BA: u'CENTER FOR HIGH PERFORMANCE', + 0x0020BB: u'ZAX CORPORATION', + 0x0020BC: u'Long Reach Networks Pty Ltd', + 0x0020BD: u'NIOBRARA R & D CORPORATION', + 0x0020BE: u'LAN ACCESS CORP.', + 0x0020BF: u'AEHR TEST SYSTEMS', + 0x0020C0: u'PULSE ELECTRONICS, INC.', + 
0x0020C1: u'SAXA, Inc.', + 0x0020C2: u'TEXAS MEMORY SYSTEMS, INC.', + 0x0020C3: u'COUNTER SOLUTIONS LTD.', + 0x0020C4: u'INET,INC.', + 0x0020C5: u'EAGLE TECHNOLOGY', + 0x0020C6: u'NECTEC', + 0x0020C7: u'AKAI Professional M.I. Corp.', + 0x0020C8: u'LARSCOM INCORPORATED', + 0x0020C9: u'VICTRON BV', + 0x0020CA: u'DIGITAL OCEAN', + 0x0020CB: u'PRETEC ELECTRONICS CORP.', + 0x0020CC: u'DIGITAL SERVICES, LTD.', + 0x0020CD: u'HYBRID NETWORKS, INC.', + 0x0020CE: u'LOGICAL DESIGN GROUP, INC.', + 0x0020CF: u'TEST & MEASUREMENT SYSTEMS INC', + 0x0020D0: u'VERSALYNX CORPORATION', + 0x0020D1: u'MICROCOMPUTER SYSTEMS (M) SDN.', + 0x0020D2: u'RAD DATA COMMUNICATIONS, LTD.', + 0x0020D3: u'OST (OUEST STANDARD TELEMATIQU', + 0x0020D4: u'CABLETRON - ZEITTNET INC.', + 0x0020D5: u'VIPA GMBH', + 0x0020D6: u'BREEZECOM', + 0x0020D7: u'JAPAN MINICOMPUTER SYSTEMS CO., Ltd.', + 0x0020D8: u'Nortel Networks', + 0x0020D9: u'PANASONIC TECHNOLOGIES, INC./MIECO-US', + 0x0020DA: u'Alcatel North America ESD', + 0x0020DB: u'XNET TECHNOLOGY, INC.', + 0x0020DC: u'DENSITRON TAIWAN LTD.', + 0x0020DD: u'Cybertec Pty Ltd', + 0x0020DE: u'JAPAN DIGITAL LABORAT\'Y CO.LTD', + 0x0020DF: u'KYOSAN ELECTRIC MFG. 
CO., LTD.', + 0x0020E0: u'Actiontec Electronics, Inc.', + 0x0020E1: u'ALAMAR ELECTRONICS', + 0x0020E2: u'INFORMATION RESOURCE ENGINEERING', + 0x0020E3: u'MCD KENCOM CORPORATION', + 0x0020E4: u'HSING TECH ENTERPRISE CO., LTD', + 0x0020E5: u'APEX DATA, INC.', + 0x0020E6: u'LIDKOPING MACHINE TOOLS AB', + 0x0020E7: u'B&W NUCLEAR SERVICE COMPANY', + 0x0020E8: u'DATATREK CORPORATION', + 0x0020E9: u'DANTEL', + 0x0020EA: u'EFFICIENT NETWORKS, INC.', + 0x0020EB: u'CINCINNATI MICROWAVE, INC.', + 0x0020EC: u'TECHWARE SYSTEMS CORP.', + 0x0020ED: u'GIGA-BYTE TECHNOLOGY CO., LTD.', + 0x0020EE: u'GTECH CORPORATION', + 0x0020EF: u'USC CORPORATION', + 0x0020F0: u'UNIVERSAL MICROELECTRONICS CO.', + 0x0020F1: u'ALTOS INDIA LIMITED', + 0x0020F2: u'SUN MICROSYSTEMS, INC.', + 0x0020F3: u'RAYNET CORPORATION', + 0x0020F4: u'SPECTRIX CORPORATION', + 0x0020F5: u'PANDATEL AG', + 0x0020F6: u'NET TEK AND KARLNET, INC.', + 0x0020F7: u'CYBERDATA', + 0x0020F8: u'CARRERA COMPUTERS, INC.', + 0x0020F9: u'PARALINK NETWORKS, INC.', + 0x0020FA: u'GDE SYSTEMS, INC.', + 0x0020FB: u'OCTEL COMMUNICATIONS CORP.', + 0x0020FC: u'MATROX', + 0x0020FD: u'ITV TECHNOLOGIES, INC.', + 0x0020FE: u'TOPWARE INC. 
/ GRAND COMPUTER', + 0x0020FF: u'SYMMETRICAL TECHNOLOGIES', + 0x002654: u'3Com Corporation', + 0x003000: u'ALLWELL TECHNOLOGY CORP.', + 0x003001: u'SMP', + 0x003002: u'Expand Networks', + 0x003003: u'Phasys Ltd.', + 0x003004: u'LEADTEK RESEARCH INC.', + 0x003005: u'Fujitsu Siemens Computers', + 0x003006: u'SUPERPOWER COMPUTER', + 0x003007: u'OPTI, INC.', + 0x003008: u'AVIO DIGITAL, INC.', + 0x003009: u'Tachion Networks, Inc.', + 0x00300A: u'AZTECH SYSTEMS LTD.', + 0x00300B: u'mPHASE Technologies, Inc.', + 0x00300C: u'CONGRUENCY, LTD.', + 0x00300D: u'MMC Technology, Inc.', + 0x00300E: u'Klotz Digital AG', + 0x00300F: u'IMT - Information Management T', + 0x003010: u'VISIONETICS INTERNATIONAL', + 0x003011: u'HMS FIELDBUS SYSTEMS AB', + 0x003012: u'DIGITAL ENGINEERING LTD.', + 0x003013: u'NEC Corporation', + 0x003014: u'DIVIO, INC.', + 0x003015: u'CP CLARE CORP.', + 0x003016: u'ISHIDA CO., LTD.', + 0x003017: u'BlueArc UK Ltd', + 0x003018: u'Jetway Information Co., Ltd.', + 0x003019: u'CISCO SYSTEMS, INC.', + 0x00301A: u'SMARTBRIDGES PTE. LTD.', + 0x00301B: u'SHUTTLE, INC.', + 0x00301C: u'ALTVATER AIRDATA SYSTEMS', + 0x00301D: u'SKYSTREAM, INC.', + 0x00301E: u'3COM Europe Ltd.', + 0x00301F: u'OPTICAL NETWORKS, INC.', + 0x003020: u'TSI, Inc..', + 0x003021: u'HSING TECH. 
ENTERPRISE CO.,LTD', + 0x003022: u'Fong Kai Industrial Co., Ltd.', + 0x003023: u'COGENT COMPUTER SYSTEMS, INC.', + 0x003024: u'CISCO SYSTEMS, INC.', + 0x003025: u'CHECKOUT COMPUTER SYSTEMS, LTD', + 0x003026: u'HeiTel Digital Video GmbH', + 0x003027: u'KERBANGO, INC.', + 0x003028: u'FASE Saldatura srl', + 0x003029: u'OPICOM', + 0x00302A: u'SOUTHERN INFORMATION', + 0x00302B: u'INALP NETWORKS, INC.', + 0x00302C: u'SYLANTRO SYSTEMS CORPORATION', + 0x00302D: u'QUANTUM BRIDGE COMMUNICATIONS', + 0x00302E: u'Hoft & Wessel AG', + 0x00302F: u'Smiths Industries', + 0x003030: u'HARMONIX CORPORATION', + 0x003031: u'LIGHTWAVE COMMUNICATIONS, INC.', + 0x003032: u'MagicRam, Inc.', + 0x003033: u'ORIENT TELECOM CO., LTD.', + 0x003034: u'SET ENGINEERING', + 0x003035: u'Corning Incorporated', + 0x003036: u'RMP ELEKTRONIKSYSTEME GMBH', + 0x003037: u'Packard Bell Nec Services', + 0x003038: u'XCP, INC.', + 0x003039: u'SOFTBOOK PRESS', + 0x00303A: u'MAATEL', + 0x00303B: u'PowerCom Technology', + 0x00303C: u'ONNTO CORP.', + 0x00303D: u'IVA CORPORATION', + 0x00303E: u'Radcom Ltd.', + 0x00303F: u'TurboComm Tech Inc.', + 0x003040: u'CISCO SYSTEMS, INC.', + 0x003041: u'SAEJIN T & M CO., LTD.', + 0x003042: u'DeTeWe-Deutsche Telephonwerke', + 0x003043: u'IDREAM TECHNOLOGIES, PTE. LTD.', + 0x003044: u'Portsmith LLC', + 0x003045: u'Village Networks, Inc. 
(VNI)', + 0x003046: u'Controlled Electronic Manageme', + 0x003047: u'NISSEI ELECTRIC CO., LTD.', + 0x003048: u'Supermicro Computer, Inc.', + 0x003049: u'BRYANT TECHNOLOGY, LTD.', + 0x00304A: u'Fraunhofer IPMS', + 0x00304B: u'ORBACOM SYSTEMS, INC.', + 0x00304C: u'APPIAN COMMUNICATIONS, INC.', + 0x00304D: u'ESI', + 0x00304E: u'BUSTEC PRODUCTION LTD.', + 0x00304F: u'PLANET Technology Corporation', + 0x003050: u'Versa Technology', + 0x003051: u'ORBIT AVIONIC & COMMUNICATION', + 0x003052: u'ELASTIC NETWORKS', + 0x003053: u'Basler AG', + 0x003054: u'CASTLENET TECHNOLOGY, INC.', + 0x003055: u'Hitachi Semiconductor America,', + 0x003056: u'Beck IPC GmbH', + 0x003057: u'QTelNet, Inc.', + 0x003058: u'API MOTION', + 0x003059: u'DIGITAL-LOGIC AG', + 0x00305A: u'TELGEN CORPORATION', + 0x00305B: u'MODULE DEPARTMENT', + 0x00305C: u'SMAR Laboratories Corp.', + 0x00305D: u'DIGITRA SYSTEMS, INC.', + 0x00305E: u'Abelko Innovation', + 0x00305F: u'IMACON APS', + 0x003060: u'Powerfile, Inc.', + 0x003061: u'MobyTEL', + 0x003062: u'PATH 1 NETWORK TECHNOL\'S INC.', + 0x003063: u'SANTERA SYSTEMS, INC.', + 0x003064: u'ADLINK TECHNOLOGY, INC.', + 0x003065: u'APPLE COMPUTER, INC.', + 0x003066: u'DIGITAL WIRELESS CORPORATION', + 0x003067: u'BIOSTAR MICROTECH INT\'L CORP.', + 0x003068: u'CYBERNETICS TECH. CO., LTD.', + 0x003069: u'IMPACCT TECHNOLOGY CORP.', + 0x00306A: u'PENTA MEDIA CO., LTD.', + 0x00306B: u'CMOS SYSTEMS, INC.', + 0x00306C: u'Hitex Holding GmbH', + 0x00306D: u'LUCENT TECHNOLOGIES', + 0x00306E: u'HEWLETT PACKARD', + 0x00306F: u'SEYEON TECH. 
CO., LTD.', + 0x003070: u'1Net Corporation', + 0x003071: u'Cisco Systems, Inc.', + 0x003072: u'INTELLIBYTE INC.', + 0x003073: u'International Microsystems, In', + 0x003074: u'EQUIINET LTD.', + 0x003075: u'ADTECH', + 0x003076: u'Akamba Corporation', + 0x003077: u'ONPREM NETWORKS', + 0x003078: u'Cisco Systems, Inc.', + 0x003079: u'CQOS, INC.', + 0x00307A: u'Advanced Technology & Systems', + 0x00307B: u'Cisco Systems, Inc.', + 0x00307C: u'ADID SA', + 0x00307D: u'GRE AMERICA, INC.', + 0x00307E: u'Redflex Communication Systems', + 0x00307F: u'IRLAN LTD.', + 0x003080: u'CISCO SYSTEMS, INC.', + 0x003081: u'ALTOS C&C', + 0x003082: u'TAIHAN ELECTRIC WIRE CO., LTD.', + 0x003083: u'Ivron Systems', + 0x003084: u'ALLIED TELESYN INTERNAIONAL', + 0x003085: u'CISCO SYSTEMS, INC.', + 0x003086: u'Transistor Devices, Inc.', + 0x003087: u'VEGA GRIESHABER KG', + 0x003088: u'Siara Systems, Inc.', + 0x003089: u'Spectrapoint Wireless, LLC', + 0x00308A: u'NICOTRA SISTEMI S.P.A', + 0x00308B: u'Brix Networks', + 0x00308C: u'ADVANCED DIGITAL INFORMATION', + 0x00308D: u'PINNACLE SYSTEMS, INC.', + 0x00308E: u'CROSS MATCH TECHNOLOGIES, INC.', + 0x00308F: u'MICRILOR, Inc.', + 0x003090: u'CYRA TECHNOLOGIES, INC.', + 0x003091: u'TAIWAN FIRST LINE ELEC. 
CORP.', + 0x003092: u'ModuNORM GmbH', + 0x003093: u'SONNET TECHNOLOGIES, INC.', + 0x003094: u'Cisco Systems, Inc.', + 0x003095: u'Procomp Informatics, Ltd.', + 0x003096: u'CISCO SYSTEMS, INC.', + 0x003097: u'EXOMATIC AB', + 0x003098: u'Global Converging Technologies', + 0x003099: u'BOENIG UND KALLENBACH OHG', + 0x00309A: u'ASTRO TERRA CORP.', + 0x00309B: u'Smartware', + 0x00309C: u'Timing Applications, Inc.', + 0x00309D: u'Nimble Microsystems, Inc.', + 0x00309E: u'WORKBIT CORPORATION.', + 0x00309F: u'AMBER NETWORKS', + 0x0030A0: u'TYCO SUBMARINE SYSTEMS, LTD.', + 0x0030A1: u'WEBGATE Inc.', + 0x0030A2: u'Lightner Engineering', + 0x0030A3: u'CISCO SYSTEMS, INC.', + 0x0030A4: u'Woodwind Communications System', + 0x0030A5: u'ACTIVE POWER', + 0x0030A6: u'VIANET TECHNOLOGIES, LTD.', + 0x0030A7: u'SCHWEITZER ENGINEERING', + 0x0030A8: u'OL\'E COMMUNICATIONS, INC.', + 0x0030A9: u'Netiverse, Inc.', + 0x0030AA: u'AXUS MICROSYSTEMS, INC.', + 0x0030AB: u'DELTA NETWORKS, INC.', + 0x0030AC: u'Systeme Lauer GmbH & Co., Ltd.', + 0x0030AD: u'SHANGHAI COMMUNICATION', + 0x0030AE: u'Times N System, Inc.', + 0x0030AF: u'Honeywell GmbH', + 0x0030B0: u'Convergenet Technologies', + 0x0030B1: u'aXess-pro networks GmbH', + 0x0030B2: u'L-3 Sonoma EO', + 0x0030B3: u'San Valley Systems, Inc.', + 0x0030B4: u'INTERSIL CORP.', + 0x0030B5: u'Tadiran Microwave Networks', + 0x0030B6: u'CISCO SYSTEMS, INC.', + 0x0030B7: u'Teletrol Systems, Inc.', + 0x0030B8: u'RiverDelta Networks', + 0x0030B9: u'ECTEL', + 0x0030BA: u'AC&T SYSTEM CO., LTD.', + 0x0030BB: u'CacheFlow, Inc.', + 0x0030BC: u'Optronic AG', + 0x0030BD: u'BELKIN COMPONENTS', + 0x0030BE: u'City-Net Technology, Inc.', + 0x0030BF: u'MULTIDATA GMBH', + 0x0030C0: u'Lara Technology, Inc.', + 0x0030C1: u'HEWLETT-PACKARD', + 0x0030C2: u'COMONE', + 0x0030C3: u'FLUECKIGER ELEKTRONIK AG', + 0x0030C4: u'Canon Imaging System Technologies, Inc.', + 0x0030C5: u'CADENCE DESIGN SYSTEMS', + 0x0030C6: u'CONTROL SOLUTIONS, INC.', + 0x0030C7: u'MACROMATE CORP.', + 
0x0030C8: u'GAD LINE, LTD.', + 0x0030C9: u'LuxN, N', + 0x0030CA: u'Discovery Com', + 0x0030CB: u'OMNI FLOW COMPUTERS, INC.', + 0x0030CC: u'Tenor Networks, Inc.', + 0x0030CD: u'CONEXANT SYSTEMS, INC.', + 0x0030CE: u'Zaffire', + 0x0030CF: u'TWO TECHNOLOGIES, INC.', + 0x0030D0: u'Tellabs', + 0x0030D1: u'INOVA CORPORATION', + 0x0030D2: u'WIN TECHNOLOGIES, CO., LTD.', + 0x0030D3: u'Agilent Technologies', + 0x0030D4: u'AAE Systems, Inc', + 0x0030D5: u'DResearch GmbH', + 0x0030D6: u'MSC VERTRIEBS GMBH', + 0x0030D7: u'Innovative Systems, L.L.C.', + 0x0030D8: u'SITEK', + 0x0030D9: u'DATACORE SOFTWARE CORP.', + 0x0030DA: u'COMTREND CO.', + 0x0030DB: u'Mindready Solutions, Inc.', + 0x0030DC: u'RIGHTECH CORPORATION', + 0x0030DD: u'INDIGITA CORPORATION', + 0x0030DE: u'WAGO Kontakttechnik GmbH', + 0x0030DF: u'KB/TEL TELECOMUNICACIONES', + 0x0030E0: u'OXFORD SEMICONDUCTOR LTD.', + 0x0030E1: u'ACROTRON SYSTEMS, INC.', + 0x0030E2: u'GARNET SYSTEMS CO., LTD.', + 0x0030E3: u'SEDONA NETWORKS CORP.', + 0x0030E4: u'CHIYODA SYSTEM RIKEN', + 0x0030E5: u'Amper Datos S.A.', + 0x0030E6: u'Draeger Medical Systems, Inc.', + 0x0030E7: u'CNF MOBILE SOLUTIONS, INC.', + 0x0030E8: u'ENSIM CORP.', + 0x0030E9: u'GMA COMMUNICATION MANUFACT\'G', + 0x0030EA: u'TeraForce Technology Corporation', + 0x0030EB: u'TURBONET COMMUNICATIONS, INC.', + 0x0030EC: u'BORGARDT', + 0x0030ED: u'Expert Magnetics Corp.', + 0x0030EE: u'DSG Technology, Inc.', + 0x0030EF: u'NEON TECHNOLOGY, INC.', + 0x0030F0: u'Uniform Industrial Corp.', + 0x0030F1: u'Accton Technology Corp.', + 0x0030F2: u'CISCO SYSTEMS, INC.', + 0x0030F3: u'At Work Computers', + 0x0030F4: u'STARDOT TECHNOLOGIES', + 0x0030F5: u'Wild Lab. 
Ltd.', + 0x0030F6: u'SECURELOGIX CORPORATION', + 0x0030F7: u'RAMIX INC.', + 0x0030F8: u'Dynapro Systems, Inc.', + 0x0030F9: u'Sollae Systems Co., Ltd.', + 0x0030FA: u'TELICA, INC.', + 0x0030FB: u'AZS Technology AG', + 0x0030FC: u'Terawave Communications, Inc.', + 0x0030FD: u'INTEGRATED SYSTEMS DESIGN', + 0x0030FE: u'DSA GmbH', + 0x0030FF: u'DATAFAB SYSTEMS, INC.', + 0x004000: u'PCI COMPONENTES DA AMZONIA LTD', + 0x004001: u'ZYXEL COMMUNICATIONS, INC.', + 0x004002: u'PERLE SYSTEMS LIMITED', + 0x004003: u'Emerson Process Management Power & Water Solutions, Inc.', + 0x004004: u'ICM CO. LTD.', + 0x004005: u'ANI COMMUNICATIONS INC.', + 0x004006: u'SAMPO TECHNOLOGY CORPORATION', + 0x004007: u'TELMAT INFORMATIQUE', + 0x004008: u'A PLUS INFO CORPORATION', + 0x004009: u'TACHIBANA TECTRON CO., LTD.', + 0x00400A: u'PIVOTAL TECHNOLOGIES, INC.', + 0x00400B: u'CISCO SYSTEMS, INC.', + 0x00400C: u'GENERAL MICRO SYSTEMS, INC.', + 0x00400D: u'LANNET DATA COMMUNICATIONS,LTD', + 0x00400E: u'MEMOTEC COMMUNICATIONS, INC.', + 0x00400F: u'DATACOM TECHNOLOGIES', + 0x004010: u'SONIC SYSTEMS, INC.', + 0x004011: u'ANDOVER CONTROLS CORPORATION', + 0x004012: u'WINDATA, INC.', + 0x004013: u'NTT DATA COMM. 
SYSTEMS CORP.', + 0x004014: u'COMSOFT GMBH', + 0x004015: u'ASCOM INFRASYS AG', + 0x004016: u'HADAX ELECTRONICS, INC.', + 0x004017: u'Silex Technology America', + 0x004018: u'ADOBE SYSTEMS, INC.', + 0x004019: u'AEON SYSTEMS, INC.', + 0x00401A: u'FUJI ELECTRIC CO., LTD.', + 0x00401B: u'PRINTER SYSTEMS CORP.', + 0x00401C: u'AST RESEARCH, INC.', + 0x00401D: u'INVISIBLE SOFTWARE, INC.', + 0x00401E: u'ICC', + 0x00401F: u'COLORGRAPH LTD', + 0x004020: u'PINACL COMMUNICATION', + 0x004021: u'RASTER GRAPHICS', + 0x004022: u'KLEVER COMPUTERS, INC.', + 0x004023: u'LOGIC CORPORATION', + 0x004024: u'COMPAC INC.', + 0x004025: u'MOLECULAR DYNAMICS', + 0x004026: u'MELCO, INC.', + 0x004027: u'SMC MASSACHUSETTS, INC.', + 0x004028: u'NETCOMM LIMITED', + 0x004029: u'COMPEX', + 0x00402A: u'CANOGA-PERKINS', + 0x00402B: u'TRIGEM COMPUTER, INC.', + 0x00402C: u'ISIS DISTRIBUTED SYSTEMS, INC.', + 0x00402D: u'HARRIS ADACOM CORPORATION', + 0x00402E: u'PRECISION SOFTWARE, INC.', + 0x00402F: u'XLNT DESIGNS INC.', + 0x004030: u'GK COMPUTER', + 0x004031: u'KOKUSAI ELECTRIC CO., LTD', + 0x004032: u'DIGITAL COMMUNICATIONS', + 0x004033: u'ADDTRON TECHNOLOGY CO., LTD.', + 0x004034: u'BUSTEK CORPORATION', + 0x004035: u'OPCOM', + 0x004036: u'TRIBE COMPUTER WORKS, INC.', + 0x004037: u'SEA-ILAN, INC.', + 0x004038: u'TALENT ELECTRIC INCORPORATED', + 0x004039: u'OPTEC DAIICHI DENKO CO., LTD.', + 0x00403A: u'IMPACT TECHNOLOGIES', + 0x00403B: u'SYNERJET INTERNATIONAL CORP.', + 0x00403C: u'FORKS, INC.', + 0x00403D: u'TERADATA', + 0x00403E: u'RASTER OPS CORPORATION', + 0x00403F: u'SSANGYONG COMPUTER SYSTEMS', + 0x004040: u'RING ACCESS, INC.', + 0x004041: u'FUJIKURA LTD.', + 0x004042: u'N.A.T. 
GMBH', + 0x004043: u'NOKIA TELECOMMUNICATIONS', + 0x004044: u'QNIX COMPUTER CO., LTD.', + 0x004045: u'TWINHEAD CORPORATION', + 0x004046: u'UDC RESEARCH LIMITED', + 0x004047: u'WIND RIVER SYSTEMS', + 0x004048: u'SMD INFORMATICA S.A.', + 0x004049: u'TEGIMENTA AG', + 0x00404A: u'WEST AUSTRALIAN DEPARTMENT', + 0x00404B: u'MAPLE COMPUTER SYSTEMS', + 0x00404C: u'HYPERTEC PTY LTD.', + 0x00404D: u'TELECOMMUNICATIONS TECHNIQUES', + 0x00404E: u'FLUENT, INC.', + 0x00404F: u'SPACE & NAVAL WARFARE SYSTEMS', + 0x004050: u'IRONICS, INCORPORATED', + 0x004051: u'GRACILIS, INC.', + 0x004052: u'STAR TECHNOLOGIES, INC.', + 0x004053: u'AMPRO COMPUTERS', + 0x004054: u'CONNECTION MACHINES SERVICES', + 0x004055: u'METRONIX GMBH', + 0x004056: u'MCM JAPAN LTD.', + 0x004057: u'LOCKHEED - SANDERS', + 0x004058: u'KRONOS, INC.', + 0x004059: u'YOSHIDA KOGYO K. K.', + 0x00405A: u'GOLDSTAR INFORMATION & COMM.', + 0x00405B: u'FUNASSET LIMITED', + 0x00405C: u'FUTURE SYSTEMS, INC.', + 0x00405D: u'STAR-TEK, INC.', + 0x00405E: u'NORTH HILLS ISRAEL', + 0x00405F: u'AFE COMPUTERS LTD.', + 0x004060: u'COMENDEC LTD', + 0x004061: u'DATATECH ENTERPRISES CO., LTD.', + 0x004062: u'E-SYSTEMS, INC./GARLAND DIV.', + 0x004063: u'VIA TECHNOLOGIES, INC.', + 0x004064: u'KLA INSTRUMENTS CORPORATION', + 0x004065: u'GTE SPACENET', + 0x004066: u'HITACHI CABLE, LTD.', + 0x004067: u'OMNIBYTE CORPORATION', + 0x004068: u'EXTENDED SYSTEMS', + 0x004069: u'LEMCOM SYSTEMS, INC.', + 0x00406A: u'KENTEK INFORMATION SYSTEMS,INC', + 0x00406B: u'SYSGEN', + 0x00406C: u'COPERNIQUE', + 0x00406D: u'LANCO, INC.', + 0x00406E: u'COROLLARY, INC.', + 0x00406F: u'SYNC RESEARCH INC.', + 0x004070: u'INTERWARE CO., LTD.', + 0x004071: u'ATM COMPUTER GMBH', + 0x004072: u'Applied Innovation Inc.', + 0x004073: u'BASS ASSOCIATES', + 0x004074: u'CABLE AND WIRELESS', + 0x004075: u'M-TRADE (UK) LTD', + 0x004076: u'Sun Conversion Technologies', + 0x004077: u'MAXTON TECHNOLOGY CORPORATION', + 0x004078: u'WEARNES AUTOMATION PTE LTD', + 0x004079: u'JUKO 
MANUFACTURE COMPANY, LTD.', + 0x00407A: u'SOCIETE D\'EXPLOITATION DU CNIT', + 0x00407B: u'SCIENTIFIC ATLANTA', + 0x00407C: u'QUME CORPORATION', + 0x00407D: u'EXTENSION TECHNOLOGY CORP.', + 0x00407E: u'EVERGREEN SYSTEMS, INC.', + 0x00407F: u'FLIR Systems', + 0x004080: u'ATHENIX CORPORATION', + 0x004081: u'MANNESMANN SCANGRAPHIC GMBH', + 0x004082: u'LABORATORY EQUIPMENT CORP.', + 0x004083: u'TDA INDUSTRIA DE PRODUTOS', + 0x004084: u'HONEYWELL INC.', + 0x004085: u'SAAB INSTRUMENTS AB', + 0x004086: u'MICHELS & KLEBERHOFF COMPUTER', + 0x004087: u'UBITREX CORPORATION', + 0x004088: u'MOBIUS TECHNOLOGIES, INC.', + 0x004089: u'MEIDENSHA CORPORATION', + 0x00408A: u'TPS TELEPROCESSING SYS. GMBH', + 0x00408B: u'RAYLAN CORPORATION', + 0x00408C: u'AXIS COMMUNICATIONS AB', + 0x00408D: u'THE GOODYEAR TIRE & RUBBER CO.', + 0x00408E: u'DIGILOG, INC.', + 0x00408F: u'WM-DATA MINFO AB', + 0x004090: u'ANSEL COMMUNICATIONS', + 0x004091: u'PROCOMP INDUSTRIA ELETRONICA', + 0x004092: u'ASP COMPUTER PRODUCTS, INC.', + 0x004093: u'PAXDATA NETWORKS LTD.', + 0x004094: u'SHOGRAPHICS, INC.', + 0x004095: u'R.P.T. 
INTERGROUPS INT\'L LTD.', + 0x004096: u'Cisco Systems, Inc.', + 0x004097: u'DATEX DIVISION OF', + 0x004098: u'DRESSLER GMBH & CO.', + 0x004099: u'NEWGEN SYSTEMS CORP.', + 0x00409A: u'NETWORK EXPRESS, INC.', + 0x00409B: u'HAL COMPUTER SYSTEMS INC.', + 0x00409C: u'TRANSWARE', + 0x00409D: u'DIGIBOARD, INC.', + 0x00409E: u'CONCURRENT TECHNOLOGIES LTD.', + 0x00409F: u'LANCAST/CASAT TECHNOLOGY, INC.', + 0x0040A0: u'GOLDSTAR CO., LTD.', + 0x0040A1: u'ERGO COMPUTING', + 0x0040A2: u'KINGSTAR TECHNOLOGY INC.', + 0x0040A3: u'MICROUNITY SYSTEMS ENGINEERING', + 0x0040A4: u'ROSE ELECTRONICS', + 0x0040A5: u'CLINICOMP INTL.', + 0x0040A6: u'Cray, Inc.', + 0x0040A7: u'ITAUTEC PHILCO S.A.', + 0x0040A8: u'IMF INTERNATIONAL LTD.', + 0x0040A9: u'DATACOM INC.', + 0x0040AA: u'VALMET AUTOMATION INC.', + 0x0040AB: u'ROLAND DG CORPORATION', + 0x0040AC: u'SUPER WORKSTATION, INC.', + 0x0040AD: u'SMA REGELSYSTEME GMBH', + 0x0040AE: u'DELTA CONTROLS, INC.', + 0x0040AF: u'DIGITAL PRODUCTS, INC.', + 0x0040B0: u'BYTEX CORPORATION, ENGINEERING', + 0x0040B1: u'CODONICS INC.', + 0x0040B2: u'SYSTEMFORSCHUNG', + 0x0040B3: u'PAR MICROSYSTEMS CORPORATION', + 0x0040B4: u'NEXTCOM K.K.', + 0x0040B5: u'VIDEO TECHNOLOGY COMPUTERS LTD', + 0x0040B6: u'COMPUTERM CORPORATION', + 0x0040B7: u'STEALTH COMPUTER SYSTEMS', + 0x0040B8: u'IDEA ASSOCIATES', + 0x0040B9: u'MACQ ELECTRONIQUE SA', + 0x0040BA: u'ALLIANT COMPUTER SYSTEMS CORP.', + 0x0040BB: u'GOLDSTAR CABLE CO., LTD.', + 0x0040BC: u'ALGORITHMICS LTD.', + 0x0040BD: u'STARLIGHT NETWORKS, INC.', + 0x0040BE: u'BOEING DEFENSE & SPACE', + 0x0040BF: u'CHANNEL SYSTEMS INTERN\'L INC.', + 0x0040C0: u'VISTA CONTROLS CORPORATION', + 0x0040C1: u'BIZERBA-WERKE WILHEIM KRAUT', + 0x0040C2: u'APPLIED COMPUTING DEVICES', + 0x0040C3: u'FISCHER AND PORTER CO.', + 0x0040C4: u'KINKEI SYSTEM CORPORATION', + 0x0040C5: u'MICOM COMMUNICATIONS INC.', + 0x0040C6: u'FIBERNET RESEARCH, INC.', + 0x0040C7: u'RUBY TECH CORPORATION', + 0x0040C8: u'MILAN TECHNOLOGY CORPORATION', + 0x0040C9: 
u'NCUBE', + 0x0040CA: u'FIRST INTERNAT\'L COMPUTER, INC', + 0x0040CB: u'LANWAN TECHNOLOGIES', + 0x0040CC: u'SILCOM MANUF\'G TECHNOLOGY INC.', + 0x0040CD: u'TERA MICROSYSTEMS, INC.', + 0x0040CE: u'NET-SOURCE, INC.', + 0x0040CF: u'STRAWBERRY TREE, INC.', + 0x0040D0: u'MITAC INTERNATIONAL CORP.', + 0x0040D1: u'FUKUDA DENSHI CO., LTD.', + 0x0040D2: u'PAGINE CORPORATION', + 0x0040D3: u'KIMPSION INTERNATIONAL CORP.', + 0x0040D4: u'GAGE TALKER CORP.', + 0x0040D5: u'SARTORIUS AG', + 0x0040D6: u'LOCAMATION B.V.', + 0x0040D7: u'STUDIO GEN INC.', + 0x0040D8: u'OCEAN OFFICE AUTOMATION LTD.', + 0x0040D9: u'AMERICAN MEGATRENDS INC.', + 0x0040DA: u'TELSPEC LTD', + 0x0040DB: u'ADVANCED TECHNICAL SOLUTIONS', + 0x0040DC: u'TRITEC ELECTRONIC GMBH', + 0x0040DD: u'HONG TECHNOLOGIES', + 0x0040DE: u'ELETTRONICA SAN GIORGIO', + 0x0040DF: u'DIGALOG SYSTEMS, INC.', + 0x0040E0: u'ATOMWIDE LTD.', + 0x0040E1: u'MARNER INTERNATIONAL, INC.', + 0x0040E2: u'MESA RIDGE TECHNOLOGIES, INC.', + 0x0040E3: u'QUIN SYSTEMS LTD', + 0x0040E4: u'E-M TECHNOLOGY, INC.', + 0x0040E5: u'SYBUS CORPORATION', + 0x0040E6: u'C.A.E.N.', + 0x0040E7: u'ARNOS INSTRUMENTS & COMPUTER', + 0x0040E8: u'CHARLES RIVER DATA SYSTEMS,INC', + 0x0040E9: u'ACCORD SYSTEMS, INC.', + 0x0040EA: u'PLAIN TREE SYSTEMS INC', + 0x0040EB: u'MARTIN MARIETTA CORPORATION', + 0x0040EC: u'MIKASA SYSTEM ENGINEERING', + 0x0040ED: u'NETWORK CONTROLS INT\'NATL INC.', + 0x0040EE: u'OPTIMEM', + 0x0040EF: u'HYPERCOM, INC.', + 0x0040F0: u'MICRO SYSTEMS, INC.', + 0x0040F1: u'CHUO ELECTRONICS CO., LTD.', + 0x0040F2: u'JANICH & KLASS COMPUTERTECHNIK', + 0x0040F3: u'NETCOR', + 0x0040F4: u'CAMEO COMMUNICATIONS, INC.', + 0x0040F5: u'OEM ENGINES', + 0x0040F6: u'KATRON COMPUTERS INC.', + 0x0040F7: u'POLAROID MEDICAL IMAGING SYS.', + 0x0040F8: u'SYSTEMHAUS DISCOM', + 0x0040F9: u'COMBINET', + 0x0040FA: u'MICROBOARDS, INC.', + 0x0040FB: u'CASCADE COMMUNICATIONS CORP.', + 0x0040FC: u'IBR COMPUTER TECHNIK GMBH', + 0x0040FD: u'LXE', + 0x0040FE: u'SYMPLEX COMMUNICATIONS', 
+ 0x0040FF: u'TELEBIT CORPORATION', + 0x004252: u'RLX Technologies', + 0x004501: u'Versus Technology, Inc.', + 0x005000: u'NEXO COMMUNICATIONS, INC.', + 0x005001: u'YAMASHITA SYSTEMS CORP.', + 0x005002: u'OMNISEC AG', + 0x005003: u'GRETAG MACBETH AG', + 0x005004: u'3COM CORPORATION', + 0x005006: u'TAC AB', + 0x005007: u'SIEMENS TELECOMMUNICATION SYSTEMS LIMITED', + 0x005008: u'TIVA MICROCOMPUTER CORP. (TMC)', + 0x005009: u'PHILIPS BROADBAND NETWORKS', + 0x00500A: u'IRIS TECHNOLOGIES, INC.', + 0x00500B: u'CISCO SYSTEMS, INC.', + 0x00500C: u'e-Tek Labs, Inc.', + 0x00500D: u'SATORI ELECTORIC CO., LTD.', + 0x00500E: u'CHROMATIS NETWORKS, INC.', + 0x00500F: u'CISCO SYSTEMS, INC.', + 0x005010: u'NovaNET Learning, Inc.', + 0x005012: u'CBL - GMBH', + 0x005013: u'Chaparral Network Storage', + 0x005014: u'CISCO SYSTEMS, INC.', + 0x005015: u'BRIGHT STAR ENGINEERING', + 0x005016: u'SST/WOODHEAD INDUSTRIES', + 0x005017: u'RSR S.R.L.', + 0x005018: u'AMIT, Inc.', + 0x005019: u'SPRING TIDE NETWORKS, INC.', + 0x00501A: u'UISIQN', + 0x00501B: u'ABL CANADA, INC.', + 0x00501C: u'JATOM SYSTEMS, INC.', + 0x00501E: u'Miranda Technologies, Inc.', + 0x00501F: u'MRG SYSTEMS, LTD.', + 0x005020: u'MEDIASTAR CO., LTD.', + 0x005021: u'EIS INTERNATIONAL, INC.', + 0x005022: u'ZONET TECHNOLOGY, INC.', + 0x005023: u'PG DESIGN ELECTRONICS, INC.', + 0x005024: u'NAVIC SYSTEMS, INC.', + 0x005026: u'COSYSTEMS, INC.', + 0x005027: u'GENICOM CORPORATION', + 0x005028: u'AVAL COMMUNICATIONS', + 0x005029: u'1394 PRINTER WORKING GROUP', + 0x00502A: u'CISCO SYSTEMS, INC.', + 0x00502B: u'GENRAD LTD.', + 0x00502C: u'SOYO COMPUTER, INC.', + 0x00502D: u'ACCEL, INC.', + 0x00502E: u'CAMBEX CORPORATION', + 0x00502F: u'TollBridge Technologies, Inc.', + 0x005030: u'FUTURE PLUS SYSTEMS', + 0x005031: u'AEROFLEX LABORATORIES, INC.', + 0x005032: u'PICAZO COMMUNICATIONS, INC.', + 0x005033: u'MAYAN NETWORKS', + 0x005036: u'NETCAM, LTD.', + 0x005037: u'KOGA ELECTRONICS CO.', + 0x005038: u'DAIN TELECOM CO., LTD.', + 0x005039: 
u'MARINER NETWORKS', + 0x00503A: u'DATONG ELECTRONICS LTD.', + 0x00503B: u'MEDIAFIRE CORPORATION', + 0x00503C: u'TSINGHUA NOVEL ELECTRONICS', + 0x00503E: u'CISCO SYSTEMS, INC.', + 0x00503F: u'ANCHOR GAMES', + 0x005040: u'Matsushita Electric Works, Ltd.', + 0x005041: u'Coretronic Corporation', + 0x005042: u'SCI MANUFACTURING SINGAPORE PTE, LTD.', + 0x005043: u'MARVELL SEMICONDUCTOR, INC.', + 0x005044: u'ASACA CORPORATION', + 0x005045: u'RIOWORKS SOLUTIONS, INC.', + 0x005046: u'MENICX INTERNATIONAL CO., LTD.', + 0x005047: u'PRIVATE', + 0x005048: u'INFOLIBRIA', + 0x005049: u'ELLACOYA NETWORKS, INC.', + 0x00504A: u'ELTECO A.S.', + 0x00504B: u'BARCONET N.V.', + 0x00504C: u'GALIL MOTION CONTROL, INC.', + 0x00504D: u'TOKYO ELECTRON DEVICE LTD.', + 0x00504E: u'SIERRA MONITOR CORP.', + 0x00504F: u'OLENCOM ELECTRONICS', + 0x005050: u'CISCO SYSTEMS, INC.', + 0x005051: u'IWATSU ELECTRIC CO., LTD.', + 0x005052: u'TIARA NETWORKS, INC.', + 0x005053: u'CISCO SYSTEMS, INC.', + 0x005054: u'CISCO SYSTEMS, INC.', + 0x005055: u'DOMS A/S', + 0x005056: u'VMWare, Inc.', + 0x005057: u'BROADBAND ACCESS SYSTEMS', + 0x005058: u'VegaStream Limted', + 0x005059: u'iBAHN', + 0x00505A: u'NETWORK ALCHEMY, INC.', + 0x00505B: u'KAWASAKI LSI U.S.A., INC.', + 0x00505C: u'TUNDO CORPORATION', + 0x00505E: u'DIGITEK MICROLOGIC S.A.', + 0x00505F: u'BRAND INNOVATORS', + 0x005060: u'TANDBERG TELECOM AS', + 0x005062: u'KOUWELL ELECTRONICS CORP. 
**', + 0x005063: u'OY COMSEL SYSTEM AB', + 0x005064: u'CAE ELECTRONICS', + 0x005065: u'DENSEI-LAMBAD Co., Ltd.', + 0x005066: u'AtecoM GmbH advanced telecomunication modules', + 0x005067: u'AEROCOMM, INC.', + 0x005068: u'ELECTRONIC INDUSTRIES ASSOCIATION', + 0x005069: u'PixStream Incorporated', + 0x00506A: u'EDEVA, INC.', + 0x00506B: u'SPX-ATEG', + 0x00506C: u'G & L BEIJER ELECTRONICS AB', + 0x00506D: u'VIDEOJET SYSTEMS', + 0x00506E: u'CORDER ENGINEERING CORPORATION', + 0x00506F: u'G-CONNECT', + 0x005070: u'CHAINTECH COMPUTER CO., LTD.', + 0x005071: u'AIWA CO., LTD.', + 0x005072: u'CORVIS CORPORATION', + 0x005073: u'CISCO SYSTEMS, INC.', + 0x005074: u'ADVANCED HI-TECH CORP.', + 0x005075: u'KESTREL SOLUTIONS', + 0x005076: u'IBM', + 0x005077: u'PROLIFIC TECHNOLOGY, INC.', + 0x005078: u'MEGATON HOUSE, LTD.', + 0x005079: u'PRIVATE', + 0x00507A: u'XPEED, INC.', + 0x00507B: u'MERLOT COMMUNICATIONS', + 0x00507C: u'VIDEOCON AG', + 0x00507D: u'IFP', + 0x00507E: u'NEWER TECHNOLOGY', + 0x00507F: u'DrayTek Corp.', + 0x005080: u'CISCO SYSTEMS, INC.', + 0x005081: u'MURATA MACHINERY, LTD.', + 0x005082: u'FORESSON CORPORATION', + 0x005083: u'GILBARCO, INC.', + 0x005084: u'ATL PRODUCTS', + 0x005086: u'TELKOM SA, LTD.', + 0x005087: u'TERASAKI ELECTRIC CO., LTD.', + 0x005088: u'AMANO CORPORATION', + 0x005089: u'SAFETY MANAGEMENT SYSTEMS', + 0x00508B: u'COMPAQ COMPUTER CORPORATION', + 0x00508C: u'RSI SYSTEMS', + 0x00508D: u'ABIT COMPUTER CORPORATION', + 0x00508E: u'OPTIMATION, INC.', + 0x00508F: u'ASITA TECHNOLOGIES INT\'L LTD.', + 0x005090: u'DCTRI', + 0x005091: u'NETACCESS, INC.', + 0x005092: u'RIGAKU INDUSTRIAL CORPORATION', + 0x005093: u'BOEING', + 0x005094: u'PACE MICRO TECHNOLOGY PLC', + 0x005095: u'PERACOM NETWORKS', + 0x005096: u'SALIX TECHNOLOGIES, INC.', + 0x005097: u'MMC-EMBEDDED COMPUTERTECHNIK GmbH', + 0x005098: u'GLOBALOOP, LTD.', + 0x005099: u'3COM EUROPE, LTD.', + 0x00509A: u'TAG ELECTRONIC SYSTEMS', + 0x00509B: u'SWITCHCORE AB', + 0x00509C: u'BETA RESEARCH', + 
0x00509D: u'THE INDUSTREE B.V.', + 0x00509E: u'Les Technologies SoftAcoustik Inc.', + 0x00509F: u'HORIZON COMPUTER', + 0x0050A0: u'DELTA COMPUTER SYSTEMS, INC.', + 0x0050A1: u'CARLO GAVAZZI, INC.', + 0x0050A2: u'CISCO SYSTEMS, INC.', + 0x0050A3: u'TransMedia Communications, Inc.', + 0x0050A4: u'IO TECH, INC.', + 0x0050A5: u'CAPITOL BUSINESS SYSTEMS, LTD.', + 0x0050A6: u'OPTRONICS', + 0x0050A7: u'CISCO SYSTEMS, INC.', + 0x0050A8: u'OpenCon Systems, Inc.', + 0x0050A9: u'MOLDAT WIRELESS TECHNOLGIES', + 0x0050AA: u'KONICA MINOLTA HOLDINGS, INC.', + 0x0050AB: u'NALTEC, INC.', + 0x0050AC: u'MAPLE COMPUTER CORPORATION', + 0x0050AD: u'CommUnique Wireless Corp.', + 0x0050AE: u'IWAKI ELECTRONICS CO., LTD.', + 0x0050AF: u'INTERGON, INC.', + 0x0050B0: u'TECHNOLOGY ATLANTA CORPORATION', + 0x0050B1: u'GIDDINGS & LEWIS', + 0x0050B2: u'BRODEL AUTOMATION', + 0x0050B3: u'VOICEBOARD CORPORATION', + 0x0050B4: u'SATCHWELL CONTROL SYSTEMS, LTD', + 0x0050B5: u'FICHET-BAUCHE', + 0x0050B6: u'GOOD WAY IND. CO., LTD.', + 0x0050B7: u'BOSER TECHNOLOGY CO., LTD.', + 0x0050B8: u'INOVA COMPUTERS GMBH & CO. 
KG', + 0x0050B9: u'XITRON TECHNOLOGIES, INC.', + 0x0050BA: u'D-LINK', + 0x0050BB: u'CMS TECHNOLOGIES', + 0x0050BC: u'HAMMER STORAGE SOLUTIONS', + 0x0050BD: u'CISCO SYSTEMS, INC.', + 0x0050BE: u'FAST MULTIMEDIA AG', + 0x0050BF: u'MOTOTECH INC.', + 0x0050C0: u'GATAN, INC.', + 0x0050C1: u'GEMFLEX NETWORKS, LTD.', + 0x0050C2: u'IEEE REGISTRATION AUTHORITY', + 0x0050C4: u'IMD', + 0x0050C5: u'ADS TECHNOLOGIES, INC.', + 0x0050C6: u'LOOP TELECOMMUNICATION INTERNATIONAL, INC.', + 0x0050C8: u'ADDONICS COMMUNICATIONS, INC.', + 0x0050C9: u'MASPRO DENKOH CORP.', + 0x0050CA: u'NET TO NET TECHNOLOGIES', + 0x0050CB: u'JETTER', + 0x0050CC: u'XYRATEX', + 0x0050CD: u'DIGIANSWER A/S', + 0x0050CE: u'LG INTERNATIONAL CORP.', + 0x0050CF: u'VANLINK COMMUNICATION TECHNOLOGY RESEARCH INSTITUTE', + 0x0050D0: u'MINERVA SYSTEMS', + 0x0050D1: u'CISCO SYSTEMS, INC.', + 0x0050D2: u'CMC Electronics Inc', + 0x0050D3: u'DIGITAL AUDIO PROCESSING PTY. LTD.', + 0x0050D4: u'JOOHONG INFORMATION &', + 0x0050D5: u'AD SYSTEMS CORP.', + 0x0050D6: u'ATLAS COPCO TOOLS AB', + 0x0050D7: u'TELSTRAT', + 0x0050D8: u'UNICORN COMPUTER CORP.', + 0x0050D9: u'ENGETRON-ENGENHARIA ELETRONICA IND. e COM. LTDA', + 0x0050DA: u'3COM CORPORATION', + 0x0050DB: u'CONTEMPORARY CONTROL', + 0x0050DC: u'TAS TELEFONBAU A. SCHWABE GMBH & CO. 
KG', + 0x0050DD: u'SERRA SOLDADURA, S.A.', + 0x0050DE: u'SIGNUM SYSTEMS CORP.', + 0x0050DF: u'AirFiber, Inc.', + 0x0050E1: u'NS TECH ELECTRONICS SDN BHD', + 0x0050E2: u'CISCO SYSTEMS, INC.', + 0x0050E3: u'Terayon Communications Systems', + 0x0050E4: u'APPLE COMPUTER, INC.', + 0x0050E6: u'HAKUSAN CORPORATION', + 0x0050E7: u'PARADISE INNOVATIONS (ASIA)', + 0x0050E8: u'NOMADIX INC.', + 0x0050EA: u'XEL COMMUNICATIONS, INC.', + 0x0050EB: u'ALPHA-TOP CORPORATION', + 0x0050EC: u'OLICOM A/S', + 0x0050ED: u'ANDA NETWORKS', + 0x0050EE: u'TEK DIGITEL CORPORATION', + 0x0050EF: u'SPE Systemhaus GmbH', + 0x0050F0: u'CISCO SYSTEMS, INC.', + 0x0050F1: u'LIBIT SIGNAL PROCESSING, LTD.', + 0x0050F2: u'MICROSOFT CORP.', + 0x0050F3: u'GLOBAL NET INFORMATION CO., Ltd.', + 0x0050F4: u'SIGMATEK GMBH & CO. KG', + 0x0050F6: u'PAN-INTERNATIONAL INDUSTRIAL CORP.', + 0x0050F7: u'VENTURE MANUFACTURING (SINGAPORE) LTD.', + 0x0050F8: u'ENTREGA TECHNOLOGIES, INC.', + 0x0050F9: u'SENSORMATIC ACD', + 0x0050FA: u'OXTEL, LTD.', + 0x0050FB: u'VSK ELECTRONICS', + 0x0050FC: u'EDIMAX TECHNOLOGY CO., LTD.', + 0x0050FD: u'VISIONCOMM CO., LTD.', + 0x0050FE: u'PCTVnet ASA', + 0x0050FF: u'HAKKO ELECTRONICS CO., LTD.', + 0x006000: u'XYCOM INC.', + 0x006001: u'InnoSys, Inc.', + 0x006002: u'SCREEN SUBTITLING SYSTEMS, LTD', + 0x006003: u'TERAOKA WEIGH SYSTEM PTE, LTD.', + 0x006004: u'COMPUTADORES MODULARES SA', + 0x006005: u'FEEDBACK DATA LTD.', + 0x006006: u'SOTEC CO., LTD', + 0x006007: u'ACRES GAMING, INC.', + 0x006008: u'3COM CORPORATION', + 0x006009: u'CISCO SYSTEMS, INC.', + 0x00600A: u'SORD COMPUTER CORPORATION', + 0x00600B: u'LOGWARE GmbH', + 0x00600C: u'APPLIED DATA SYSTEMS, INC.', + 0x00600D: u'Digital Logic GmbH', + 0x00600E: u'WAVENET INTERNATIONAL, INC.', + 0x00600F: u'WESTELL, INC.', + 0x006010: u'NETWORK MACHINES, INC.', + 0x006011: u'CRYSTAL SEMICONDUCTOR CORP.', + 0x006012: u'POWER COMPUTING CORPORATION', + 0x006013: u'NETSTAL MASCHINEN AG', + 0x006014: u'EDEC CO., LTD.', + 0x006015: u'NET2NET 
CORPORATION', + 0x006016: u'CLARIION', + 0x006017: u'TOKIMEC INC.', + 0x006018: u'STELLAR ONE CORPORATION', + 0x006019: u'Roche Diagnostics', + 0x00601A: u'KEITHLEY INSTRUMENTS', + 0x00601B: u'MESA ELECTRONICS', + 0x00601C: u'TELXON CORPORATION', + 0x00601D: u'LUCENT TECHNOLOGIES', + 0x00601E: u'SOFTLAB, INC.', + 0x00601F: u'STALLION TECHNOLOGIES', + 0x006020: u'PIVOTAL NETWORKING, INC.', + 0x006021: u'DSC CORPORATION', + 0x006022: u'VICOM SYSTEMS, INC.', + 0x006023: u'PERICOM SEMICONDUCTOR CORP.', + 0x006024: u'GRADIENT TECHNOLOGIES, INC.', + 0x006025: u'ACTIVE IMAGING PLC', + 0x006026: u'VIKING COMPONENTS, INC.', + 0x006027: u'Superior Modular Products', + 0x006028: u'MACROVISION CORPORATION', + 0x006029: u'CARY PERIPHERALS INC.', + 0x00602A: u'SYMICRON COMPUTER COMMUNICATIONS, LTD.', + 0x00602B: u'PEAK AUDIO', + 0x00602C: u'LINX Data Terminals, Inc.', + 0x00602D: u'ALERTON TECHNOLOGIES, INC.', + 0x00602E: u'CYCLADES CORPORATION', + 0x00602F: u'CISCO SYSTEMS, INC.', + 0x006030: u'VILLAGE TRONIC ENTWICKLUNG', + 0x006031: u'HRK SYSTEMS', + 0x006032: u'I-CUBE, INC.', + 0x006033: u'ACUITY IMAGING, INC.', + 0x006034: u'ROBERT BOSCH GmbH', + 0x006035: u'DALLAS SEMICONDUCTOR, INC.', + 0x006036: u'AUSTRIAN RESEARCH CENTER SEIBERSDORF', + 0x006037: u'NXP Semiconductors', + 0x006038: u'Nortel Networks', + 0x006039: u'SanCom Technology, Inc.', + 0x00603A: u'QUICK CONTROLS LTD.', + 0x00603B: u'AMTEC spa', + 0x00603C: u'HAGIWARA SYS-COM CO., LTD.', + 0x00603D: u'3CX', + 0x00603E: u'CISCO SYSTEMS, INC.', + 0x00603F: u'PATAPSCO DESIGNS', + 0x006040: u'NETRO CORP.', + 0x006041: u'Yokogawa Electric Corporation', + 0x006042: u'TKS (USA), INC.', + 0x006043: u'ComSoft Systems, Inc.', + 0x006044: u'LITTON/POLY-SCIENTIFIC', + 0x006045: u'PATHLIGHT TECHNOLOGIES', + 0x006046: u'VMETRO, INC.', + 0x006047: u'CISCO SYSTEMS, INC.', + 0x006048: u'EMC CORPORATION', + 0x006049: u'VINA TECHNOLOGIES', + 0x00604A: u'SAIC IDEAS GROUP', + 0x00604B: u'Safe-com GmbH & Co. 
KG', + 0x00604C: u'SAGEM SA', + 0x00604D: u'MMC NETWORKS, INC.', + 0x00604E: u'CYCLE COMPUTER CORPORATION, INC.', + 0x00604F: u'SUZUKI MFG. CO., LTD.', + 0x006050: u'INTERNIX INC.', + 0x006051: u'QUALITY SEMICONDUCTOR', + 0x006052: u'PERIPHERALS ENTERPRISE CO., Ltd.', + 0x006053: u'TOYODA MACHINE WORKS, LTD.', + 0x006054: u'CONTROLWARE GMBH', + 0x006055: u'CORNELL UNIVERSITY', + 0x006056: u'NETWORK TOOLS, INC.', + 0x006057: u'MURATA MANUFACTURING CO., LTD.', + 0x006058: u'COPPER MOUNTAIN COMMUNICATIONS, INC.', + 0x006059: u'TECHNICAL COMMUNICATIONS CORP.', + 0x00605A: u'CELCORE, INC.', + 0x00605B: u'IntraServer Technology, Inc.', + 0x00605C: u'CISCO SYSTEMS, INC.', + 0x00605D: u'SCANIVALVE CORP.', + 0x00605E: u'LIBERTY TECHNOLOGY NETWORKING', + 0x00605F: u'NIPPON UNISOFT CORPORATION', + 0x006060: u'DAWNING TECHNOLOGIES, INC.', + 0x006061: u'WHISTLE COMMUNICATIONS CORP.', + 0x006062: u'TELESYNC, INC.', + 0x006063: u'PSION DACOM PLC.', + 0x006064: u'NETCOMM LIMITED', + 0x006065: u'BERNECKER & RAINER INDUSTRIE-ELEKTRONIC GmbH', + 0x006066: u'LACROIX TECHNOLGIE', + 0x006067: u'ACER NETXUS INC.', + 0x006068: u'EICON TECHNOLOGY CORPORATION', + 0x006069: u'BROCADE COMMUNICATIONS SYSTEMS, Inc.', + 0x00606A: u'MITSUBISHI WIRELESS COMMUNICATIONS. 
INC.', + 0x00606B: u'Synclayer Inc.', + 0x00606C: u'ARESCOM', + 0x00606D: u'DIGITAL EQUIPMENT CORP.', + 0x00606E: u'DAVICOM SEMICONDUCTOR, INC.', + 0x00606F: u'CLARION CORPORATION OF AMERICA', + 0x006070: u'CISCO SYSTEMS, INC.', + 0x006071: u'MIDAS LAB, INC.', + 0x006072: u'VXL INSTRUMENTS, LIMITED', + 0x006073: u'REDCREEK COMMUNICATIONS, INC.', + 0x006074: u'QSC AUDIO PRODUCTS', + 0x006075: u'PENTEK, INC.', + 0x006076: u'SCHLUMBERGER TECHNOLOGIES RETAIL PETROLEUM SYSTEMS', + 0x006077: u'PRISA NETWORKS', + 0x006078: u'POWER MEASUREMENT LTD.', + 0x006079: u'Mainstream Data, Inc.', + 0x00607A: u'DVS GmbH', + 0x00607B: u'FORE SYSTEMS, INC.', + 0x00607C: u'WaveAccess, Ltd.', + 0x00607D: u'SENTIENT NETWORKS INC.', + 0x00607E: u'GIGALABS, INC.', + 0x00607F: u'AURORA TECHNOLOGIES, INC.', + 0x006080: u'MICROTRONIX DATACOM LTD.', + 0x006081: u'TV/COM INTERNATIONAL', + 0x006082: u'NOVALINK TECHNOLOGIES, INC.', + 0x006083: u'CISCO SYSTEMS, INC.', + 0x006084: u'DIGITAL VIDEO', + 0x006085: u'Storage Concepts', + 0x006086: u'LOGIC REPLACEMENT TECH. LTD.', + 0x006087: u'KANSAI ELECTRIC CO., LTD.', + 0x006088: u'WHITE MOUNTAIN DSP, INC.', + 0x006089: u'XATA', + 0x00608A: u'CITADEL COMPUTER', + 0x00608B: u'ConferTech International', + 0x00608C: u'3COM CORPORATION', + 0x00608D: u'UNIPULSE CORP.', + 0x00608E: u'HE ELECTRONICS, TECHNOLOGIE & SYSTEMTECHNIK GmbH', + 0x00608F: u'TEKRAM TECHNOLOGY CO., LTD.', + 0x006090: u'ABLE COMMUNICATIONS, INC.', + 0x006091: u'FIRST PACIFIC NETWORKS, INC.', + 0x006092: u'MICRO/SYS, INC.', + 0x006093: u'VARIAN', + 0x006094: u'IBM CORP.', + 0x006095: u'ACCU-TIME SYSTEMS, INC.', + 0x006096: u'T.S. 
MICROTECH INC.', + 0x006097: u'3COM CORPORATION', + 0x006098: u'HT COMMUNICATIONS', + 0x006099: u'SBE, Inc.', + 0x00609A: u'NJK TECHNO CO.', + 0x00609B: u'ASTRO-MED, INC.', + 0x00609C: u'Perkin-Elmer Incorporated', + 0x00609D: u'PMI FOOD EQUIPMENT GROUP', + 0x00609E: u'ASC X3 - INFORMATION TECHNOLOGY STANDARDS SECRETARIATS', + 0x00609F: u'PHAST CORPORATION', + 0x0060A0: u'SWITCHED NETWORK TECHNOLOGIES, INC.', + 0x0060A1: u'VPNet, Inc.', + 0x0060A2: u'NIHON UNISYS LIMITED CO.', + 0x0060A3: u'CONTINUUM TECHNOLOGY CORP.', + 0x0060A4: u'GRINAKER SYSTEM TECHNOLOGIES', + 0x0060A5: u'PERFORMANCE TELECOM CORP.', + 0x0060A6: u'PARTICLE MEASURING SYSTEMS', + 0x0060A7: u'MICROSENS GmbH & CO. KG', + 0x0060A8: u'TIDOMAT AB', + 0x0060A9: u'GESYTEC MbH', + 0x0060AA: u'INTELLIGENT DEVICES INC. (IDI)', + 0x0060AB: u'LARSCOM INCORPORATED', + 0x0060AC: u'RESILIENCE CORPORATION', + 0x0060AD: u'MegaChips Corporation', + 0x0060AE: u'TRIO INFORMATION SYSTEMS AB', + 0x0060AF: u'PACIFIC MICRO DATA, INC.', + 0x0060B0: u'HEWLETT-PACKARD CO.', + 0x0060B1: u'INPUT/OUTPUT, INC.', + 0x0060B2: u'PROCESS CONTROL CORP.', + 0x0060B3: u'Z-COM, INC.', + 0x0060B4: u'GLENAYRE R&D INC.', + 0x0060B5: u'KEBA GmbH', + 0x0060B6: u'LAND COMPUTER CO., LTD.', + 0x0060B7: u'CHANNELMATIC, INC.', + 0x0060B8: u'CORELIS INC.', + 0x0060B9: u'NITSUKO CORPORATION', + 0x0060BA: u'SAHARA NETWORKS, INC.', + 0x0060BB: u'CABLETRON - NETLINK, INC.', + 0x0060BC: u'KeunYoung Electronics & Communication Co., Ltd.', + 0x0060BD: u'HUBBELL-PULSECOM', + 0x0060BE: u'WEBTRONICS', + 0x0060BF: u'MACRAIGOR SYSTEMS, INC.', + 0x0060C0: u'NERA AS', + 0x0060C1: u'WaveSpan Corporation', + 0x0060C2: u'MPL AG', + 0x0060C3: u'NETVISION CORPORATION', + 0x0060C4: u'SOLITON SYSTEMS K.K.', + 0x0060C5: u'ANCOT CORP.', + 0x0060C6: u'DCS AG', + 0x0060C7: u'AMATI COMMUNICATIONS CORP.', + 0x0060C8: u'KUKA WELDING SYSTEMS & ROBOTS', + 0x0060C9: u'ControlNet, Inc.', + 0x0060CA: u'HARMONIC SYSTEMS INCORPORATED', + 0x0060CB: u'HITACHI ZOSEN CORPORATION', + 
0x0060CC: u'EMTRAK, INCORPORATED', + 0x0060CD: u'VideoServer, Inc.', + 0x0060CE: u'ACCLAIM COMMUNICATIONS', + 0x0060CF: u'ALTEON NETWORKS, INC.', + 0x0060D0: u'SNMP RESEARCH INCORPORATED', + 0x0060D1: u'CASCADE COMMUNICATIONS', + 0x0060D2: u'LUCENT TECHNOLOGIES TAIWAN TELECOMMUNICATIONS CO., LTD.', + 0x0060D3: u'AT&T', + 0x0060D4: u'ELDAT COMMUNICATION LTD.', + 0x0060D5: u'MIYACHI TECHNOS CORP.', + 0x0060D6: u'NovAtel Wireless Technologies Ltd.', + 0x0060D7: u'ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (EPFL)', + 0x0060D8: u'ELMIC SYSTEMS, INC.', + 0x0060D9: u'TRANSYS NETWORKS INC.', + 0x0060DA: u'JBM ELECTRONICS CO.', + 0x0060DB: u'NTP ELEKTRONIK A/S', + 0x0060DC: u'Toyo Network Systems Co, Ltd.', + 0x0060DD: u'MYRICOM, INC.', + 0x0060DE: u'KAYSER-THREDE GmbH', + 0x0060DF: u'CNT Corporation', + 0x0060E0: u'AXIOM TECHNOLOGY CO., LTD.', + 0x0060E1: u'ORCKIT COMMUNICATIONS LTD.', + 0x0060E2: u'QUEST ENGINEERING & DEVELOPMENT', + 0x0060E3: u'ARBIN INSTRUMENTS', + 0x0060E4: u'COMPUSERVE, INC.', + 0x0060E5: u'FUJI AUTOMATION CO., LTD.', + 0x0060E6: u'SHOMITI SYSTEMS INCORPORATED', + 0x0060E7: u'RANDATA', + 0x0060E8: u'HITACHI COMPUTER PRODUCTS (AMERICA), INC.', + 0x0060E9: u'ATOP TECHNOLOGIES, INC.', + 0x0060EA: u'StreamLogic', + 0x0060EB: u'FOURTHTRACK SYSTEMS', + 0x0060EC: u'HERMARY OPTO ELECTRONICS INC.', + 0x0060ED: u'RICARDO TEST AUTOMATION LTD.', + 0x0060EE: u'APOLLO', + 0x0060EF: u'FLYTECH TECHNOLOGY CO., LTD.', + 0x0060F0: u'JOHNSON & JOHNSON MEDICAL, INC', + 0x0060F1: u'EXP COMPUTER, INC.', + 0x0060F2: u'LASERGRAPHICS, INC.', + 0x0060F3: u'Performance Analysis Broadband, Spirent plc', + 0x0060F4: u'ADVANCED COMPUTER SOLUTIONS, Inc.', + 0x0060F5: u'ICON WEST, INC.', + 0x0060F6: u'NEXTEST COMMUNICATIONS PRODUCTS, INC.', + 0x0060F7: u'DATAFUSION SYSTEMS', + 0x0060F8: u'Loran International Technologies Inc.', + 0x0060F9: u'DIAMOND LANE COMMUNICATIONS', + 0x0060FA: u'EDUCATIONAL TECHNOLOGY RESOURCES, INC.', + 0x0060FB: u'PACKETEER, INC.', + 0x0060FC: u'CONSERVATION 
THROUGH INNOVATION LTD.', + 0x0060FD: u'NetICs, Inc.', + 0x0060FE: u'LYNX SYSTEM DEVELOPERS, INC.', + 0x0060FF: u'QuVis, Inc.', + 0x0070B0: u'M/A-COM INC. COMPANIES', + 0x0070B3: u'DATA RECALL LTD.', + 0x008000: u'MULTITECH SYSTEMS, INC.', + 0x008001: u'PERIPHONICS CORPORATION', + 0x008002: u'SATELCOM (UK) LTD', + 0x008003: u'HYTEC ELECTRONICS LTD.', + 0x008004: u'ANTLOW COMMUNICATIONS, LTD.', + 0x008005: u'CACTUS COMPUTER INC.', + 0x008006: u'COMPUADD CORPORATION', + 0x008007: u'DLOG NC-SYSTEME', + 0x008008: u'DYNATECH COMPUTER SYSTEMS', + 0x008009: u'JUPITER SYSTEMS, INC.', + 0x00800A: u'JAPAN COMPUTER CORP.', + 0x00800B: u'CSK CORPORATION', + 0x00800C: u'VIDECOM LIMITED', + 0x00800D: u'VOSSWINKEL F.U.', + 0x00800E: u'ATLANTIX CORPORATION', + 0x00800F: u'STANDARD MICROSYSTEMS', + 0x008010: u'COMMODORE INTERNATIONAL', + 0x008011: u'DIGITAL SYSTEMS INT\'L. INC.', + 0x008012: u'INTEGRATED MEASUREMENT SYSTEMS', + 0x008013: u'THOMAS-CONRAD CORPORATION', + 0x008014: u'ESPRIT SYSTEMS', + 0x008015: u'SEIKO SYSTEMS, INC.', + 0x008016: u'WANDEL AND GOLTERMANN', + 0x008017: u'PFU LIMITED', + 0x008018: u'KOBE STEEL, LTD.', + 0x008019: u'DAYNA COMMUNICATIONS, INC.', + 0x00801A: u'BELL ATLANTIC', + 0x00801B: u'KODIAK TECHNOLOGY', + 0x00801C: u'NEWPORT SYSTEMS SOLUTIONS', + 0x00801D: u'INTEGRATED INFERENCE MACHINES', + 0x00801E: u'XINETRON, INC.', + 0x00801F: u'KRUPP ATLAS ELECTRONIK GMBH', + 0x008020: u'NETWORK PRODUCTS', + 0x008021: u'Alcatel Canada Inc.', + 0x008022: u'SCAN-OPTICS', + 0x008023: u'INTEGRATED BUSINESS NETWORKS', + 0x008024: u'KALPANA, INC.', + 0x008025: u'STOLLMANN GMBH', + 0x008026: u'NETWORK PRODUCTS CORPORATION', + 0x008027: u'ADAPTIVE SYSTEMS, INC.', + 0x008028: u'TRADPOST (HK) LTD', + 0x008029: u'EAGLE TECHNOLOGY, INC.', + 0x00802A: u'TEST SYSTEMS & SIMULATIONS INC', + 0x00802B: u'INTEGRATED MARKETING CO', + 0x00802C: u'THE SAGE GROUP PLC', + 0x00802D: u'XYLOGICS INC', + 0x00802E: u'CASTLE ROCK COMPUTING', + 0x00802F: u'NATIONAL INSTRUMENTS CORP.', + 
0x008030: u'NEXUS ELECTRONICS', + 0x008031: u'BASYS, CORP.', + 0x008032: u'ACCESS CO., LTD.', + 0x008033: u'FORMATION, INC.', + 0x008034: u'SMT GOUPIL', + 0x008035: u'TECHNOLOGY WORKS, INC.', + 0x008036: u'REFLEX MANUFACTURING SYSTEMS', + 0x008037: u'Ericsson Group', + 0x008038: u'DATA RESEARCH & APPLICATIONS', + 0x008039: u'ALCATEL STC AUSTRALIA', + 0x00803A: u'VARITYPER, INC.', + 0x00803B: u'APT COMMUNICATIONS, INC.', + 0x00803C: u'TVS ELECTRONICS LTD', + 0x00803D: u'SURIGIKEN CO., LTD.', + 0x00803E: u'SYNERNETICS', + 0x00803F: u'TATUNG COMPANY', + 0x008040: u'JOHN FLUKE MANUFACTURING CO.', + 0x008041: u'VEB KOMBINAT ROBOTRON', + 0x008042: u'FORCE COMPUTERS', + 0x008043: u'NETWORLD, INC.', + 0x008044: u'SYSTECH COMPUTER CORP.', + 0x008045: u'MATSUSHITA ELECTRIC IND. CO', + 0x008046: u'UNIVERSITY OF TORONTO', + 0x008047: u'IN-NET CORP.', + 0x008048: u'COMPEX INCORPORATED', + 0x008049: u'NISSIN ELECTRIC CO., LTD.', + 0x00804A: u'PRO-LOG', + 0x00804B: u'EAGLE TECHNOLOGIES PTY.LTD.', + 0x00804C: u'CONTEC CO., LTD.', + 0x00804D: u'CYCLONE MICROSYSTEMS, INC.', + 0x00804E: u'APEX COMPUTER COMPANY', + 0x00804F: u'DAIKIN INDUSTRIES, LTD.', + 0x008050: u'ZIATECH CORPORATION', + 0x008051: u'FIBERMUX', + 0x008052: u'TECHNICALLY ELITE CONCEPTS', + 0x008053: u'INTELLICOM, INC.', + 0x008054: u'FRONTIER TECHNOLOGIES CORP.', + 0x008055: u'FERMILAB', + 0x008056: u'SPHINX ELEKTRONIK GMBH', + 0x008057: u'ADSOFT, LTD.', + 0x008058: u'PRINTER SYSTEMS CORPORATION', + 0x008059: u'STANLEY ELECTRIC CO., LTD', + 0x00805A: u'TULIP COMPUTERS INTERNAT\'L B.V', + 0x00805B: u'CONDOR SYSTEMS, INC.', + 0x00805C: u'AGILIS CORPORATION', + 0x00805D: u'CANSTAR', + 0x00805E: u'LSI LOGIC CORPORATION', + 0x00805F: u'COMPAQ COMPUTER CORPORATION', + 0x008060: u'NETWORK INTERFACE CORPORATION', + 0x008061: u'LITTON SYSTEMS, INC.', + 0x008062: u'INTERFACE CO.', + 0x008063: u'RICHARD HIRSCHMANN GMBH & CO.', + 0x008064: u'WYSE TECHNOLOGY', + 0x008065: u'CYBERGRAPHIC SYSTEMS PTY LTD.', + 0x008066: u'ARCOM 
CONTROL SYSTEMS, LTD.', + 0x008067: u'SQUARE D COMPANY', + 0x008068: u'YAMATECH SCIENTIFIC LTD.', + 0x008069: u'COMPUTONE SYSTEMS', + 0x00806A: u'ERI (EMPAC RESEARCH INC.)', + 0x00806B: u'SCHMID TELECOMMUNICATION', + 0x00806C: u'CEGELEC PROJECTS LTD', + 0x00806D: u'CENTURY SYSTEMS CORP.', + 0x00806E: u'NIPPON STEEL CORPORATION', + 0x00806F: u'ONELAN LTD.', + 0x008070: u'COMPUTADORAS MICRON', + 0x008071: u'SAI TECHNOLOGY', + 0x008072: u'MICROPLEX SYSTEMS LTD.', + 0x008073: u'DWB ASSOCIATES', + 0x008074: u'FISHER CONTROLS', + 0x008075: u'PARSYTEC GMBH', + 0x008076: u'MCNC', + 0x008077: u'BROTHER INDUSTRIES, LTD.', + 0x008078: u'PRACTICAL PERIPHERALS, INC.', + 0x008079: u'MICROBUS DESIGNS LTD.', + 0x00807A: u'AITECH SYSTEMS LTD.', + 0x00807B: u'ARTEL COMMUNICATIONS CORP.', + 0x00807C: u'FIBERCOM, INC.', + 0x00807D: u'EQUINOX SYSTEMS INC.', + 0x00807E: u'SOUTHERN PACIFIC LTD.', + 0x00807F: u'DY-4 INCORPORATED', + 0x008080: u'DATAMEDIA CORPORATION', + 0x008081: u'KENDALL SQUARE RESEARCH CORP.', + 0x008082: u'PEP MODULAR COMPUTERS GMBH', + 0x008083: u'AMDAHL', + 0x008084: u'THE CLOUD INC.', + 0x008085: u'H-THREE SYSTEMS CORPORATION', + 0x008086: u'COMPUTER GENERATION INC.', + 0x008087: u'OKI ELECTRIC INDUSTRY CO., LTD', + 0x008088: u'VICTOR COMPANY OF JAPAN, LTD.', + 0x008089: u'TECNETICS (PTY) LTD.', + 0x00808A: u'SUMMIT MICROSYSTEMS CORP.', + 0x00808B: u'DACOLL LIMITED', + 0x00808C: u'NetScout Systems, Inc.', + 0x00808D: u'WESTCOAST TECHNOLOGY B.V.', + 0x00808E: u'RADSTONE TECHNOLOGY', + 0x00808F: u'C. 
ITOH ELECTRONICS, INC.', + 0x008090: u'MICROTEK INTERNATIONAL, INC.', + 0x008091: u'TOKYO ELECTRIC CO.,LTD', + 0x008092: u'JAPAN COMPUTER INDUSTRY, INC.', + 0x008093: u'XYRON CORPORATION', + 0x008094: u'ALFA LAVAL AUTOMATION AB', + 0x008095: u'BASIC MERTON HANDELSGES.M.B.H.', + 0x008096: u'HUMAN DESIGNED SYSTEMS, INC.', + 0x008097: u'CENTRALP AUTOMATISMES', + 0x008098: u'TDK CORPORATION', + 0x008099: u'KLOCKNER MOELLER IPC', + 0x00809A: u'NOVUS NETWORKS LTD', + 0x00809B: u'JUSTSYSTEM CORPORATION', + 0x00809C: u'LUXCOM, INC.', + 0x00809D: u'Commscraft Ltd.', + 0x00809E: u'DATUS GMBH', + 0x00809F: u'ALCATEL BUSINESS SYSTEMS', + 0x0080A0: u'EDISA HEWLETT PACKARD S/A', + 0x0080A1: u'MICROTEST, INC.', + 0x0080A2: u'CREATIVE ELECTRONIC SYSTEMS', + 0x0080A3: u'LANTRONIX', + 0x0080A4: u'LIBERTY ELECTRONICS', + 0x0080A5: u'SPEED INTERNATIONAL', + 0x0080A6: u'REPUBLIC TECHNOLOGY, INC.', + 0x0080A7: u'MEASUREX CORP.', + 0x0080A8: u'VITACOM CORPORATION', + 0x0080A9: u'CLEARPOINT RESEARCH', + 0x0080AA: u'MAXPEED', + 0x0080AB: u'DUKANE NETWORK INTEGRATION', + 0x0080AC: u'IMLOGIX, DIVISION OF GENESYS', + 0x0080AD: u'CNET TECHNOLOGY, INC.', + 0x0080AE: u'HUGHES NETWORK SYSTEMS', + 0x0080AF: u'ALLUMER CO., LTD.', + 0x0080B0: u'ADVANCED INFORMATION', + 0x0080B1: u'SOFTCOM A/S', + 0x0080B2: u'NETWORK EQUIPMENT TECHNOLOGIES', + 0x0080B3: u'AVAL DATA CORPORATION', + 0x0080B4: u'SOPHIA SYSTEMS', + 0x0080B5: u'UNITED NETWORKS INC.', + 0x0080B6: u'THEMIS COMPUTER', + 0x0080B7: u'STELLAR COMPUTER', + 0x0080B8: u'BUG, INCORPORATED', + 0x0080B9: u'ARCHE TECHNOLIGIES INC.', + 0x0080BA: u'SPECIALIX (ASIA) PTE, LTD', + 0x0080BB: u'HUGHES LAN SYSTEMS', + 0x0080BC: u'HITACHI ENGINEERING CO., LTD', + 0x0080BD: u'THE FURUKAWA ELECTRIC CO., LTD', + 0x0080BE: u'ARIES RESEARCH', + 0x0080BF: u'TAKAOKA ELECTRIC MFG. CO. 
LTD.', + 0x0080C0: u'PENRIL DATACOMM', + 0x0080C1: u'LANEX CORPORATION', + 0x0080C2: u'IEEE 802.1 COMMITTEE', + 0x0080C3: u'BICC INFORMATION SYSTEMS & SVC', + 0x0080C4: u'DOCUMENT TECHNOLOGIES, INC.', + 0x0080C5: u'NOVELLCO DE MEXICO', + 0x0080C6: u'NATIONAL DATACOMM CORPORATION', + 0x0080C7: u'XIRCOM', + 0x0080C8: u'D-LINK SYSTEMS, INC.', + 0x0080C9: u'ALBERTA MICROELECTRONIC CENTRE', + 0x0080CA: u'NETCOM RESEARCH INCORPORATED', + 0x0080CB: u'FALCO DATA PRODUCTS', + 0x0080CC: u'MICROWAVE BYPASS SYSTEMS', + 0x0080CD: u'MICRONICS COMPUTER, INC.', + 0x0080CE: u'BROADCAST TELEVISION SYSTEMS', + 0x0080CF: u'EMBEDDED PERFORMANCE INC.', + 0x0080D0: u'COMPUTER PERIPHERALS, INC.', + 0x0080D1: u'KIMTRON CORPORATION', + 0x0080D2: u'SHINNIHONDENKO CO., LTD.', + 0x0080D3: u'SHIVA CORP.', + 0x0080D4: u'CHASE RESEARCH LTD.', + 0x0080D5: u'CADRE TECHNOLOGIES', + 0x0080D6: u'NUVOTECH, INC.', + 0x0080D7: u'Fantum Engineering', + 0x0080D8: u'NETWORK PERIPHERALS INC.', + 0x0080D9: u'EMK ELEKTRONIK', + 0x0080DA: u'BRUEL & KJAER', + 0x0080DB: u'GRAPHON CORPORATION', + 0x0080DC: u'PICKER INTERNATIONAL', + 0x0080DD: u'GMX INC/GIMIX', + 0x0080DE: u'GIPSI S.A.', + 0x0080DF: u'ADC CODENOLL TECHNOLOGY CORP.', + 0x0080E0: u'XTP SYSTEMS, INC.', + 0x0080E1: u'STMICROELECTRONICS', + 0x0080E2: u'T.D.I. CO., LTD.', + 0x0080E3: u'CORAL NETWORK CORPORATION', + 0x0080E4: u'NORTHWEST DIGITAL SYSTEMS, INC', + 0x0080E5: u'LSI Logic Corporation', + 0x0080E6: u'PEER NETWORKS, INC.', + 0x0080E7: u'LYNWOOD SCIENTIFIC DEV. 
LTD.', + 0x0080E8: u'CUMULUS CORPORATIION', + 0x0080E9: u'Madge Ltd.', + 0x0080EA: u'ADVA Optical Networking Ltd.', + 0x0080EB: u'COMPCONTROL B.V.', + 0x0080EC: u'SUPERCOMPUTING SOLUTIONS, INC.', + 0x0080ED: u'IQ TECHNOLOGIES, INC.', + 0x0080EE: u'THOMSON CSF', + 0x0080EF: u'RATIONAL', + 0x0080F0: u'Panasonic Communications Co., Ltd.', + 0x0080F1: u'OPUS SYSTEMS', + 0x0080F2: u'RAYCOM SYSTEMS INC', + 0x0080F3: u'SUN ELECTRONICS CORP.', + 0x0080F4: u'TELEMECANIQUE ELECTRIQUE', + 0x0080F5: u'QUANTEL LTD', + 0x0080F6: u'SYNERGY MICROSYSTEMS', + 0x0080F7: u'ZENITH ELECTRONICS', + 0x0080F8: u'MIZAR, INC.', + 0x0080F9: u'HEURIKON CORPORATION', + 0x0080FA: u'RWT GMBH', + 0x0080FB: u'BVM LIMITED', + 0x0080FC: u'AVATAR CORPORATION', + 0x0080FD: u'EXSCEED CORPRATION', + 0x0080FE: u'AZURE TECHNOLOGIES, INC.', + 0x0080FF: u'SOC. DE TELEINFORMATIQUE RTC', + 0x009000: u'DIAMOND MULTIMEDIA', + 0x009001: u'NISHIMU ELECTRONICS INDUSTRIES CO., LTD.', + 0x009002: u'ALLGON AB', + 0x009003: u'APLIO', + 0x009004: u'3COM EUROPE LTD.', + 0x009005: u'PROTECH SYSTEMS CO., LTD.', + 0x009006: u'HAMAMATSU PHOTONICS K.K.', + 0x009007: u'DOMEX TECHNOLOGY CORP.', + 0x009008: u'HanA Systems Inc.', + 0x009009: u'i Controls, Inc.', + 0x00900A: u'PROTON ELECTRONIC INDUSTRIAL CO., LTD.', + 0x00900B: u'LANNER ELECTRONICS, INC.', + 0x00900C: u'CISCO SYSTEMS, INC.', + 0x00900D: u'Overland Storage Inc.', + 0x00900E: u'HANDLINK TECHNOLOGIES, INC.', + 0x00900F: u'KAWASAKI HEAVY INDUSTRIES, LTD', + 0x009010: u'SIMULATION LABORATORIES, INC.', + 0x009011: u'WAVTrace, Inc.', + 0x009012: u'GLOBESPAN SEMICONDUCTOR, INC.', + 0x009013: u'SAMSAN CORP.', + 0x009014: u'ROTORK INSTRUMENTS, LTD.', + 0x009015: u'CENTIGRAM COMMUNICATIONS CORP.', + 0x009016: u'ZAC', + 0x009017: u'ZYPCOM, INC.', + 0x009018: u'ITO ELECTRIC INDUSTRY CO, LTD.', + 0x009019: u'HERMES ELECTRONICS CO., LTD.', + 0x00901A: u'UNISPHERE SOLUTIONS', + 0x00901B: u'DIGITAL CONTROLS', + 0x00901C: u'mps Software Gmbh', + 0x00901D: u'PEC (NZ) LTD.', + 
0x00901E: u'SELESTA INGEGNE RIA S.P.A.', + 0x00901F: u'ADTEC PRODUCTIONS, INC.', + 0x009020: u'PHILIPS ANALYTICAL X-RAY B.V.', + 0x009021: u'CISCO SYSTEMS, INC.', + 0x009022: u'IVEX', + 0x009023: u'ZILOG INC.', + 0x009024: u'PIPELINKS, INC.', + 0x009025: u'VISION SYSTEMS LTD. PTY', + 0x009026: u'ADVANCED SWITCHING COMMUNICATIONS, INC.', + 0x009027: u'INTEL CORPORATION', + 0x009028: u'NIPPON SIGNAL CO., LTD.', + 0x009029: u'CRYPTO AG', + 0x00902A: u'COMMUNICATION DEVICES, INC.', + 0x00902B: u'CISCO SYSTEMS, INC.', + 0x00902C: u'DATA & CONTROL EQUIPMENT LTD.', + 0x00902D: u'DATA ELECTRONICS (AUST.) PTY, LTD.', + 0x00902E: u'NAMCO LIMITED', + 0x00902F: u'NETCORE SYSTEMS, INC.', + 0x009030: u'HONEYWELL-DATING', + 0x009031: u'MYSTICOM, LTD.', + 0x009032: u'PELCOMBE GROUP LTD.', + 0x009033: u'INNOVAPHONE AG', + 0x009034: u'IMAGIC, INC.', + 0x009035: u'ALPHA TELECOM, INC.', + 0x009036: u'ens, inc.', + 0x009037: u'ACUCOMM, INC.', + 0x009038: u'FOUNTAIN TECHNOLOGIES, INC.', + 0x009039: u'SHASTA NETWORKS', + 0x00903A: u'NIHON MEDIA TOOL INC.', + 0x00903B: u'TriEMS Research Lab, Inc.', + 0x00903C: u'ATLANTIC NETWORK SYSTEMS', + 0x00903D: u'BIOPAC SYSTEMS, INC.', + 0x00903E: u'N.V. PHILIPS INDUSTRIAL ACTIVITIES', + 0x00903F: u'AZTEC RADIOMEDIA', + 0x009040: u'Siemens Network Convergence LLC', + 0x009041: u'APPLIED DIGITAL ACCESS', + 0x009042: u'ECCS, Inc.', + 0x009043: u'NICHIBEI DENSHI CO., LTD.', + 0x009044: u'ASSURED DIGITAL, INC.', + 0x009045: u'Marconi Communications', + 0x009046: u'DEXDYNE, LTD.', + 0x009047: u'GIGA FAST E. 
LTD.', + 0x009048: u'ZEAL CORPORATION', + 0x009049: u'ENTRIDIA CORPORATION', + 0x00904A: u'CONCUR SYSTEM TECHNOLOGIES', + 0x00904B: u'GemTek Technology Co., Ltd.', + 0x00904C: u'EPIGRAM, INC.', + 0x00904D: u'SPEC S.A.', + 0x00904E: u'DELEM BV', + 0x00904F: u'ABB POWER T&D COMPANY, INC.', + 0x009050: u'TELESTE OY', + 0x009051: u'ULTIMATE TECHNOLOGY CORP.', + 0x009052: u'SELCOM ELETTRONICA S.R.L.', + 0x009053: u'DAEWOO ELECTRONICS CO., LTD.', + 0x009054: u'INNOVATIVE SEMICONDUCTORS, INC', + 0x009055: u'PARKER HANNIFIN CORPORATION COMPUMOTOR DIVISION', + 0x009056: u'TELESTREAM, INC.', + 0x009057: u'AANetcom, Inc.', + 0x009058: u'Ultra Electronics Ltd., Command and Control Systems', + 0x009059: u'TELECOM DEVICE K.K.', + 0x00905A: u'DEARBORN GROUP, INC.', + 0x00905B: u'RAYMOND AND LAE ENGINEERING', + 0x00905C: u'EDMI', + 0x00905D: u'NETCOM SICHERHEITSTECHNIK GmbH', + 0x00905E: u'RAULAND-BORG CORPORATION', + 0x00905F: u'CISCO SYSTEMS, INC.', + 0x009060: u'SYSTEM CREATE CORP.', + 0x009061: u'PACIFIC RESEARCH & ENGINEERING CORPORATION', + 0x009062: u'ICP VORTEX COMPUTERSYSTEME GmbH', + 0x009063: u'COHERENT COMMUNICATIONS SYSTEMS CORPORATION', + 0x009064: u'THOMSON BROADCAST SYSTEMS', + 0x009065: u'FINISAR CORPORATION', + 0x009066: u'Troika Networks, Inc.', + 0x009067: u'WalkAbout Computers, Inc.', + 0x009068: u'DVT CORP.', + 0x009069: u'JUNIPER NETWORKS, INC.', + 0x00906A: u'TURNSTONE SYSTEMS, INC.', + 0x00906B: u'APPLIED RESOURCES, INC.', + 0x00906C: u'Sartorius Hamburg GmbH', + 0x00906D: u'CISCO SYSTEMS, INC.', + 0x00906E: u'PRAXON, INC.', + 0x00906F: u'CISCO SYSTEMS, INC.', + 0x009070: u'NEO NETWORKS, INC.', + 0x009071: u'Applied Innovation Inc.', + 0x009072: u'SIMRAD AS', + 0x009073: u'GAIO TECHNOLOGY', + 0x009074: u'ARGON NETWORKS, INC.', + 0x009075: u'NEC DO BRASIL S.A.', + 0x009076: u'FMT AIRCRAFT GATE SUPPORT SYSTEMS AB', + 0x009077: u'ADVANCED FIBRE COMMUNICATIONS', + 0x009078: u'MER TELEMANAGEMENT SOLUTIONS, LTD.', + 0x009079: u'ClearOne, Inc.', + 0x00907A: 
u'SPECTRALINK CORP.', + 0x00907B: u'E-TECH, INC.', + 0x00907C: u'DIGITALCAST, INC.', + 0x00907D: u'Lake Communications', + 0x00907E: u'VETRONIX CORP.', + 0x00907F: u'WatchGuard Technologies, Inc.', + 0x009080: u'NOT LIMITED, INC.', + 0x009081: u'ALOHA NETWORKS, INC.', + 0x009082: u'FORCE INSTITUTE', + 0x009083: u'TURBO COMMUNICATION, INC.', + 0x009084: u'ATECH SYSTEM', + 0x009085: u'GOLDEN ENTERPRISES, INC.', + 0x009086: u'CISCO SYSTEMS, INC.', + 0x009087: u'ITIS', + 0x009088: u'BAXALL SECURITY LTD.', + 0x009089: u'SOFTCOM MICROSYSTEMS, INC.', + 0x00908A: u'BAYLY COMMUNICATIONS, INC.', + 0x00908B: u'PFU Systems, Inc.', + 0x00908C: u'ETREND ELECTRONICS, INC.', + 0x00908D: u'VICKERS ELECTRONICS SYSTEMS', + 0x00908E: u'Nortel Networks Broadband Access', + 0x00908F: u'AUDIO CODES LTD.', + 0x009090: u'I-BUS', + 0x009091: u'DigitalScape, Inc.', + 0x009092: u'CISCO SYSTEMS, INC.', + 0x009093: u'NANAO CORPORATION', + 0x009094: u'OSPREY TECHNOLOGIES, INC.', + 0x009095: u'UNIVERSAL AVIONICS', + 0x009096: u'ASKEY COMPUTER CORP.', + 0x009097: u'SYCAMORE NETWORKS', + 0x009098: u'SBC DESIGNS, INC.', + 0x009099: u'ALLIED TELESIS, K.K.', + 0x00909A: u'ONE WORLD SYSTEMS, INC.', + 0x00909B: u'MARKPOINT AB', + 0x00909C: u'Terayon Communications Systems', + 0x00909D: u'NovaTech Process Solutions, LLC', + 0x00909E: u'Critical IO, LLC', + 0x00909F: u'DIGI-DATA CORPORATION', + 0x0090A0: u'8X8 INC.', + 0x0090A1: u'FLYING PIG SYSTEMS, LTD.', + 0x0090A2: u'CYBERTAN TECHNOLOGY, INC.', + 0x0090A3: u'Corecess Inc.', + 0x0090A4: u'ALTIGA NETWORKS', + 0x0090A5: u'SPECTRA LOGIC', + 0x0090A6: u'CISCO SYSTEMS, INC.', + 0x0090A7: u'CLIENTEC CORPORATION', + 0x0090A8: u'NineTiles Networks, Ltd.', + 0x0090A9: u'WESTERN DIGITAL', + 0x0090AA: u'INDIGO ACTIVE VISION SYSTEMS LIMITED', + 0x0090AB: u'CISCO SYSTEMS, INC.', + 0x0090AC: u'OPTIVISION, INC.', + 0x0090AD: u'ASPECT ELECTRONICS, INC.', + 0x0090AE: u'ITALTEL S.p.A.', + 0x0090AF: u'J. MORITA MFG. 
CORP.', + 0x0090B0: u'VADEM', + 0x0090B1: u'CISCO SYSTEMS, INC.', + 0x0090B2: u'AVICI SYSTEMS INC.', + 0x0090B3: u'AGRANAT SYSTEMS', + 0x0090B4: u'WILLOWBROOK TECHNOLOGIES', + 0x0090B5: u'NIKON CORPORATION', + 0x0090B6: u'FIBEX SYSTEMS', + 0x0090B7: u'DIGITAL LIGHTWAVE, INC.', + 0x0090B8: u'ROHDE & SCHWARZ GMBH & CO. KG', + 0x0090B9: u'BERAN INSTRUMENTS LTD.', + 0x0090BA: u'VALID NETWORKS, INC.', + 0x0090BB: u'TAINET COMMUNICATION SYSTEM Corp.', + 0x0090BC: u'TELEMANN CO., LTD.', + 0x0090BD: u'OMNIA COMMUNICATIONS, INC.', + 0x0090BE: u'IBC/INTEGRATED BUSINESS COMPUTERS', + 0x0090BF: u'CISCO SYSTEMS, INC.', + 0x0090C0: u'K.J. LAW ENGINEERS, INC.', + 0x0090C1: u'Peco II, Inc.', + 0x0090C2: u'JK microsystems, Inc.', + 0x0090C3: u'TOPIC SEMICONDUCTOR CORP.', + 0x0090C4: u'JAVELIN SYSTEMS, INC.', + 0x0090C5: u'INTERNET MAGIC, INC.', + 0x0090C6: u'OPTIM SYSTEMS, INC.', + 0x0090C7: u'ICOM INC.', + 0x0090C8: u'WAVERIDER COMMUNICATIONS (CANADA) INC.', + 0x0090C9: u'DPAC Technologies', + 0x0090CA: u'ACCORD VIDEO TELECOMMUNICATIONS, LTD.', + 0x0090CB: u'Wireless OnLine, Inc.', + 0x0090CC: u'PLANET COMMUNICATIONS, INC.', + 0x0090CD: u'ENT-EMPRESA NACIONAL DE TELECOMMUNICACOES, S.A.', + 0x0090CE: u'TETRA GmbH', + 0x0090CF: u'NORTEL', + 0x0090D0: u'Thomson Telecom Belgium', + 0x0090D1: u'LEICHU ENTERPRISE CO., LTD.', + 0x0090D2: u'ARTEL VIDEO SYSTEMS', + 0x0090D3: u'GIESECKE & DEVRIENT GmbH', + 0x0090D4: u'BindView Development Corp.', + 0x0090D5: u'EUPHONIX, INC.', + 0x0090D6: u'CRYSTAL GROUP', + 0x0090D7: u'NetBoost Corp.', + 0x0090D8: u'WHITECROSS SYSTEMS', + 0x0090D9: u'CISCO SYSTEMS, INC.', + 0x0090DA: u'DYNARC, INC.', + 0x0090DB: u'NEXT LEVEL COMMUNICATIONS', + 0x0090DC: u'TECO INFORMATION SYSTEMS', + 0x0090DD: u'THE MIHARU COMMUNICATIONS CO., LTD.', + 0x0090DE: u'CARDKEY SYSTEMS, INC.', + 0x0090DF: u'MITSUBISHI CHEMICAL AMERICA, INC.', + 0x0090E0: u'SYSTRAN CORP.', + 0x0090E1: u'TELENA S.P.A.', + 0x0090E2: u'DISTRIBUTED PROCESSING TECHNOLOGY', + 0x0090E3: u'AVEX 
ELECTRONICS INC.', + 0x0090E4: u'NEC AMERICA, INC.', + 0x0090E5: u'TEKNEMA, INC.', + 0x0090E6: u'ACER LABORATORIES, INC.', + 0x0090E7: u'HORSCH ELEKTRONIK AG', + 0x0090E8: u'MOXA TECHNOLOGIES CORP., LTD.', + 0x0090E9: u'JANZ COMPUTER AG', + 0x0090EA: u'ALPHA TECHNOLOGIES, INC.', + 0x0090EB: u'SENTRY TELECOM SYSTEMS', + 0x0090EC: u'PYRESCOM', + 0x0090ED: u'CENTRAL SYSTEM RESEARCH CO., LTD.', + 0x0090EE: u'PERSONAL COMMUNICATIONS TECHNOLOGIES', + 0x0090EF: u'INTEGRIX, INC.', + 0x0090F0: u'Harmonic Video Systems Ltd.', + 0x0090F1: u'DOT HILL SYSTEMS CORPORATION', + 0x0090F2: u'CISCO SYSTEMS, INC.', + 0x0090F3: u'ASPECT COMMUNICATIONS', + 0x0090F4: u'LIGHTNING INSTRUMENTATION', + 0x0090F5: u'CLEVO CO.', + 0x0090F6: u'ESCALATE NETWORKS, INC.', + 0x0090F7: u'NBASE COMMUNICATIONS LTD.', + 0x0090F8: u'MEDIATRIX TELECOM', + 0x0090F9: u'LEITCH', + 0x0090FA: u'EMULEX Corp', + 0x0090FB: u'PORTWELL, INC.', + 0x0090FC: u'NETWORK COMPUTING DEVICES', + 0x0090FD: u'CopperCom, Inc.', + 0x0090FE: u'ELECOM CO., LTD. (LANEED DIV.)', + 0x0090FF: u'TELLUS TECHNOLOGY INC.', + 0x0091D6: u'Crystal Group, Inc.', + 0x009D8E: u'CARDIAC RECORDERS, INC.', + 0x00A000: u'CENTILLION NETWORKS, INC.', + 0x00A001: u'DRS Signal Solutions', + 0x00A002: u'LEEDS & NORTHRUP AUSTRALIA PTY LTD', + 0x00A003: u'STAEFA CONTROL SYSTEM', + 0x00A004: u'NETPOWER, INC.', + 0x00A005: u'DANIEL INSTRUMENTS, LTD.', + 0x00A006: u'IMAGE DATA PROCESSING SYSTEM GROUP', + 0x00A007: u'APEXX TECHNOLOGY, INC.', + 0x00A008: u'NETCORP', + 0x00A009: u'WHITETREE NETWORK', + 0x00A00A: u'Airspan', + 0x00A00B: u'COMPUTEX CO., LTD.', + 0x00A00C: u'KINGMAX TECHNOLOGY, INC.', + 0x00A00D: u'THE PANDA PROJECT', + 0x00A00E: u'VISUAL NETWORKS, INC.', + 0x00A00F: u'Broadband Technologies', + 0x00A010: u'SYSLOGIC DATENTECHNIK AG', + 0x00A011: u'MUTOH INDUSTRIES LTD.', + 0x00A012: u'B.A.T.M. 
ADVANCED TECHNOLOGIES', + 0x00A013: u'TELTREND LTD.', + 0x00A014: u'CSIR', + 0x00A015: u'WYLE', + 0x00A016: u'MICROPOLIS CORP.', + 0x00A017: u'J B M CORPORATION', + 0x00A018: u'CREATIVE CONTROLLERS, INC.', + 0x00A019: u'NEBULA CONSULTANTS, INC.', + 0x00A01A: u'BINAR ELEKTRONIK AB', + 0x00A01B: u'PREMISYS COMMUNICATIONS, INC.', + 0x00A01C: u'NASCENT NETWORKS CORPORATION', + 0x00A01D: u'SIXNET', + 0x00A01E: u'EST CORPORATION', + 0x00A01F: u'TRICORD SYSTEMS, INC.', + 0x00A020: u'CITICORP/TTI', + 0x00A021: u'General Dynamics', + 0x00A022: u'CENTRE FOR DEVELOPMENT OF ADVANCED COMPUTING', + 0x00A023: u'APPLIED CREATIVE TECHNOLOGY, INC.', + 0x00A024: u'3COM CORPORATION', + 0x00A025: u'REDCOM LABS INC.', + 0x00A026: u'TELDAT, S.A.', + 0x00A027: u'FIREPOWER SYSTEMS, INC.', + 0x00A028: u'CONNER PERIPHERALS', + 0x00A029: u'COULTER CORPORATION', + 0x00A02A: u'TRANCELL SYSTEMS', + 0x00A02B: u'TRANSITIONS RESEARCH CORP.', + 0x00A02C: u'interWAVE Communications', + 0x00A02D: u'1394 Trade Association', + 0x00A02E: u'BRAND COMMUNICATIONS, LTD.', + 0x00A02F: u'PIRELLI CAVI', + 0x00A030: u'CAPTOR NV/SA', + 0x00A031: u'HAZELTINE CORPORATION, MS 1-17', + 0x00A032: u'GES SINGAPORE PTE. LTD.', + 0x00A033: u'imc MeBsysteme GmbH', + 0x00A034: u'AXEL', + 0x00A035: u'CYLINK CORPORATION', + 0x00A036: u'APPLIED NETWORK TECHNOLOGY', + 0x00A037: u'DATASCOPE CORPORATION', + 0x00A038: u'EMAIL ELECTRONICS', + 0x00A039: u'ROSS TECHNOLOGY, INC.', + 0x00A03A: u'KUBOTEK CORPORATION', + 0x00A03B: u'TOSHIN ELECTRIC CO., LTD.', + 0x00A03C: u'EG&G NUCLEAR INSTRUMENTS', + 0x00A03D: u'OPTO-22', + 0x00A03E: u'ATM FORUM', + 0x00A03F: u'COMPUTER SOCIETY MICROPROCESSOR & MICROPROCESSOR STANDARDS C', + 0x00A040: u'APPLE COMPUTER', + 0x00A041: u'INFICON', + 0x00A042: u'SPUR PRODUCTS CORP.', + 0x00A043: u'AMERICAN TECHNOLOGY LABS, INC.', + 0x00A044: u'NTT IT CO., LTD.', + 0x00A045: u'PHOENIX CONTACT GMBH & CO.', + 0x00A046: u'SCITEX CORP. 
LTD.', + 0x00A047: u'INTEGRATED FITNESS CORP.', + 0x00A048: u'QUESTECH, LTD.', + 0x00A049: u'DIGITECH INDUSTRIES, INC.', + 0x00A04A: u'NISSHIN ELECTRIC CO., LTD.', + 0x00A04B: u'TFL LAN INC.', + 0x00A04C: u'INNOVATIVE SYSTEMS & TECHNOLOGIES, INC.', + 0x00A04D: u'EDA INSTRUMENTS, INC.', + 0x00A04E: u'VOELKER TECHNOLOGIES, INC.', + 0x00A04F: u'AMERITEC CORP.', + 0x00A050: u'CYPRESS SEMICONDUCTOR', + 0x00A051: u'ANGIA COMMUNICATIONS. INC.', + 0x00A052: u'STANILITE ELECTRONICS PTY. LTD', + 0x00A053: u'COMPACT DEVICES, INC.', + 0x00A054: u'PRIVATE', + 0x00A055: u'Data Device Corporation', + 0x00A056: u'MICROPROSS', + 0x00A057: u'LANCOM Systems GmbH', + 0x00A058: u'GLORY, LTD.', + 0x00A059: u'HAMILTON HALLMARK', + 0x00A05A: u'KOFAX IMAGE PRODUCTS', + 0x00A05B: u'MARQUIP, INC.', + 0x00A05C: u'INVENTORY CONVERSION, INC./', + 0x00A05D: u'CS COMPUTER SYSTEME GmbH', + 0x00A05E: u'MYRIAD LOGIC INC.', + 0x00A05F: u'BTG ENGINEERING BV', + 0x00A060: u'ACER PERIPHERALS, INC.', + 0x00A061: u'PURITAN BENNETT', + 0x00A062: u'AES PRODATA', + 0x00A063: u'JRL SYSTEMS, INC.', + 0x00A064: u'KVB/ANALECT', + 0x00A065: u'Symantec Corporation', + 0x00A066: u'ISA CO., LTD.', + 0x00A067: u'NETWORK SERVICES GROUP', + 0x00A068: u'BHP LIMITED', + 0x00A069: u'Symmetricom, Inc.', + 0x00A06A: u'Verilink Corporation', + 0x00A06B: u'DMS DORSCH MIKROSYSTEM GMBH', + 0x00A06C: u'SHINDENGEN ELECTRIC MFG. 
CO., LTD.', + 0x00A06D: u'MANNESMANN TALLY CORPORATION', + 0x00A06E: u'AUSTRON, INC.', + 0x00A06F: u'THE APPCON GROUP, INC.', + 0x00A070: u'COASTCOM', + 0x00A071: u'VIDEO LOTTERY TECHNOLOGIES,INC', + 0x00A072: u'OVATION SYSTEMS LTD.', + 0x00A073: u'COM21, INC.', + 0x00A074: u'PERCEPTION TECHNOLOGY', + 0x00A075: u'MICRON TECHNOLOGY, INC.', + 0x00A076: u'CARDWARE LAB, INC.', + 0x00A077: u'FUJITSU NEXION, INC.', + 0x00A078: u'Marconi Communications', + 0x00A079: u'ALPS ELECTRIC (USA), INC.', + 0x00A07A: u'ADVANCED PERIPHERALS TECHNOLOGIES, INC.', + 0x00A07B: u'DAWN COMPUTER INCORPORATION', + 0x00A07C: u'TONYANG NYLON CO., LTD.', + 0x00A07D: u'SEEQ TECHNOLOGY, INC.', + 0x00A07E: u'AVID TECHNOLOGY, INC.', + 0x00A07F: u'GSM-SYNTEL, LTD.', + 0x00A080: u'SBE, Inc.', + 0x00A081: u'ALCATEL DATA NETWORKS', + 0x00A082: u'NKT ELEKTRONIK A/S', + 0x00A083: u'ASIMMPHONY TURKEY', + 0x00A084: u'DATAPLEX PTY. LTD.', + 0x00A085: u'PRIVATE', + 0x00A086: u'AMBER WAVE SYSTEMS, INC.', + 0x00A087: u'Zarlink Semiconductor Ltd.', + 0x00A088: u'ESSENTIAL COMMUNICATIONS', + 0x00A089: u'XPOINT TECHNOLOGIES, INC.', + 0x00A08A: u'BROOKTROUT TECHNOLOGY, INC.', + 0x00A08B: u'ASTON ELECTRONIC DESIGNS LTD.', + 0x00A08C: u'MultiMedia LANs, Inc.', + 0x00A08D: u'JACOMO CORPORATION', + 0x00A08E: u'Nokia Internet Communications', + 0x00A08F: u'DESKNET SYSTEMS, INC.', + 0x00A090: u'TimeStep Corporation', + 0x00A091: u'APPLICOM INTERNATIONAL', + 0x00A092: u'H. 
BOLLMANN MANUFACTURERS, LTD', + 0x00A093: u'B/E AEROSPACE, Inc.', + 0x00A094: u'COMSAT CORPORATION', + 0x00A095: u'ACACIA NETWORKS, INC.', + 0x00A096: u'MITUMI ELECTRIC CO., LTD.', + 0x00A097: u'JC INFORMATION SYSTEMS', + 0x00A098: u'NETWORK APPLIANCE CORP.', + 0x00A099: u'K-NET LTD.', + 0x00A09A: u'NIHON KOHDEN AMERICA', + 0x00A09B: u'QPSX COMMUNICATIONS, LTD.', + 0x00A09C: u'Xyplex, Inc.', + 0x00A09D: u'JOHNATHON FREEMAN TECHNOLOGIES', + 0x00A09E: u'ICTV', + 0x00A09F: u'COMMVISION CORP.', + 0x00A0A0: u'COMPACT DATA, LTD.', + 0x00A0A1: u'EPIC DATA INC.', + 0x00A0A2: u'DIGICOM S.P.A.', + 0x00A0A3: u'RELIABLE POWER METERS', + 0x00A0A4: u'MICROS SYSTEMS, INC.', + 0x00A0A5: u'TEKNOR MICROSYSTEME, INC.', + 0x00A0A6: u'M.I. SYSTEMS, K.K.', + 0x00A0A7: u'VORAX CORPORATION', + 0x00A0A8: u'RENEX CORPORATION', + 0x00A0A9: u'NAVTEL COMMUNICATIONS INC.', + 0x00A0AA: u'SPACELABS MEDICAL', + 0x00A0AB: u'NETCS INFORMATIONSTECHNIK GMBH', + 0x00A0AC: u'GILAT SATELLITE NETWORKS, LTD.', + 0x00A0AD: u'MARCONI SPA', + 0x00A0AE: u'NUCOM SYSTEMS, INC.', + 0x00A0AF: u'WMS INDUSTRIES', + 0x00A0B0: u'I-O DATA DEVICE, INC.', + 0x00A0B1: u'FIRST VIRTUAL CORPORATION', + 0x00A0B2: u'SHIMA SEIKI', + 0x00A0B3: u'ZYKRONIX', + 0x00A0B4: u'TEXAS MICROSYSTEMS, INC.', + 0x00A0B5: u'3H TECHNOLOGY', + 0x00A0B6: u'SANRITZ AUTOMATION CO., LTD.', + 0x00A0B7: u'CORDANT, INC.', + 0x00A0B8: u'SYMBIOS LOGIC INC.', + 0x00A0B9: u'EAGLE TECHNOLOGY, INC.', + 0x00A0BA: u'PATTON ELECTRONICS CO.', + 0x00A0BB: u'HILAN GMBH', + 0x00A0BC: u'VIASAT, INCORPORATED', + 0x00A0BD: u'I-TECH CORP.', + 0x00A0BE: u'INTEGRATED CIRCUIT SYSTEMS, INC. COMMUNICATIONS GROUP', + 0x00A0BF: u'WIRELESS DATA GROUP MOTOROLA', + 0x00A0C0: u'DIGITAL LINK CORP.', + 0x00A0C1: u'ORTIVUS MEDICAL AB', + 0x00A0C2: u'R.A. 
SYSTEMS CO., LTD.', + 0x00A0C3: u'UNICOMPUTER GMBH', + 0x00A0C4: u'CRISTIE ELECTRONICS LTD.', + 0x00A0C5: u'ZYXEL COMMUNICATION', + 0x00A0C6: u'QUALCOMM INCORPORATED', + 0x00A0C7: u'TADIRAN TELECOMMUNICATIONS', + 0x00A0C8: u'ADTRAN INC.', + 0x00A0C9: u'INTEL CORPORATION - HF1-06', + 0x00A0CA: u'FUJITSU DENSO LTD.', + 0x00A0CB: u'ARK TELECOMMUNICATIONS, INC.', + 0x00A0CC: u'LITE-ON COMMUNICATIONS, INC.', + 0x00A0CD: u'DR. JOHANNES HEIDENHAIN GmbH', + 0x00A0CE: u'ASTROCOM CORPORATION', + 0x00A0CF: u'SOTAS, INC.', + 0x00A0D0: u'TEN X TECHNOLOGY, INC.', + 0x00A0D1: u'INVENTEC CORPORATION', + 0x00A0D2: u'ALLIED TELESIS INTERNATIONAL CORPORATION', + 0x00A0D3: u'INSTEM COMPUTER SYSTEMS, LTD.', + 0x00A0D4: u'RADIOLAN, INC.', + 0x00A0D5: u'SIERRA WIRELESS INC.', + 0x00A0D6: u'SBE, INC.', + 0x00A0D7: u'KASTEN CHASE APPLIED RESEARCH', + 0x00A0D8: u'SPECTRA - TEK', + 0x00A0D9: u'CONVEX COMPUTER CORPORATION', + 0x00A0DA: u'INTEGRATED SYSTEMS Technology, Inc.', + 0x00A0DB: u'FISHER & PAYKEL PRODUCTION', + 0x00A0DC: u'O.N. 
ELECTRONIC CO., LTD.', + 0x00A0DD: u'AZONIX CORPORATION', + 0x00A0DE: u'YAMAHA CORPORATION', + 0x00A0DF: u'STS TECHNOLOGIES, INC.', + 0x00A0E0: u'TENNYSON TECHNOLOGIES PTY LTD', + 0x00A0E1: u'WESTPORT RESEARCH ASSOCIATES, INC.', + 0x00A0E2: u'KEISOKU GIKEN CORP.', + 0x00A0E3: u'XKL SYSTEMS CORP.', + 0x00A0E4: u'OPTIQUEST', + 0x00A0E5: u'NHC COMMUNICATIONS', + 0x00A0E6: u'DIALOGIC CORPORATION', + 0x00A0E7: u'CENTRAL DATA CORPORATION', + 0x00A0E8: u'REUTERS HOLDINGS PLC', + 0x00A0E9: u'ELECTRONIC RETAILING SYSTEMS INTERNATIONAL', + 0x00A0EA: u'ETHERCOM CORP.', + 0x00A0EB: u'Encore Networks', + 0x00A0EC: u'TRANSMITTON LTD.', + 0x00A0ED: u'Brooks Automation, Inc.', + 0x00A0EE: u'NASHOBA NETWORKS', + 0x00A0EF: u'LUCIDATA LTD.', + 0x00A0F0: u'TORONTO MICROELECTRONICS INC.', + 0x00A0F1: u'MTI', + 0x00A0F2: u'INFOTEK COMMUNICATIONS, INC.', + 0x00A0F3: u'STAUBLI', + 0x00A0F4: u'GE', + 0x00A0F5: u'RADGUARD LTD.', + 0x00A0F6: u'AutoGas Systems Inc.', + 0x00A0F7: u'V.I COMPUTER CORP.', + 0x00A0F8: u'SYMBOL TECHNOLOGIES, INC.', + 0x00A0F9: u'BINTEC COMMUNICATIONS GMBH', + 0x00A0FA: u'Marconi Communication GmbH', + 0x00A0FB: u'TORAY ENGINEERING CO., LTD.', + 0x00A0FC: u'IMAGE SCIENCES, INC.', + 0x00A0FD: u'SCITEX DIGITAL PRINTING, INC.', + 0x00A0FE: u'BOSTON TECHNOLOGY, INC.', + 0x00A0FF: u'TELLABS OPERATIONS, INC.', + 0x00AA00: u'INTEL CORPORATION', + 0x00AA01: u'INTEL CORPORATION', + 0x00AA02: u'INTEL CORPORATION', + 0x00AA3C: u'OLIVETTI TELECOM SPA (OLTECO)', + 0x00B009: u'Grass Valley Group', + 0x00B017: u'InfoGear Technology Corp.', + 0x00B019: u'Casi-Rusco', + 0x00B01C: u'Westport Technologies', + 0x00B01E: u'Rantic Labs, Inc.', + 0x00B02A: u'ORSYS GmbH', + 0x00B02D: u'ViaGate Technologies, Inc.', + 0x00B03B: u'HiQ Networks', + 0x00B048: u'Marconi Communications Inc.', + 0x00B04A: u'Cisco Systems, Inc.', + 0x00B052: u'Intellon Corporation', + 0x00B064: u'Cisco Systems, Inc.', + 0x00B069: u'Honewell Oy', + 0x00B06D: u'Jones Futurex Inc.', + 0x00B080: u'Mannesmann Ipulsys 
B.V.', + 0x00B086: u'LocSoft Limited', + 0x00B08E: u'Cisco Systems, Inc.', + 0x00B091: u'Transmeta Corp.', + 0x00B094: u'Alaris, Inc.', + 0x00B09A: u'Morrow Technologies Corp.', + 0x00B09D: u'Point Grey Research Inc.', + 0x00B0AC: u'SIAE-Microelettronica S.p.A.', + 0x00B0AE: u'Symmetricom', + 0x00B0B3: u'Xstreamis PLC', + 0x00B0C2: u'Cisco Systems, Inc.', + 0x00B0C7: u'Tellabs Operations, Inc.', + 0x00B0CE: u'TECHNOLOGY RESCUE', + 0x00B0D0: u'Dell Computer Corp.', + 0x00B0DB: u'Nextcell, Inc.', + 0x00B0DF: u'Reliable Data Technology, Inc.', + 0x00B0E7: u'British Federal Ltd.', + 0x00B0EC: u'EACEM', + 0x00B0EE: u'Ajile Systems, Inc.', + 0x00B0F0: u'CALY NETWORKS', + 0x00B0F5: u'NetWorth Technologies, Inc.', + 0x00BAC0: u'Biometric Access Company', + 0x00BB01: u'OCTOTHORPE CORP.', + 0x00BBF0: u'UNGERMANN-BASS INC.', + 0x00C000: u'LANOPTICS, LTD.', + 0x00C001: u'DIATEK PATIENT MANAGMENT', + 0x00C002: u'SERCOMM CORPORATION', + 0x00C003: u'GLOBALNET COMMUNICATIONS', + 0x00C004: u'JAPAN BUSINESS COMPUTER CO.LTD', + 0x00C005: u'LIVINGSTON ENTERPRISES, INC.', + 0x00C006: u'NIPPON AVIONICS CO., LTD.', + 0x00C007: u'PINNACLE DATA SYSTEMS, INC.', + 0x00C008: u'SECO SRL', + 0x00C009: u'KT TECHNOLOGY (S) PTE LTD', + 0x00C00A: u'MICRO CRAFT', + 0x00C00B: u'NORCONTROL A.S.', + 0x00C00C: u'RELIA TECHNOLGIES', + 0x00C00D: u'ADVANCED LOGIC RESEARCH, INC.', + 0x00C00E: u'PSITECH, INC.', + 0x00C00F: u'QUANTUM SOFTWARE SYSTEMS LTD.', + 0x00C010: u'HIRAKAWA HEWTECH CORP.', + 0x00C011: u'INTERACTIVE COMPUTING DEVICES', + 0x00C012: u'NETSPAN CORPORATION', + 0x00C013: u'NETRIX', + 0x00C014: u'TELEMATICS CALABASAS INT\'L,INC', + 0x00C015: u'NEW MEDIA CORPORATION', + 0x00C016: u'ELECTRONIC THEATRE CONTROLS', + 0x00C017: u'FORTE NETWORKS', + 0x00C018: u'LANART CORPORATION', + 0x00C019: u'LEAP TECHNOLOGY, INC.', + 0x00C01A: u'COROMETRICS MEDICAL SYSTEMS', + 0x00C01B: u'SOCKET COMMUNICATIONS, INC.', + 0x00C01C: u'INTERLINK COMMUNICATIONS LTD.', + 0x00C01D: u'GRAND JUNCTION NETWORKS, INC.', + 
0x00C01E: u'LA FRANCAISE DES JEUX', + 0x00C01F: u'S.E.R.C.E.L.', + 0x00C020: u'ARCO ELECTRONIC, CONTROL LTD.', + 0x00C021: u'NETEXPRESS', + 0x00C022: u'LASERMASTER TECHNOLOGIES, INC.', + 0x00C023: u'TUTANKHAMON ELECTRONICS', + 0x00C024: u'EDEN SISTEMAS DE COMPUTACAO SA', + 0x00C025: u'DATAPRODUCTS CORPORATION', + 0x00C026: u'LANS TECHNOLOGY CO., LTD.', + 0x00C027: u'CIPHER SYSTEMS, INC.', + 0x00C028: u'JASCO CORPORATION', + 0x00C029: u'Nexans Deutschland AG - ANS', + 0x00C02A: u'OHKURA ELECTRIC CO., LTD.', + 0x00C02B: u'GERLOFF GESELLSCHAFT FUR', + 0x00C02C: u'CENTRUM COMMUNICATIONS, INC.', + 0x00C02D: u'FUJI PHOTO FILM CO., LTD.', + 0x00C02E: u'NETWIZ', + 0x00C02F: u'OKUMA CORPORATION', + 0x00C030: u'INTEGRATED ENGINEERING B. V.', + 0x00C031: u'DESIGN RESEARCH SYSTEMS, INC.', + 0x00C032: u'I-CUBED LIMITED', + 0x00C033: u'TELEBIT COMMUNICATIONS APS', + 0x00C034: u'TRANSACTION NETWORK', + 0x00C035: u'QUINTAR COMPANY', + 0x00C036: u'RAYTECH ELECTRONIC CORP.', + 0x00C037: u'DYNATEM', + 0x00C038: u'RASTER IMAGE PROCESSING SYSTEM', + 0x00C039: u'Teridian Semiconductor Corporation', + 0x00C03A: u'MEN-MIKRO ELEKTRONIK GMBH', + 0x00C03B: u'MULTIACCESS COMPUTING CORP.', + 0x00C03C: u'TOWER TECH S.R.L.', + 0x00C03D: u'WIESEMANN & THEIS GMBH', + 0x00C03E: u'FA. GEBR. HELLER GMBH', + 0x00C03F: u'STORES AUTOMATED SYSTEMS, INC.', + 0x00C040: u'ECCI', + 0x00C041: u'DIGITAL TRANSMISSION SYSTEMS', + 0x00C042: u'DATALUX CORP.', + 0x00C043: u'STRATACOM', + 0x00C044: u'EMCOM CORPORATION', + 0x00C045: u'ISOLATION SYSTEMS, LTD.', + 0x00C046: u'KEMITRON LTD.', + 0x00C047: u'UNIMICRO SYSTEMS, INC.', + 0x00C048: u'BAY TECHNICAL ASSOCIATES', + 0x00C049: u'U.S. 
ROBOTICS, INC.', + 0x00C04A: u'GROUP 2000 AG', + 0x00C04B: u'CREATIVE MICROSYSTEMS', + 0x00C04C: u'DEPARTMENT OF FOREIGN AFFAIRS', + 0x00C04D: u'MITEC, INC.', + 0x00C04E: u'COMTROL CORPORATION', + 0x00C04F: u'DELL COMPUTER CORPORATION', + 0x00C050: u'TOYO DENKI SEIZO K.K.', + 0x00C051: u'ADVANCED INTEGRATION RESEARCH', + 0x00C052: u'BURR-BROWN', + 0x00C053: u'Concerto Software', + 0x00C054: u'NETWORK PERIPHERALS, LTD.', + 0x00C055: u'MODULAR COMPUTING TECHNOLOGIES', + 0x00C056: u'SOMELEC', + 0x00C057: u'MYCO ELECTRONICS', + 0x00C058: u'DATAEXPERT CORP.', + 0x00C059: u'NIPPON DENSO CO., LTD.', + 0x00C05A: u'SEMAPHORE COMMUNICATIONS CORP.', + 0x00C05B: u'NETWORKS NORTHWEST, INC.', + 0x00C05C: u'ELONEX PLC', + 0x00C05D: u'L&N TECHNOLOGIES', + 0x00C05E: u'VARI-LITE, INC.', + 0x00C05F: u'FINE-PAL COMPANY LIMITED', + 0x00C060: u'ID SCANDINAVIA AS', + 0x00C061: u'SOLECTEK CORPORATION', + 0x00C062: u'IMPULSE TECHNOLOGY', + 0x00C063: u'MORNING STAR TECHNOLOGIES, INC', + 0x00C064: u'GENERAL DATACOMM IND. INC.', + 0x00C065: u'SCOPE COMMUNICATIONS, INC.', + 0x00C066: u'DOCUPOINT, INC.', + 0x00C067: u'UNITED BARCODE INDUSTRIES', + 0x00C068: u'PHILIP DRAKE ELECTRONICS LTD.', + 0x00C069: u'Axxcelera Broadband Wireless', + 0x00C06A: u'ZAHNER-ELEKTRIK GMBH & CO. 
KG', + 0x00C06B: u'OSI PLUS CORPORATION', + 0x00C06C: u'SVEC COMPUTER CORP.', + 0x00C06D: u'BOCA RESEARCH, INC.', + 0x00C06E: u'HAFT TECHNOLOGY, INC.', + 0x00C06F: u'KOMATSU LTD.', + 0x00C070: u'SECTRA SECURE-TRANSMISSION AB', + 0x00C071: u'AREANEX COMMUNICATIONS, INC.', + 0x00C072: u'KNX LTD.', + 0x00C073: u'XEDIA CORPORATION', + 0x00C074: u'TOYODA AUTOMATIC LOOM', + 0x00C075: u'XANTE CORPORATION', + 0x00C076: u'I-DATA INTERNATIONAL A-S', + 0x00C077: u'DAEWOO TELECOM LTD.', + 0x00C078: u'COMPUTER SYSTEMS ENGINEERING', + 0x00C079: u'FONSYS CO.,LTD.', + 0x00C07A: u'PRIVA B.V.', + 0x00C07B: u'ASCEND COMMUNICATIONS, INC.', + 0x00C07C: u'HIGHTECH INFORMATION', + 0x00C07D: u'RISC DEVELOPMENTS LTD.', + 0x00C07E: u'KUBOTA CORPORATION ELECTRONIC', + 0x00C07F: u'NUPON COMPUTING CORP.', + 0x00C080: u'NETSTAR, INC.', + 0x00C081: u'METRODATA LTD.', + 0x00C082: u'MOORE PRODUCTS CO.', + 0x00C083: u'TRACE MOUNTAIN PRODUCTS, INC.', + 0x00C084: u'DATA LINK CORP. LTD.', + 0x00C085: u'ELECTRONICS FOR IMAGING, INC.', + 0x00C086: u'THE LYNK CORPORATION', + 0x00C087: u'UUNET TECHNOLOGIES, INC.', + 0x00C088: u'EKF ELEKTRONIK GMBH', + 0x00C089: u'TELINDUS DISTRIBUTION', + 0x00C08A: u'LAUTERBACH DATENTECHNIK GMBH', + 0x00C08B: u'RISQ MODULAR SYSTEMS, INC.', + 0x00C08C: u'PERFORMANCE TECHNOLOGIES, INC.', + 0x00C08D: u'TRONIX PRODUCT DEVELOPMENT', + 0x00C08E: u'NETWORK INFORMATION TECHNOLOGY', + 0x00C08F: u'Matsushita Electric Works, Ltd.', + 0x00C090: u'PRAIM S.R.L.', + 0x00C091: u'JABIL CIRCUIT, INC.', + 0x00C092: u'MENNEN MEDICAL INC.', + 0x00C093: u'ALTA RESEARCH CORP.', + 0x00C094: u'VMX INC.', + 0x00C095: u'ZNYX', + 0x00C096: u'TAMURA CORPORATION', + 0x00C097: u'ARCHIPEL SA', + 0x00C098: u'CHUNTEX ELECTRONIC CO., LTD.', + 0x00C099: u'YOSHIKI INDUSTRIAL CO.,LTD.', + 0x00C09A: u'PHOTONICS CORPORATION', + 0x00C09B: u'RELIANCE COMM/TEC, R-TEC', + 0x00C09C: u'TOA ELECTRONIC LTD.', + 0x00C09D: u'DISTRIBUTED SYSTEMS INT\'L, INC', + 0x00C09E: u'CACHE COMPUTERS, INC.', + 0x00C09F: u'QUANTA 
COMPUTER, INC.', + 0x00C0A0: u'ADVANCE MICRO RESEARCH, INC.', + 0x00C0A1: u'TOKYO DENSHI SEKEI CO.', + 0x00C0A2: u'INTERMEDIUM A/S', + 0x00C0A3: u'DUAL ENTERPRISES CORPORATION', + 0x00C0A4: u'UNIGRAF OY', + 0x00C0A5: u'DICKENS DATA SYSTEMS', + 0x00C0A6: u'EXICOM AUSTRALIA PTY. LTD', + 0x00C0A7: u'SEEL LTD.', + 0x00C0A8: u'GVC CORPORATION', + 0x00C0A9: u'BARRON MCCANN LTD.', + 0x00C0AA: u'SILICON VALLEY COMPUTER', + 0x00C0AB: u'Telco Systems, Inc.', + 0x00C0AC: u'GAMBIT COMPUTER COMMUNICATIONS', + 0x00C0AD: u'MARBEN COMMUNICATION SYSTEMS', + 0x00C0AE: u'TOWERCOM CO. INC. DBA PC HOUSE', + 0x00C0AF: u'TEKLOGIX INC.', + 0x00C0B0: u'GCC TECHNOLOGIES,INC.', + 0x00C0B1: u'GENIUS NET CO.', + 0x00C0B2: u'NORAND CORPORATION', + 0x00C0B3: u'COMSTAT DATACOMM CORPORATION', + 0x00C0B4: u'MYSON TECHNOLOGY, INC.', + 0x00C0B5: u'CORPORATE NETWORK SYSTEMS,INC.', + 0x00C0B6: u'Adaptec, Inc.', + 0x00C0B7: u'AMERICAN POWER CONVERSION CORP', + 0x00C0B8: u'FRASER\'S HILL LTD.', + 0x00C0B9: u'FUNK SOFTWARE, INC.', + 0x00C0BA: u'NETVANTAGE', + 0x00C0BB: u'FORVAL CREATIVE, INC.', + 0x00C0BC: u'TELECOM AUSTRALIA/CSSC', + 0x00C0BD: u'INEX TECHNOLOGIES, INC.', + 0x00C0BE: u'ALCATEL - SEL', + 0x00C0BF: u'TECHNOLOGY CONCEPTS, LTD.', + 0x00C0C0: u'SHORE MICROSYSTEMS, INC.', + 0x00C0C1: u'QUAD/GRAPHICS, INC.', + 0x00C0C2: u'INFINITE NETWORKS LTD.', + 0x00C0C3: u'ACUSON COMPUTED SONOGRAPHY', + 0x00C0C4: u'COMPUTER OPERATIONAL', + 0x00C0C5: u'SID INFORMATICA', + 0x00C0C6: u'PERSONAL MEDIA CORP.', + 0x00C0C7: u'SPARKTRUM MICROSYSTEMS, INC.', + 0x00C0C8: u'MICRO BYTE PTY. 
LTD.', + 0x00C0C9: u'ELSAG BAILEY PROCESS', + 0x00C0CA: u'ALFA, INC.', + 0x00C0CB: u'CONTROL TECHNOLOGY CORPORATION', + 0x00C0CC: u'TELESCIENCES CO SYSTEMS, INC.', + 0x00C0CD: u'COMELTA, S.A.', + 0x00C0CE: u'CEI SYSTEMS & ENGINEERING PTE', + 0x00C0CF: u'IMATRAN VOIMA OY', + 0x00C0D0: u'RATOC SYSTEM INC.', + 0x00C0D1: u'COMTREE TECHNOLOGY CORPORATION', + 0x00C0D2: u'SYNTELLECT, INC.', + 0x00C0D3: u'OLYMPUS IMAGE SYSTEMS, INC.', + 0x00C0D4: u'AXON NETWORKS, INC.', + 0x00C0D5: u'QUANCOM ELECTRONIC GMBH', + 0x00C0D6: u'J1 SYSTEMS, INC.', + 0x00C0D7: u'TAIWAN TRADING CENTER DBA', + 0x00C0D8: u'UNIVERSAL DATA SYSTEMS', + 0x00C0D9: u'QUINTE NETWORK CONFIDENTIALITY', + 0x00C0DA: u'NICE SYSTEMS LTD.', + 0x00C0DB: u'IPC CORPORATION (PTE) LTD.', + 0x00C0DC: u'EOS TECHNOLOGIES, INC.', + 0x00C0DD: u'QLogic Corporation', + 0x00C0DE: u'ZCOMM, INC.', + 0x00C0DF: u'KYE Systems Corp.', + 0x00C0E0: u'DSC COMMUNICATION CORP.', + 0x00C0E1: u'SONIC SOLUTIONS', + 0x00C0E2: u'CALCOMP, INC.', + 0x00C0E3: u'OSITECH COMMUNICATIONS, INC.', + 0x00C0E4: u'SIEMENS BUILDING', + 0x00C0E5: u'GESPAC, S.A.', + 0x00C0E6: u'Verilink Corporation', + 0x00C0E7: u'FIBERDATA AB', + 0x00C0E8: u'PLEXCOM, INC.', + 0x00C0E9: u'OAK SOLUTIONS, LTD.', + 0x00C0EA: u'ARRAY TECHNOLOGY LTD.', + 0x00C0EB: u'SEH COMPUTERTECHNIK GMBH', + 0x00C0EC: u'DAUPHIN TECHNOLOGY', + 0x00C0ED: u'US ARMY ELECTRONIC', + 0x00C0EE: u'KYOCERA CORPORATION', + 0x00C0EF: u'ABIT CORPORATION', + 0x00C0F0: u'KINGSTON TECHNOLOGY CORP.', + 0x00C0F1: u'SHINKO ELECTRIC CO., LTD.', + 0x00C0F2: u'TRANSITION NETWORKS', + 0x00C0F3: u'NETWORK COMMUNICATIONS CORP.', + 0x00C0F4: u'INTERLINK SYSTEM CO., LTD.', + 0x00C0F5: u'METACOMP, INC.', + 0x00C0F6: u'CELAN TECHNOLOGY INC.', + 0x00C0F7: u'ENGAGE COMMUNICATION, INC.', + 0x00C0F8: u'ABOUT COMPUTING INC.', + 0x00C0F9: u'Motorola Embedded Computing Group', + 0x00C0FA: u'CANARY COMMUNICATIONS, INC.', + 0x00C0FB: u'ADVANCED TECHNOLOGY LABS', + 0x00C0FC: u'ELASTIC REALITY, INC.', + 0x00C0FD: u'PROSUM', + 
0x00C0FE: u'APTEC COMPUTER SYSTEMS, INC.', + 0x00C0FF: u'DOT HILL SYSTEMS CORPORATION', + 0x00CBBD: u'Cambridge Broadband Ltd.', + 0x00CF1C: u'COMMUNICATION MACHINERY CORP.', + 0x00D000: u'FERRAN SCIENTIFIC, INC.', + 0x00D001: u'VST TECHNOLOGIES, INC.', + 0x00D002: u'DITECH CORPORATION', + 0x00D003: u'COMDA ENTERPRISES CORP.', + 0x00D004: u'PENTACOM LTD.', + 0x00D005: u'ZHS ZEITMANAGEMENTSYSTEME', + 0x00D006: u'CISCO SYSTEMS, INC.', + 0x00D007: u'MIC ASSOCIATES, INC.', + 0x00D008: u'MACTELL CORPORATION', + 0x00D009: u'HSING TECH. ENTERPRISE CO. LTD', + 0x00D00A: u'LANACCESS TELECOM S.A.', + 0x00D00B: u'RHK TECHNOLOGY, INC.', + 0x00D00C: u'SNIJDER MICRO SYSTEMS', + 0x00D00D: u'MICROMERITICS INSTRUMENT', + 0x00D00E: u'PLURIS, INC.', + 0x00D00F: u'SPEECH DESIGN GMBH', + 0x00D010: u'CONVERGENT NETWORKS, INC.', + 0x00D011: u'PRISM VIDEO, INC.', + 0x00D012: u'GATEWORKS CORP.', + 0x00D013: u'PRIMEX AEROSPACE COMPANY', + 0x00D014: u'ROOT, INC.', + 0x00D015: u'UNIVEX MICROTECHNOLOGY CORP.', + 0x00D016: u'SCM MICROSYSTEMS, INC.', + 0x00D017: u'SYNTECH INFORMATION CO., LTD.', + 0x00D018: u'QWES. COM, INC.', + 0x00D019: u'DAINIPPON SCREEN CORPORATE', + 0x00D01A: u'URMET TLC S.P.A.', + 0x00D01B: u'MIMAKI ENGINEERING CO., LTD.', + 0x00D01C: u'SBS TECHNOLOGIES,', + 0x00D01D: u'FURUNO ELECTRIC CO., LTD.', + 0x00D01E: u'PINGTEL CORP.', + 0x00D01F: u'CTAM PTY. 
LTD.', + 0x00D020: u'AIM SYSTEM, INC.', + 0x00D021: u'REGENT ELECTRONICS CORP.', + 0x00D022: u'INCREDIBLE TECHNOLOGIES, INC.', + 0x00D023: u'INFORTREND TECHNOLOGY, INC.', + 0x00D024: u'Cognex Corporation', + 0x00D025: u'XROSSTECH, INC.', + 0x00D026: u'HIRSCHMANN AUSTRIA GMBH', + 0x00D027: u'APPLIED AUTOMATION, INC.', + 0x00D028: u'OMNEON VIDEO NETWORKS', + 0x00D029: u'WAKEFERN FOOD CORPORATION', + 0x00D02A: u'Voxent Systems Ltd.', + 0x00D02B: u'JETCELL, INC.', + 0x00D02C: u'CAMPBELL SCIENTIFIC, INC.', + 0x00D02D: u'ADEMCO', + 0x00D02E: u'COMMUNICATION AUTOMATION CORP.', + 0x00D02F: u'VLSI TECHNOLOGY INC.', + 0x00D030: u'SAFETRAN SYSTEMS CORP.', + 0x00D031: u'INDUSTRIAL LOGIC CORPORATION', + 0x00D032: u'YANO ELECTRIC CO., LTD.', + 0x00D033: u'DALIAN DAXIAN NETWORK', + 0x00D034: u'ORMEC SYSTEMS CORP.', + 0x00D035: u'BEHAVIOR TECH. COMPUTER CORP.', + 0x00D036: u'TECHNOLOGY ATLANTA CORP.', + 0x00D037: u'PHILIPS-DVS-LO BDR', + 0x00D038: u'FIVEMERE, LTD.', + 0x00D039: u'UTILICOM, INC.', + 0x00D03A: u'ZONEWORX, INC.', + 0x00D03B: u'VISION PRODUCTS PTY. LTD.', + 0x00D03C: u'Vieo, Inc.', + 0x00D03D: u'GALILEO TECHNOLOGY, LTD.', + 0x00D03E: u'ROCKETCHIPS, INC.', + 0x00D03F: u'AMERICAN COMMUNICATION', + 0x00D040: u'SYSMATE CO., LTD.', + 0x00D041: u'AMIGO TECHNOLOGY CO., LTD.', + 0x00D042: u'MAHLO GMBH & CO. 
UG', + 0x00D043: u'ZONAL RETAIL DATA SYSTEMS', + 0x00D044: u'ALIDIAN NETWORKS, INC.', + 0x00D045: u'KVASER AB', + 0x00D046: u'DOLBY LABORATORIES, INC.', + 0x00D047: u'XN TECHNOLOGIES', + 0x00D048: u'ECTON, INC.', + 0x00D049: u'IMPRESSTEK CO., LTD.', + 0x00D04A: u'PRESENCE TECHNOLOGY GMBH', + 0x00D04B: u'LA CIE GROUP S.A.', + 0x00D04C: u'EUROTEL TELECOM LTD.', + 0x00D04D: u'DIV OF RESEARCH & STATISTICS', + 0x00D04E: u'LOGIBAG', + 0x00D04F: u'BITRONICS, INC.', + 0x00D050: u'ISKRATEL', + 0x00D051: u'O2 MICRO, INC.', + 0x00D052: u'ASCEND COMMUNICATIONS, INC.', + 0x00D053: u'CONNECTED SYSTEMS', + 0x00D054: u'SAS INSTITUTE INC.', + 0x00D055: u'KATHREIN-WERKE KG', + 0x00D056: u'SOMAT CORPORATION', + 0x00D057: u'ULTRAK, INC.', + 0x00D058: u'CISCO SYSTEMS, INC.', + 0x00D059: u'AMBIT MICROSYSTEMS CORP.', + 0x00D05A: u'SYMBIONICS, LTD.', + 0x00D05B: u'ACROLOOP MOTION CONTROL', + 0x00D05C: u'TECHNOTREND SYSTEMTECHNIK GMBH', + 0x00D05D: u'INTELLIWORXX, INC.', + 0x00D05E: u'STRATABEAM TECHNOLOGY, INC.', + 0x00D05F: u'VALCOM, INC.', + 0x00D060: u'PANASONIC EUROPEAN', + 0x00D061: u'TREMON ENTERPRISES CO., LTD.', + 0x00D062: u'DIGIGRAM', + 0x00D063: u'CISCO SYSTEMS, INC.', + 0x00D064: u'MULTITEL', + 0x00D065: u'TOKO ELECTRIC', + 0x00D066: u'WINTRISS ENGINEERING CORP.', + 0x00D067: u'CAMPIO COMMUNICATIONS', + 0x00D068: u'IWILL CORPORATION', + 0x00D069: u'TECHNOLOGIC SYSTEMS', + 0x00D06A: u'LINKUP SYSTEMS CORPORATION', + 0x00D06B: u'SR TELECOM INC.', + 0x00D06C: u'SHAREWAVE, INC.', + 0x00D06D: u'ACRISON, INC.', + 0x00D06E: u'TRENDVIEW RECORDERS LTD.', + 0x00D06F: u'KMC CONTROLS', + 0x00D070: u'LONG WELL ELECTRONICS CORP.', + 0x00D071: u'ECHELON CORP.', + 0x00D072: u'BROADLOGIC', + 0x00D073: u'ACN ADVANCED COMMUNICATIONS', + 0x00D074: u'TAQUA SYSTEMS, INC.', + 0x00D075: u'ALARIS MEDICAL SYSTEMS, INC.', + 0x00D076: u'Merrill Lynch & Co., Inc.', + 0x00D077: u'LUCENT TECHNOLOGIES', + 0x00D078: u'ELTEX OF SWEDEN AB', + 0x00D079: u'CISCO SYSTEMS, INC.', + 0x00D07A: u'AMAQUEST COMPUTER 
CORP.', + 0x00D07B: u'COMCAM INTERNATIONAL LTD.', + 0x00D07C: u'KOYO ELECTRONICS INC. CO.,LTD.', + 0x00D07D: u'COSINE COMMUNICATIONS', + 0x00D07E: u'KEYCORP LTD.', + 0x00D07F: u'STRATEGY & TECHNOLOGY, LIMITED', + 0x00D080: u'EXABYTE CORPORATION', + 0x00D081: u'REAL TIME DEVICES USA, INC.', + 0x00D082: u'IOWAVE INC.', + 0x00D083: u'INVERTEX, INC.', + 0x00D084: u'NEXCOMM SYSTEMS, INC.', + 0x00D085: u'OTIS ELEVATOR COMPANY', + 0x00D086: u'FOVEON, INC.', + 0x00D087: u'MICROFIRST INC.', + 0x00D088: u'Terayon Communications Systems', + 0x00D089: u'DYNACOLOR, INC.', + 0x00D08A: u'PHOTRON USA', + 0x00D08B: u'ADVA Limited', + 0x00D08C: u'GENOA TECHNOLOGY, INC.', + 0x00D08D: u'PHOENIX GROUP, INC.', + 0x00D08E: u'NVISION INC.', + 0x00D08F: u'ARDENT TECHNOLOGIES, INC.', + 0x00D090: u'CISCO SYSTEMS, INC.', + 0x00D091: u'SMARTSAN SYSTEMS, INC.', + 0x00D092: u'GLENAYRE WESTERN MULTIPLEX', + 0x00D093: u'TQ - COMPONENTS GMBH', + 0x00D094: u'TIMELINE VISTA, INC.', + 0x00D095: u'Alcatel North America ESD', + 0x00D096: u'3COM EUROPE LTD.', + 0x00D097: u'CISCO SYSTEMS, INC.', + 0x00D098: u'Photon Dynamics Canada Inc.', + 0x00D099: u'ELCARD OY', + 0x00D09A: u'FILANET CORPORATION', + 0x00D09B: u'SPECTEL LTD.', + 0x00D09C: u'KAPADIA COMMUNICATIONS', + 0x00D09D: u'VERIS INDUSTRIES', + 0x00D09E: u'2WIRE, INC.', + 0x00D09F: u'NOVTEK TEST SYSTEMS', + 0x00D0A0: u'MIPS DENMARK', + 0x00D0A1: u'OSKAR VIERLING GMBH + CO. 
KG', + 0x00D0A2: u'INTEGRATED DEVICE', + 0x00D0A3: u'VOCAL DATA, INC.', + 0x00D0A4: u'ALANTRO COMMUNICATIONS', + 0x00D0A5: u'AMERICAN ARIUM', + 0x00D0A6: u'LANBIRD TECHNOLOGY CO., LTD.', + 0x00D0A7: u'TOKYO SOKKI KENKYUJO CO., LTD.', + 0x00D0A8: u'NETWORK ENGINES, INC.', + 0x00D0A9: u'SHINANO KENSHI CO., LTD.', + 0x00D0AA: u'CHASE COMMUNICATIONS', + 0x00D0AB: u'DELTAKABEL TELECOM CV', + 0x00D0AC: u'GRAYSON WIRELESS', + 0x00D0AD: u'TL INDUSTRIES', + 0x00D0AE: u'ORESIS COMMUNICATIONS, INC.', + 0x00D0AF: u'CUTLER-HAMMER, INC.', + 0x00D0B0: u'BITSWITCH LTD.', + 0x00D0B1: u'OMEGA ELECTRONICS SA', + 0x00D0B2: u'XIOTECH CORPORATION', + 0x00D0B3: u'DRS FLIGHT SAFETY AND', + 0x00D0B4: u'KATSUJIMA CO., LTD.', + 0x00D0B5: u'IPricot formerly DotCom', + 0x00D0B6: u'CRESCENT NETWORKS, INC.', + 0x00D0B7: u'INTEL CORPORATION', + 0x00D0B8: u'Iomega Corporation', + 0x00D0B9: u'MICROTEK INTERNATIONAL, INC.', + 0x00D0BA: u'CISCO SYSTEMS, INC.', + 0x00D0BB: u'CISCO SYSTEMS, INC.', + 0x00D0BC: u'CISCO SYSTEMS, INC.', + 0x00D0BD: u'SICAN GMBH', + 0x00D0BE: u'EMUTEC INC.', + 0x00D0BF: u'PIVOTAL TECHNOLOGIES', + 0x00D0C0: u'CISCO SYSTEMS, INC.', + 0x00D0C1: u'HARMONIC DATA SYSTEMS, LTD.', + 0x00D0C2: u'BALTHAZAR TECHNOLOGY AB', + 0x00D0C3: u'VIVID TECHNOLOGY PTE, LTD.', + 0x00D0C4: u'TERATECH CORPORATION', + 0x00D0C5: u'COMPUTATIONAL SYSTEMS, INC.', + 0x00D0C6: u'THOMAS & BETTS CORP.', + 0x00D0C7: u'PATHWAY, INC.', + 0x00D0C8: u'I/O CONSULTING A/S', + 0x00D0C9: u'ADVANTECH CO., LTD.', + 0x00D0CA: u'INTRINSYC SOFTWARE INC.', + 0x00D0CB: u'DASAN CO., LTD.', + 0x00D0CC: u'TECHNOLOGIES LYRE INC.', + 0x00D0CD: u'ATAN TECHNOLOGY INC.', + 0x00D0CE: u'ASYST ELECTRONIC', + 0x00D0CF: u'MORETON BAY', + 0x00D0D0: u'ZHONGXING TELECOM LTD.', + 0x00D0D1: u'SIROCCO SYSTEMS, INC.', + 0x00D0D2: u'EPILOG CORPORATION', + 0x00D0D3: u'CISCO SYSTEMS, INC.', + 0x00D0D4: u'V-BITS, INC.', + 0x00D0D5: u'GRUNDIG AG', + 0x00D0D6: u'AETHRA TELECOMUNICAZIONI', + 0x00D0D7: u'B2C2, INC.', + 0x00D0D8: u'3Com Corporation', 
+ 0x00D0D9: u'DEDICATED MICROCOMPUTERS', + 0x00D0DA: u'TAICOM DATA SYSTEMS CO., LTD.', + 0x00D0DB: u'MCQUAY INTERNATIONAL', + 0x00D0DC: u'MODULAR MINING SYSTEMS, INC.', + 0x00D0DD: u'SUNRISE TELECOM, INC.', + 0x00D0DE: u'PHILIPS MULTIMEDIA NETWORK', + 0x00D0DF: u'KUZUMI ELECTRONICS, INC.', + 0x00D0E0: u'DOOIN ELECTRONICS CO.', + 0x00D0E1: u'AVIONITEK ISRAEL INC.', + 0x00D0E2: u'MRT MICRO, INC.', + 0x00D0E3: u'ELE-CHEM ENGINEERING CO., LTD.', + 0x00D0E4: u'CISCO SYSTEMS, INC.', + 0x00D0E5: u'SOLIDUM SYSTEMS CORP.', + 0x00D0E6: u'IBOND INC.', + 0x00D0E7: u'VCON TELECOMMUNICATION LTD.', + 0x00D0E8: u'MAC SYSTEM CO., LTD.', + 0x00D0E9: u'ADVANTAGE CENTURY', + 0x00D0EA: u'NEXTONE COMMUNICATIONS, INC.', + 0x00D0EB: u'LIGHTERA NETWORKS, INC.', + 0x00D0EC: u'NAKAYO TELECOMMUNICATIONS, INC', + 0x00D0ED: u'XIOX', + 0x00D0EE: u'DICTAPHONE CORPORATION', + 0x00D0EF: u'IGT', + 0x00D0F0: u'CONVISION TECHNOLOGY GMBH', + 0x00D0F1: u'SEGA ENTERPRISES, LTD.', + 0x00D0F2: u'MONTEREY NETWORKS', + 0x00D0F3: u'SOLARI DI UDINE SPA', + 0x00D0F4: u'CARINTHIAN TECH INSTITUTE', + 0x00D0F5: u'ORANGE MICRO, INC.', + 0x00D0F6: u'Alcatel Canada', + 0x00D0F7: u'NEXT NETS CORPORATION', + 0x00D0F8: u'FUJIAN STAR TERMINAL', + 0x00D0F9: u'ACUTE COMMUNICATIONS CORP.', + 0x00D0FA: u'RACAL GUARDATA', + 0x00D0FB: u'TEK MICROSYSTEMS, INCORPORATED', + 0x00D0FC: u'GRANITE MICROSYSTEMS', + 0x00D0FD: u'OPTIMA TELE.COM, INC.', + 0x00D0FE: u'ASTRAL POINT', + 0x00D0FF: u'CISCO SYSTEMS, INC.', + 0x00DD00: u'UNGERMANN-BASS INC.', + 0x00DD01: u'UNGERMANN-BASS INC.', + 0x00DD02: u'UNGERMANN-BASS INC.', + 0x00DD03: u'UNGERMANN-BASS INC.', + 0x00DD04: u'UNGERMANN-BASS INC.', + 0x00DD05: u'UNGERMANN-BASS INC.', + 0x00DD06: u'UNGERMANN-BASS INC.', + 0x00DD07: u'UNGERMANN-BASS INC.', + 0x00DD08: u'UNGERMANN-BASS INC.', + 0x00DD09: u'UNGERMANN-BASS INC.', + 0x00DD0A: u'UNGERMANN-BASS INC.', + 0x00DD0B: u'UNGERMANN-BASS INC.', + 0x00DD0C: u'UNGERMANN-BASS INC.', + 0x00DD0D: u'UNGERMANN-BASS INC.', + 0x00DD0E: 
u'UNGERMANN-BASS INC.', + 0x00DD0F: u'UNGERMANN-BASS INC.', + 0x00E000: u'FUJITSU, LTD', + 0x00E001: u'STRAND LIGHTING LIMITED', + 0x00E002: u'CROSSROADS SYSTEMS, INC.', + 0x00E003: u'NOKIA WIRELESS BUSINESS COMMUN', + 0x00E004: u'PMC-SIERRA, INC.', + 0x00E005: u'TECHNICAL CORP.', + 0x00E006: u'SILICON INTEGRATED SYS. CORP.', + 0x00E007: u'NETWORK ALCHEMY LTD.', + 0x00E008: u'AMAZING CONTROLS! INC.', + 0x00E009: u'MARATHON TECHNOLOGIES CORP.', + 0x00E00A: u'DIBA, INC.', + 0x00E00B: u'ROOFTOP COMMUNICATIONS CORP.', + 0x00E00C: u'MOTOROLA', + 0x00E00D: u'RADIANT SYSTEMS', + 0x00E00E: u'AVALON IMAGING SYSTEMS, INC.', + 0x00E00F: u'SHANGHAI BAUD DATA', + 0x00E010: u'HESS SB-AUTOMATENBAU GmbH', + 0x00E011: u'UNIDEN SAN DIEGO R&D CENTER, INC.', + 0x00E012: u'PLUTO TECHNOLOGIES INTERNATIONAL INC.', + 0x00E013: u'EASTERN ELECTRONIC CO., LTD.', + 0x00E014: u'CISCO SYSTEMS, INC.', + 0x00E015: u'HEIWA CORPORATION', + 0x00E016: u'RAPID CITY COMMUNICATIONS', + 0x00E017: u'EXXACT GmbH', + 0x00E018: u'ASUSTEK COMPUTER INC.', + 0x00E019: u'ING. GIORDANO ELETTRONICA', + 0x00E01A: u'COMTEC SYSTEMS. CO., LTD.', + 0x00E01B: u'SPHERE COMMUNICATIONS, INC.', + 0x00E01C: u'MOBILITY ELECTRONICSY', + 0x00E01D: u'WebTV NETWORKS, INC.', + 0x00E01E: u'CISCO SYSTEMS, INC.', + 0x00E01F: u'AVIDIA Systems, Inc.', + 0x00E020: u'TECNOMEN OY', + 0x00E021: u'FREEGATE CORP.', + 0x00E022: u'Analog Devices Inc.', + 0x00E023: u'TELRAD', + 0x00E024: u'GADZOOX NETWORKS', + 0x00E025: u'dit CO., LTD.', + 0x00E026: u'Redlake MASD LLC', + 0x00E027: u'DUX, INC.', + 0x00E028: u'APTIX CORPORATION', + 0x00E029: u'STANDARD MICROSYSTEMS CORP.', + 0x00E02A: u'TANDBERG TELEVISION AS', + 0x00E02B: u'EXTREME NETWORKS', + 0x00E02C: u'AST COMPUTER', + 0x00E02D: u'InnoMediaLogic, Inc.', + 0x00E02E: u'SPC ELECTRONICS CORPORATION', + 0x00E02F: u'MCNS HOLDINGS, L.P.', + 0x00E030: u'MELITA INTERNATIONAL CORP.', + 0x00E031: u'HAGIWARA ELECTRIC CO., LTD.', + 0x00E032: u'MISYS FINANCIAL SYSTEMS, LTD.', + 0x00E033: u'E.E.P.D. 
GmbH', + 0x00E034: u'CISCO SYSTEMS, INC.', + 0x00E035: u'LOUGHBOROUGH SOUND IMAGES, PLC', + 0x00E036: u'PIONEER CORPORATION', + 0x00E037: u'CENTURY CORPORATION', + 0x00E038: u'PROXIMA CORPORATION', + 0x00E039: u'PARADYNE CORP.', + 0x00E03A: u'CABLETRON SYSTEMS, INC.', + 0x00E03B: u'PROMINET CORPORATION', + 0x00E03C: u'AdvanSys', + 0x00E03D: u'FOCON ELECTRONIC SYSTEMS A/S', + 0x00E03E: u'ALFATECH, INC.', + 0x00E03F: u'JATON CORPORATION', + 0x00E040: u'DeskStation Technology, Inc.', + 0x00E041: u'CSPI', + 0x00E042: u'Pacom Systems Ltd.', + 0x00E043: u'VitalCom', + 0x00E044: u'LSICS CORPORATION', + 0x00E045: u'TOUCHWAVE, INC.', + 0x00E046: u'BENTLY NEVADA CORP.', + 0x00E047: u'INFOCUS SYSTEMS', + 0x00E048: u'SDL COMMUNICATIONS, INC.', + 0x00E049: u'MICROWI ELECTRONIC GmbH', + 0x00E04A: u'ENHANCED MESSAGING SYSTEMS, INC', + 0x00E04B: u'JUMP INDUSTRIELLE COMPUTERTECHNIK GmbH', + 0x00E04C: u'REALTEK SEMICONDUCTOR CORP.', + 0x00E04D: u'INTERNET INITIATIVE JAPAN, INC', + 0x00E04E: u'SANYO DENKI CO., LTD.', + 0x00E04F: u'CISCO SYSTEMS, INC.', + 0x00E050: u'EXECUTONE INFORMATION SYSTEMS, INC.', + 0x00E051: u'TALX CORPORATION', + 0x00E052: u'FOUNDRY NETWORKS, INC.', + 0x00E053: u'CELLPORT LABS, INC.', + 0x00E054: u'KODAI HITEC CO., LTD.', + 0x00E055: u'INGENIERIA ELECTRONICA COMERCIAL INELCOM S.A.', + 0x00E056: u'HOLONTECH CORPORATION', + 0x00E057: u'HAN MICROTELECOM. 
CO., LTD.', + 0x00E058: u'PHASE ONE DENMARK A/S', + 0x00E059: u'CONTROLLED ENVIRONMENTS, LTD.', + 0x00E05A: u'GALEA NETWORK SECURITY', + 0x00E05B: u'WEST END SYSTEMS CORP.', + 0x00E05C: u'MATSUSHITA KOTOBUKI ELECTRONICS INDUSTRIES, LTD.', + 0x00E05D: u'UNITEC CO., LTD.', + 0x00E05E: u'JAPAN AVIATION ELECTRONICS INDUSTRY, LTD.', + 0x00E05F: u'e-Net, Inc.', + 0x00E060: u'SHERWOOD', + 0x00E061: u'EdgePoint Networks, Inc.', + 0x00E062: u'HOST ENGINEERING', + 0x00E063: u'CABLETRON - YAGO SYSTEMS, INC.', + 0x00E064: u'SAMSUNG ELECTRONICS', + 0x00E065: u'OPTICAL ACCESS INTERNATIONAL', + 0x00E066: u'ProMax Systems, Inc.', + 0x00E067: u'eac AUTOMATION-CONSULTING GmbH', + 0x00E068: u'MERRIMAC SYSTEMS INC.', + 0x00E069: u'JAYCOR', + 0x00E06A: u'KAPSCH AG', + 0x00E06B: u'W&G SPECIAL PRODUCTS', + 0x00E06C: u'AEP Systems International Ltd', + 0x00E06D: u'COMPUWARE CORPORATION', + 0x00E06E: u'FAR SYSTEMS S.p.A.', + 0x00E06F: u'Terayon Communications Systems', + 0x00E070: u'DH TECHNOLOGY', + 0x00E071: u'EPIS MICROCOMPUTER', + 0x00E072: u'LYNK', + 0x00E073: u'NATIONAL AMUSEMENT NETWORK, INC.', + 0x00E074: u'TIERNAN COMMUNICATIONS, INC.', + 0x00E075: u'Verilink Corporation', + 0x00E076: u'DEVELOPMENT CONCEPTS, INC.', + 0x00E077: u'WEBGEAR, INC.', + 0x00E078: u'BERKELEY NETWORKS', + 0x00E079: u'A.T.N.R.', + 0x00E07A: u'MIKRODIDAKT AB', + 0x00E07B: u'BAY NETWORKS', + 0x00E07C: u'METTLER-TOLEDO, INC.', + 0x00E07D: u'NETRONIX, INC.', + 0x00E07E: u'WALT DISNEY IMAGINEERING', + 0x00E07F: u'LOGISTISTEM s.r.l.', + 0x00E080: u'CONTROL RESOURCES CORPORATION', + 0x00E081: u'TYAN COMPUTER CORP.', + 0x00E082: u'ANERMA', + 0x00E083: u'JATO TECHNOLOGIES, INC.', + 0x00E084: u'COMPULITE R&D', + 0x00E085: u'GLOBAL MAINTECH, INC.', + 0x00E086: u'CYBEX COMPUTER PRODUCTS', + 0x00E087: u'LeCroy - Networking Productions Division', + 0x00E088: u'LTX CORPORATION', + 0x00E089: u'ION Networks, Inc.', + 0x00E08A: u'GEC AVERY, LTD.', + 0x00E08B: u'QLogic Corp.', + 0x00E08C: u'NEOPARADIGM LABS, INC.', + 
0x00E08D: u'PRESSURE SYSTEMS, INC.', + 0x00E08E: u'UTSTARCOM', + 0x00E08F: u'CISCO SYSTEMS, INC.', + 0x00E090: u'BECKMAN LAB. AUTOMATION DIV.', + 0x00E091: u'LG ELECTRONICS, INC.', + 0x00E092: u'ADMTEK INCORPORATED', + 0x00E093: u'ACKFIN NETWORKS', + 0x00E094: u'OSAI SRL', + 0x00E095: u'ADVANCED-VISION TECHNOLGIES CORP.', + 0x00E096: u'SHIMADZU CORPORATION', + 0x00E097: u'CARRIER ACCESS CORPORATION', + 0x00E098: u'AboCom Systems, Inc.', + 0x00E099: u'SAMSON AG', + 0x00E09A: u'POSITRON INDUSTRIES, INC.', + 0x00E09B: u'ENGAGE NETWORKS, INC.', + 0x00E09C: u'MII', + 0x00E09D: u'SARNOFF CORPORATION', + 0x00E09E: u'QUANTUM CORPORATION', + 0x00E09F: u'PIXEL VISION', + 0x00E0A0: u'WILTRON CO.', + 0x00E0A1: u'HIMA PAUL HILDEBRANDT GmbH Co. KG', + 0x00E0A2: u'MICROSLATE INC.', + 0x00E0A3: u'CISCO SYSTEMS, INC.', + 0x00E0A4: u'ESAOTE S.p.A.', + 0x00E0A5: u'ComCore Semiconductor, Inc.', + 0x00E0A6: u'TELOGY NETWORKS, INC.', + 0x00E0A7: u'IPC INFORMATION SYSTEMS, INC.', + 0x00E0A8: u'SAT GmbH & Co.', + 0x00E0A9: u'FUNAI ELECTRIC CO., LTD.', + 0x00E0AA: u'ELECTROSONIC LTD.', + 0x00E0AB: u'DIMAT S.A.', + 0x00E0AC: u'MIDSCO, INC.', + 0x00E0AD: u'EES TECHNOLOGY, LTD.', + 0x00E0AE: u'XAQTI CORPORATION', + 0x00E0AF: u'GENERAL DYNAMICS INFORMATION SYSTEMS', + 0x00E0B0: u'CISCO SYSTEMS, INC.', + 0x00E0B1: u'Alcatel North America ESD', + 0x00E0B2: u'TELMAX COMMUNICATIONS CORP.', + 0x00E0B3: u'EtherWAN Systems, Inc.', + 0x00E0B4: u'TECHNO SCOPE CO., LTD.', + 0x00E0B5: u'ARDENT COMMUNICATIONS CORP.', + 0x00E0B6: u'Entrada Networks', + 0x00E0B7: u'PI GROUP, LTD.', + 0x00E0B8: u'GATEWAY 2000', + 0x00E0B9: u'BYAS SYSTEMS', + 0x00E0BA: u'BERGHOF AUTOMATIONSTECHNIK GmbH', + 0x00E0BB: u'NBX CORPORATION', + 0x00E0BC: u'SYMON COMMUNICATIONS, INC.', + 0x00E0BD: u'INTERFACE SYSTEMS, INC.', + 0x00E0BE: u'GENROCO INTERNATIONAL, INC.', + 0x00E0BF: u'TORRENT NETWORKING TECHNOLOGIES CORP.', + 0x00E0C0: u'SEIWA ELECTRIC MFG. 
CO., LTD.', + 0x00E0C1: u'MEMOREX TELEX JAPAN, LTD.', + 0x00E0C2: u'NECSY S.p.A.', + 0x00E0C3: u'SAKAI SYSTEM DEVELOPMENT CORP.', + 0x00E0C4: u'HORNER ELECTRIC, INC.', + 0x00E0C5: u'BCOM ELECTRONICS INC.', + 0x00E0C6: u'LINK2IT, L.L.C.', + 0x00E0C7: u'EUROTECH SRL', + 0x00E0C8: u'VIRTUAL ACCESS, LTD.', + 0x00E0C9: u'AutomatedLogic Corporation', + 0x00E0CA: u'BEST DATA PRODUCTS', + 0x00E0CB: u'RESON, INC.', + 0x00E0CC: u'HERO SYSTEMS, LTD.', + 0x00E0CD: u'SENSIS CORPORATION', + 0x00E0CE: u'ARN', + 0x00E0CF: u'INTEGRATED DEVICE TECHNOLOGY, INC.', + 0x00E0D0: u'NETSPEED, INC.', + 0x00E0D1: u'TELSIS LIMITED', + 0x00E0D2: u'VERSANET COMMUNICATIONS, INC.', + 0x00E0D3: u'DATENTECHNIK GmbH', + 0x00E0D4: u'EXCELLENT COMPUTER', + 0x00E0D5: u'ARCXEL TECHNOLOGIES, INC.', + 0x00E0D6: u'COMPUTER & COMMUNICATION RESEARCH LAB.', + 0x00E0D7: u'SUNSHINE ELECTRONICS, INC.', + 0x00E0D8: u'LANBit Computer, Inc.', + 0x00E0D9: u'TAZMO CO., LTD.', + 0x00E0DA: u'Alcatel North America ESD', + 0x00E0DB: u'ViaVideo Communications, Inc.', + 0x00E0DC: u'NEXWARE CORP.', + 0x00E0DD: u'ZENITH ELECTRONICS CORPORATION', + 0x00E0DE: u'DATAX NV', + 0x00E0DF: u'KE KOMMUNIKATIONS-ELECTRONIK', + 0x00E0E0: u'SI ELECTRONICS, LTD.', + 0x00E0E1: u'G2 NETWORKS, INC.', + 0x00E0E2: u'INNOVA CORP.', + 0x00E0E3: u'SK-ELEKTRONIK GmbH', + 0x00E0E4: u'FANUC ROBOTICS NORTH AMERICA, Inc.', + 0x00E0E5: u'CINCO NETWORKS, INC.', + 0x00E0E6: u'INCAA DATACOM B.V.', + 0x00E0E7: u'RAYTHEON E-SYSTEMS, INC.', + 0x00E0E8: u'GRETACODER Data Systems AG', + 0x00E0E9: u'DATA LABS, INC.', + 0x00E0EA: u'INNOVAT COMMUNICATIONS, INC.', + 0x00E0EB: u'DIGICOM SYSTEMS, INCORPORATED', + 0x00E0EC: u'CELESTICA INC.', + 0x00E0ED: u'SILICOM, LTD.', + 0x00E0EE: u'MAREL HF', + 0x00E0EF: u'DIONEX', + 0x00E0F0: u'ABLER TECHNOLOGY, INC.', + 0x00E0F1: u'THAT CORPORATION', + 0x00E0F2: u'ARLOTTO COMNET, INC.', + 0x00E0F3: u'WebSprint Communications, Inc.', + 0x00E0F4: u'INSIDE Technology A/S', + 0x00E0F5: u'TELES AG', + 0x00E0F6: u'DECISION EUROPE', + 
0x00E0F7: u'CISCO SYSTEMS, INC.', + 0x00E0F8: u'DICNA CONTROL AB', + 0x00E0F9: u'CISCO SYSTEMS, INC.', + 0x00E0FA: u'TRL TECHNOLOGY, LTD.', + 0x00E0FB: u'LEIGHTRONIX, INC.', + 0x00E0FC: u'HUAWEI TECHNOLOGIES CO., LTD.', + 0x00E0FD: u'A-TREND TECHNOLOGY CO., LTD.', + 0x00E0FE: u'CISCO SYSTEMS, INC.', + 0x00E0FF: u'SECURITY DYNAMICS TECHNOLOGIES, Inc.', + 0x00E6D3: u'NIXDORF COMPUTER CORP.', + 0x020701: u'RACAL-DATACOM', + 0x021C7C: u'PERQ SYSTEMS CORPORATION', + 0x026086: u'LOGIC REPLACEMENT TECH. LTD.', + 0x02608C: u'3COM CORPORATION', + 0x027001: u'RACAL-DATACOM', + 0x0270B0: u'M/A-COM INC. COMPANIES', + 0x0270B3: u'DATA RECALL LTD', + 0x029D8E: u'CARDIAC RECORDERS INC.', + 0x02AA3C: u'OLIVETTI TELECOMM SPA (OLTECO)', + 0x02BB01: u'OCTOTHORPE CORP.', + 0x02C08C: u'3COM CORPORATION', + 0x02CF1C: u'COMMUNICATION MACHINERY CORP.', + 0x02E6D3: u'NIXDORF COMPUTER CORPORATION', + 0x040AE0: u'XMIT AG COMPUTER NETWORKS', + 0x04E0C4: u'TRIUMPH-ADLER AG', + 0x080001: u'COMPUTERVISION CORPORATION', + 0x080002: u'BRIDGE COMMUNICATIONS INC.', + 0x080003: u'ADVANCED COMPUTER COMM.', + 0x080004: u'CROMEMCO INCORPORATED', + 0x080005: u'SYMBOLICS INC.', + 0x080006: u'SIEMENS AG', + 0x080007: u'APPLE COMPUTER INC.', + 0x080008: u'BOLT BERANEK AND NEWMAN INC.', + 0x080009: u'HEWLETT PACKARD', + 0x08000A: u'NESTAR SYSTEMS INCORPORATED', + 0x08000B: u'UNISYS CORPORATION', + 0x08000C: u'MIKLYN DEVELOPMENT CO.', + 0x08000D: u'INTERNATIONAL COMPUTERS LTD.', + 0x08000E: u'NCR CORPORATION', + 0x08000F: u'MITEL CORPORATION', + 0x080011: u'TEKTRONIX INC.', + 0x080012: u'BELL ATLANTIC INTEGRATED SYST.', + 0x080013: u'EXXON', + 0x080014: u'EXCELAN', + 0x080015: u'STC BUSINESS SYSTEMS', + 0x080016: u'BARRISTER INFO SYS CORP', + 0x080017: u'NATIONAL SEMICONDUCTOR', + 0x080018: u'PIRELLI FOCOM NETWORKS', + 0x080019: u'GENERAL ELECTRIC CORPORATION', + 0x08001A: u'TIARA/ 10NET', + 0x08001B: u'DATA GENERAL', + 0x08001C: u'KDD-KOKUSAI DEBNSIN DENWA CO.', + 0x08001D: u'ABLE COMMUNICATIONS INC.', + 
0x08001E: u'APOLLO COMPUTER INC.', + 0x08001F: u'SHARP CORPORATION', + 0x080020: u'SUN MICROSYSTEMS INC.', + 0x080021: u'3M COMPANY', + 0x080022: u'NBI INC.', + 0x080023: u'Panasonic Communications Co., Ltd.', + 0x080024: u'10NET COMMUNICATIONS/DCA', + 0x080025: u'CONTROL DATA', + 0x080026: u'NORSK DATA A.S.', + 0x080027: u'CADMUS COMPUTER SYSTEMS', + 0x080028: u'Texas Instruments', + 0x080029: u'MEGATEK CORPORATION', + 0x08002A: u'MOSAIC TECHNOLOGIES INC.', + 0x08002B: u'DIGITAL EQUIPMENT CORPORATION', + 0x08002C: u'BRITTON LEE INC.', + 0x08002D: u'LAN-TEC INC.', + 0x08002E: u'METAPHOR COMPUTER SYSTEMS', + 0x08002F: u'PRIME COMPUTER INC.', + 0x080030: u'NETWORK RESEARCH CORPORATION', + 0x080030: u'CERN', + 0x080030: u'ROYAL MELBOURNE INST OF TECH', + 0x080031: u'LITTLE MACHINES INC.', + 0x080032: u'TIGAN INCORPORATED', + 0x080033: u'BAUSCH & LOMB', + 0x080034: u'FILENET CORPORATION', + 0x080035: u'MICROFIVE CORPORATION', + 0x080036: u'INTERGRAPH CORPORATION', + 0x080037: u'FUJI-XEROX CO. LTD.', + 0x080038: u'CII HONEYWELL BULL', + 0x080039: u'SPIDER SYSTEMS LIMITED', + 0x08003A: u'ORCATECH INC.', + 0x08003B: u'TORUS SYSTEMS LIMITED', + 0x08003C: u'SCHLUMBERGER WELL SERVICES', + 0x08003D: u'CADNETIX CORPORATIONS', + 0x08003E: u'CODEX CORPORATION', + 0x08003F: u'FRED KOSCHARA ENTERPRISES', + 0x080040: u'FERRANTI COMPUTER SYS. 
LIMITED', + 0x080041: u'RACAL-MILGO INFORMATION SYS..', + 0x080042: u'JAPAN MACNICS CORP.', + 0x080043: u'PIXEL COMPUTER INC.', + 0x080044: u'DAVID SYSTEMS INC.', + 0x080045: u'CONCURRENT COMPUTER CORP.', + 0x080046: u'SONY CORPORATION LTD.', + 0x080047: u'SEQUENT COMPUTER SYSTEMS INC.', + 0x080048: u'EUROTHERM GAUGING SYSTEMS', + 0x080049: u'UNIVATION', + 0x08004A: u'BANYAN SYSTEMS INC.', + 0x08004B: u'PLANNING RESEARCH CORP.', + 0x08004C: u'HYDRA COMPUTER SYSTEMS INC.', + 0x08004D: u'CORVUS SYSTEMS INC.', + 0x08004E: u'3COM EUROPE LTD.', + 0x08004F: u'CYGNET SYSTEMS', + 0x080050: u'DAISY SYSTEMS CORP.', + 0x080051: u'EXPERDATA', + 0x080052: u'INSYSTEC', + 0x080053: u'MIDDLE EAST TECH. UNIVERSITY', + 0x080055: u'STANFORD TELECOMM. INC.', + 0x080056: u'STANFORD LINEAR ACCEL. CENTER', + 0x080057: u'EVANS & SUTHERLAND', + 0x080058: u'SYSTEMS CONCEPTS', + 0x080059: u'A/S MYCRON', + 0x08005A: u'IBM CORPORATION', + 0x08005B: u'VTA TECHNOLOGIES INC.', + 0x08005C: u'FOUR PHASE SYSTEMS', + 0x08005D: u'GOULD INC.', + 0x08005E: u'COUNTERPOINT COMPUTER INC.', + 0x08005F: u'SABER TECHNOLOGY CORP.', + 0x080060: u'INDUSTRIAL NETWORKING INC.', + 0x080061: u'JAROGATE LTD.', + 0x080062: u'GENERAL DYNAMICS', + 0x080063: u'PLESSEY', + 0x080064: u'AUTOPHON AG', + 0x080065: u'GENRAD INC.', + 0x080066: u'AGFA CORPORATION', + 0x080067: u'COMDESIGN', + 0x080068: u'RIDGE COMPUTERS', + 0x080069: u'SILICON GRAPHICS INC.', + 0x08006A: u'ATT BELL LABORATORIES', + 0x08006B: u'ACCEL TECHNOLOGIES INC.', + 0x08006C: u'SUNTEK TECHNOLOGY INT\'L', + 0x08006D: u'WHITECHAPEL COMPUTER WORKS', + 0x08006E: u'MASSCOMP', + 0x08006F: u'PHILIPS APELDOORN B.V.', + 0x080070: u'MITSUBISHI ELECTRIC CORP.', + 0x080071: u'MATRA (DSIE)', + 0x080072: u'XEROX CORP UNIV GRANT PROGRAM', + 0x080073: u'TECMAR INC.', + 0x080074: u'CASIO COMPUTER CO. 
LTD.', + 0x080075: u'DANSK DATA ELECTRONIK', + 0x080076: u'PC LAN TECHNOLOGIES', + 0x080077: u'TSL COMMUNICATIONS LTD.', + 0x080078: u'ACCELL CORPORATION', + 0x080079: u'THE DROID WORKS', + 0x08007A: u'INDATA', + 0x08007B: u'SANYO ELECTRIC CO. LTD.', + 0x08007C: u'VITALINK COMMUNICATIONS CORP.', + 0x08007E: u'AMALGAMATED WIRELESS(AUS) LTD', + 0x08007F: u'CARNEGIE-MELLON UNIVERSITY', + 0x080080: u'AES DATA INC.', + 0x080081: u'ASTECH INC.', + 0x080082: u'VERITAS SOFTWARE', + 0x080083: u'Seiko Instruments Inc.', + 0x080084: u'TOMEN ELECTRONICS CORP.', + 0x080085: u'ELXSI', + 0x080086: u'KONICA MINOLTA HOLDINGS, INC.', + 0x080087: u'XYPLEX', + 0x080088: u'MCDATA CORPORATION', + 0x080089: u'KINETICS', + 0x08008A: u'PERFORMANCE TECHNOLOGY', + 0x08008B: u'PYRAMID TECHNOLOGY CORP.', + 0x08008C: u'NETWORK RESEARCH CORPORATION', + 0x08008D: u'XYVISION INC.', + 0x08008E: u'TANDEM COMPUTERS', + 0x08008F: u'CHIPCOM CORPORATION', + 0x080090: u'SONOMA SYSTEMS', + 0x081443: u'UNIBRAIN S.A.', + 0x08BBCC: u'AK-NORD EDV VERTRIEBSGES. 
mbH', + 0x100000: u'PRIVATE', + 0x10005A: u'IBM CORPORATION', + 0x1000E8: u'NATIONAL SEMICONDUCTOR', + 0x1100AA: u'PRIVATE', + 0x800010: u'ATT BELL LABORATORIES', + 0xA06A00: u'Verilink Corporation', + 0xAA0000: u'DIGITAL EQUIPMENT CORPORATION', + 0xAA0001: u'DIGITAL EQUIPMENT CORPORATION', + 0xAA0002: u'DIGITAL EQUIPMENT CORPORATION', + 0xAA0003: u'DIGITAL EQUIPMENT CORPORATION', + 0xAA0004: u'DIGITAL EQUIPMENT CORPORATION', + 0xACDE48: u'PRIVATE', +} + diff --git a/libs/hachoir_parser/network/tcpdump.py b/libs/hachoir_parser/network/tcpdump.py new file mode 100644 index 0000000..564e618 --- /dev/null +++ b/libs/hachoir_parser/network/tcpdump.py @@ -0,0 +1,505 @@ +""" +Tcpdump parser + +Source: + * libpcap source code (file savefile.c) + * RFC 791 (IPv4) + * RFC 792 (ICMP) + * RFC 793 (TCP) + * RFC 1122 (Requirements for Internet Hosts) + +Author: Victor Stinner +Creation: 23 march 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + Enum, Bytes, NullBytes, RawBytes, + UInt8, UInt16, UInt32, Int32, TimestampUnix32, + Bit, Bits, NullBits) +from hachoir_core.endian import NETWORK_ENDIAN, LITTLE_ENDIAN +from hachoir_core.tools import humanDuration +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.tools import createDict +from hachoir_parser.network.common import MAC48_Address, IPv4_Address, IPv6_Address + +def diff(field): + return humanDuration(field.value*1000) + +class Layer(FieldSet): + endian = NETWORK_ENDIAN + def parseNext(self, parent): + return None + +class ARP(Layer): + opcode_name = { + 1: "request", + 2: "reply" + } + endian = NETWORK_ENDIAN + + def createFields(self): + yield UInt16(self, "hw_type") + yield UInt16(self, "proto_type") + yield UInt8(self, "hw_size") + yield UInt8(self, "proto_size") + yield Enum(UInt16(self, "opcode"), ARP.opcode_name) + yield MAC48_Address(self, "src_mac") + yield IPv4_Address(self, "src_ip") + yield MAC48_Address(self, "dst_mac") + 
yield IPv4_Address(self, "dst_ip") + + def createDescription(self): + desc = "ARP: %s" % self["opcode"].display + opcode = self["opcode"].value + src_ip = self["src_ip"].display + dst_ip = self["dst_ip"].display + if opcode == 1: + desc += ", %s ask %s" % (dst_ip, src_ip) + elif opcode == 2: + desc += " from %s" % src_ip + return desc + +class TCP_Option(FieldSet): + NOP = 1 + MAX_SEGMENT = 2 + WINDOW_SCALE = 3 + SACK = 4 + TIMESTAMP = 8 + + code_name = { + NOP: "NOP", + MAX_SEGMENT: "Max segment size", + WINDOW_SCALE: "Window scale", + SACK: "SACK permitted", + TIMESTAMP: "Timestamp" + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + if self["code"].value != self.NOP: + self._size = self["length"].value * 8 + else: + self._size = 8 + + def createFields(self): + yield Enum(UInt8(self, "code", "Code"), self.code_name) + code = self["code"].value + if code == self.NOP: + return + yield UInt8(self, "length", "Option size in bytes") + if code == self.MAX_SEGMENT: + yield UInt16(self, "max_seg", "Maximum segment size") + elif code == self.WINDOW_SCALE: + yield UInt8(self, "win_scale", "Window scale") + elif code == self.TIMESTAMP: + yield UInt32(self, "ts_val", "Timestamp value") + yield UInt32(self, "ts_ecr", "Timestamp echo reply") + else: + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "data", size) + + def createDescription(self): + return "TCP option: %s" % self["code"].display + +class TCP(Layer): + port_name = { + 13: "daytime", + 20: "ftp data", + 21: "ftp", + 23: "telnet", + 25: "smtp", + 53: "dns", + 63: "dhcp/bootp", + 80: "HTTP", + 110: "pop3", + 119: "nntp", + 123: "ntp", + 139: "netbios session service", + 1863: "MSNMS", + 6667: "IRC" + } + + def createFields(self): + yield Enum(UInt16(self, "src"), self.port_name) + yield Enum(UInt16(self, "dst"), self.port_name) + yield UInt32(self, "seq_num") + yield UInt32(self, "ack_num") + + yield Bits(self, "hdrlen", 6, "Header lenght") + yield NullBits(self, 
"reserved", 2, "Reserved") + + yield Bit(self, "cgst", "Congestion Window Reduced") + yield Bit(self, "ecn-echo", "ECN-echo") + yield Bit(self, "urg", "Urgent") + yield Bit(self, "ack", "Acknowledge") + yield Bit(self, "psh", "Push mmode") + yield Bit(self, "rst", "Reset connection") + yield Bit(self, "syn", "Synchronize") + yield Bit(self, "fin", "Stop the connection") + + yield UInt16(self, "winsize", "Windows size") + yield textHandler(UInt16(self, "checksum"), hexadecimal) + yield UInt16(self, "urgent") + + size = self["hdrlen"].value*8 - self.current_size + while 0 < size: + option = TCP_Option(self, "option[]") + yield option + size -= option.size + + def parseNext(self, parent): + return None + + def createDescription(self): + src = self["src"].value + dst = self["dst"].value + if src < 32768: + src = self["src"].display + else: + src = None + if dst < 32768: + dst = self["dst"].display + else: + dst = None + desc = "TCP" + if src != None and dst != None: + desc += " (%s->%s)" % (src, dst) + elif src != None: + desc += " (%s->)" % (src) + elif dst != None: + desc += " (->%s)" % (dst) + + # Get flags + flags = [] + if self["syn"].value: + flags.append("SYN") + if self["ack"].value: + flags.append("ACK") + if self["fin"].value: + flags.append("FIN") + if self["rst"].value: + flags.append("RST") + if flags: + desc += " [%s]" % (",".join(flags)) + return desc + +class UDP(Layer): + port_name = { + 12: "daytime", + 22: "ssh", + 53: "DNS", + 67: "dhcp/bootp", + 80: "http", + 110: "pop3", + 123: "ntp", + 137: "netbios name service", + 138: "netbios datagram service" + } + + def createFields(self): + yield Enum(UInt16(self, "src"), UDP.port_name) + yield Enum(UInt16(self, "dst"), UDP.port_name) + yield UInt16(self, "length") + yield textHandler(UInt16(self, "checksum"), hexadecimal) + + def createDescription(self): + return "UDP (%s->%s)" % (self["src"].display, self["dst"].display) + +class ICMP(Layer): + REJECT = 3 + PONG = 0 + PING = 8 + type_desc = { + PONG: 
"Pong", + REJECT: "Reject", + PING: "Ping" + } + reject_reason = { + 0: "net unreachable", + 1: "host unreachable", + 2: "protocol unreachable", + 3: "port unreachable", + 4: "fragmentation needed and DF set", + 5: "source route failed", + 6: "Destination network unknown error", + 7: "Destination host unknown error", + 8: "Source host isolated error", + 9: "Destination network administratively prohibited", + 10: "Destination host administratively prohibited", + 11: "Unreachable network for Type Of Service", + 12: "Unreachable host for Type Of Service.", + 13: "Communication administratively prohibited", + 14: "Host precedence violation", + 15: "Precedence cutoff in effect" + } + + def createFields(self): + # Type + yield Enum(UInt8(self, "type"), self.type_desc) + type = self["type"].value + + # Code + field = UInt8(self, "code") + if type == 3: + field = Enum(field, self.reject_reason) + yield field + + # Options + yield textHandler(UInt16(self, "checksum"), hexadecimal) + if type in (self.PING, self.PONG): # and self["code"].value == 0: + yield UInt16(self, "id") + yield UInt16(self, "seq_num") + # follow: ping data + elif type == self.REJECT: + yield NullBytes(self, "empty", 2) + yield UInt16(self, "hop_mtu", "Next-Hop MTU") + + def createDescription(self): + type = self["type"].value + if type in (self.PING, self.PONG): + return "%s (num=%s)" % (self["type"].display, self["seq_num"].value) + else: + return "ICMP (%s)" % self["type"].display + + def parseNext(self, parent): + if self["type"].value == self.REJECT: + return IPv4(parent, "rejected_ipv4") + else: + return None + +class ICMPv6(Layer): + ECHO_REQUEST = 128 + ECHO_REPLY = 129 + TYPE_DESC = { + 128: "Echo request", + 129: "Echo reply", + } + + def createFields(self): + yield Enum(UInt8(self, "type"), self.TYPE_DESC) + yield UInt8(self, "code") + yield textHandler(UInt16(self, "checksum"), hexadecimal) + + if self['type'].value in (self.ECHO_REQUEST, self.ECHO_REPLY): + yield UInt16(self, "id") + yield 
UInt16(self, "sequence") + + def createDescription(self): + if self['type'].value in (self.ECHO_REQUEST, self.ECHO_REPLY): + return "%s (num=%s)" % (self["type"].display, self["sequence"].value) + else: + return "ICMPv6 (%s)" % self["type"].display + +class IP(Layer): + PROTOCOL_INFO = { + 1: ("icmp", ICMP, "ICMP"), + 6: ("tcp", TCP, "TCP"), + 17: ("udp", UDP, "UDP"), + 58: ("icmpv6", ICMPv6, "ICMPv6"), + 60: ("ipv6_opts", None, "IPv6 destination option"), + } + PROTOCOL_NAME = createDict(PROTOCOL_INFO, 2) + + def parseNext(self, parent): + proto = self["protocol"].value + if proto not in self.PROTOCOL_INFO: + return None + name, parser, desc = self.PROTOCOL_INFO[proto] + if not parser: + return None + return parser(parent, name) + +class IPv4(IP): + precedence_name = { + 7: "Network Control", + 6: "Internetwork Control", + 5: "CRITIC/ECP", + 4: "Flash Override", + 3: "Flash", + 2: "Immediate", + 1: "Priority", + 0: "Routine", + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = self["hdr_size"].value * 32 + + def createFields(self): + yield Bits(self, "version", 4, "Version") + yield Bits(self, "hdr_size", 4, "Header size divided by 5") + + # Type of service + yield Enum(Bits(self, "precedence", 3, "Precedence"), self.precedence_name) + yield Bit(self, "low_delay", "If set, low delay, else normal delay") + yield Bit(self, "high_throu", "If set, high throughput, else normal throughput") + yield Bit(self, "high_rel", "If set, high relibility, else normal") + yield NullBits(self, "reserved[]", 2, "(reserved for future use)") + + yield UInt16(self, "length") + yield UInt16(self, "id") + + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "df", "Don't fragment") + yield Bit(self, "more_frag", "There are more fragments? 
if not set, it's the last one") + yield Bits(self, "frag_ofst_lo", 5) + yield UInt8(self, "frag_ofst_hi") + yield UInt8(self, "ttl", "Type to live") + yield Enum(UInt8(self, "protocol"), self.PROTOCOL_NAME) + yield textHandler(UInt16(self, "checksum"), hexadecimal) + yield IPv4_Address(self, "src") + yield IPv4_Address(self, "dst") + + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "options", size) + + def createDescription(self): + return "IPv4 (%s>%s)" % (self["src"].display, self["dst"].display) + +class IPv6(IP): + static_size = 40 * 8 + endian = NETWORK_ENDIAN + + def createFields(self): + yield Bits(self, "version", 4, "Version (6)") + yield Bits(self, "traffic", 8, "Traffic class") + yield Bits(self, "flow", 20, "Flow label") + yield Bits(self, "length", 16, "Payload length") + yield Enum(Bits(self, "protocol", 8, "Next header"), self.PROTOCOL_NAME) + yield Bits(self, "hop_limit", 8, "Hop limit") + yield IPv6_Address(self, "src") + yield IPv6_Address(self, "dst") + + def createDescription(self): + return "IPv6 (%s>%s)" % (self["src"].display, self["dst"].display) + +class Layer2(Layer): + PROTO_INFO = { + 0x0800: ("ipv4", IPv4, "IPv4"), + 0x0806: ("arp", ARP, "ARP"), + 0x86dd: ("ipv6", IPv6, "IPv6"), + } + PROTO_DESC = createDict(PROTO_INFO, 2) + + def parseNext(self, parent): + try: + name, parser, desc = self.PROTO_INFO[ self["protocol"].value ] + return parser(parent, name) + except KeyError: + return None + +class Unicast(Layer2): + packet_type_name = { + 0: "Unicast to us" + } + def createFields(self): + yield Enum(UInt16(self, "packet_type"), self.packet_type_name) + yield UInt16(self, "addr_type", "Link-layer address type") + yield UInt16(self, "addr_length", "Link-layer address length") + length = self["addr_length"].value + length = 8 # FIXME: Should we use addr_length or not? 
+ if length: + yield RawBytes(self, "source", length) + yield Enum(UInt16(self, "protocol"), self.PROTO_DESC) + +class Ethernet(Layer2): + static_size = 14*8 + def createFields(self): + yield MAC48_Address(self, "dst") + yield MAC48_Address(self, "src") + yield Enum(UInt16(self, "protocol"), self.PROTO_DESC) + + def createDescription(self): + return "Ethernet: %s>%s (%s)" % \ + (self["src"].display, self["dst"].display, self["protocol"].display) + +class Packet(FieldSet): + endian = LITTLE_ENDIAN + + def __init__(self, parent, name, parser, first_name): + FieldSet.__init__(self, parent, name) + self._size = (16 + self["caplen"].value) * 8 + self._first_parser = parser + self._first_name = first_name + + def createFields(self): + yield TimestampUnix32(self, "ts_epoch", "Timestamp (Epoch)") + yield UInt32(self, "ts_nanosec", "Timestamp (nano second)") + yield UInt32(self, "caplen", "length of portion present") + yield UInt32(self, "len", "length this packet (off wire)") + + # Read different layers + field = self._first_parser(self, self._first_name) + while field: + yield field + field = field.parseNext(self) + + # Read data if any + size = (self.size - self.current_size) // 8 + if size: + yield RawBytes(self, "data", size) + + def getTimestamp(self): + nano_sec = float(self["ts_nanosec"].value) / 100 + from datetime import timedelta + return self["ts_epoch"].value + timedelta(microseconds=nano_sec) + + def createDescription(self): + t0 = self["/packet[0]"].getTimestamp() +# ts = max(self.getTimestamp() - t0, t0) + ts = self.getTimestamp() - t0 + #text = ["%1.6f: " % ts] + text = ["%s: " % ts] + if "icmp" in self: + text.append(self["icmp"].description) + elif "tcp" in self: + text.append(self["tcp"].description) + elif "udp" in self: + text.append(self["udp"].description) + elif "arp" in self: + text.append(self["arp"].description) + else: + text.append("Packet") + return "".join(text) + +class TcpdumpFile(Parser): + PARSER_TAGS = { + "id": "tcpdump", + "category": 
"misc", + "min_size": 24*8, + "description": "Tcpdump file (network)", + "magic": (("\xd4\xc3\xb2\xa1", 0),), + } + endian = LITTLE_ENDIAN + + LINK_TYPE = { + 1: ("ethernet", Ethernet), + 113: ("unicast", Unicast), + } + LINK_TYPE_DESC = createDict(LINK_TYPE, 0) + + def validate(self): + if self["id"].value != "\xd4\xc3\xb2\xa1": + return "Wrong file signature" + if self["link_type"].value not in self.LINK_TYPE: + return "Unknown link type" + return True + + def createFields(self): + yield Bytes(self, "id", 4, "Tcpdump identifier") + yield UInt16(self, "maj_ver", "Major version") + yield UInt16(self, "min_ver", "Minor version") + yield Int32(self, "this_zone", "GMT to local time zone correction") + yield Int32(self, "sigfigs", "accuracy of timestamps") + yield UInt32(self, "snap_len", "max length saved portion of each pkt") + yield Enum(UInt32(self, "link_type", "data link type"), self.LINK_TYPE_DESC) + link = self["link_type"].value + if link not in self.LINK_TYPE: + raise ParserError("Unknown link type: %s" % link) + name, parser = self.LINK_TYPE[link] + while self.current_size < self.size: + yield Packet(self, "packet[]", parser, name) + diff --git a/libs/hachoir_parser/parser.py b/libs/hachoir_parser/parser.py new file mode 100644 index 0000000..e812415 --- /dev/null +++ b/libs/hachoir_parser/parser.py @@ -0,0 +1,154 @@ +import hachoir_core.config as config +from hachoir_core.field import Parser as GenericParser +from hachoir_core.error import HACHOIR_ERRORS, HachoirError, error +from hachoir_core.tools import makeUnicode +from hachoir_core.i18n import _ +from inspect import getmro + + +class ValidateError(HachoirError): + pass + +class HachoirParser(object): + """ + A parser is the root of all other fields. 
It create first level of fields + and have special attributes and methods: + - tags: dictionnary with keys: + - "file_ext": classical file extensions (string or tuple of strings) ; + - "mime": MIME type(s) (string or tuple of strings) ; + - "description": String describing the parser. + - endian: Byte order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}) of input data ; + - stream: Data input stream (set in L{__init__()}). + + Default values: + - size: Field set size will be size of input stream ; + - mime_type: First MIME type of tags["mime"] (if it does exist, + None otherwise). + """ + + _autofix = False + + def __init__(self, stream, **args): + validate = args.pop("validate", False) + self._mime_type = None + while validate: + nbits = self.getParserTags()["min_size"] + if stream.sizeGe(nbits): + res = self.validate() + if res is True: + break + res = makeUnicode(res) + else: + res = _("stream is smaller than %s.%s bytes" % divmod(nbits, 8)) + raise ValidateError(res or _("no reason given")) + self._autofix = True + + #--- Methods that can be overridden ------------------------------------- + def createDescription(self): + """ + Create an Unicode description + """ + return self.PARSER_TAGS["description"] + + def createMimeType(self): + """ + Create MIME type (string), eg. "image/png" + + If it returns None, "application/octet-stream" is used. + """ + if "mime" in self.PARSER_TAGS: + return self.PARSER_TAGS["mime"][0] + return None + + def validate(self): + """ + Check that the parser is able to parse the stream. Valid results: + - True: stream looks valid ; + - False: stream is invalid ; + - str: string describing the error. 
+ """ + raise NotImplementedError() + + #--- Getter methods ----------------------------------------------------- + def _getDescription(self): + if self._description is None: + try: + self._description = self.createDescription() + if isinstance(self._description, str): + self._description = makeUnicode(self._description) + except HACHOIR_ERRORS, err: + error("Error getting description of %s: %s" \ + % (self.path, unicode(err))) + self._description = self.PARSER_TAGS["description"] + return self._description + description = property(_getDescription, + doc="Description of the parser") + + def _getMimeType(self): + if not self._mime_type: + try: + self._mime_type = self.createMimeType() + except HACHOIR_ERRORS, err: + self.error("Error when creating MIME type: %s" % unicode(err)) + if not self._mime_type \ + and self.createMimeType != Parser.createMimeType: + self._mime_type = Parser.createMimeType(self) + if not self._mime_type: + self._mime_type = u"application/octet-stream" + return self._mime_type + mime_type = property(_getMimeType) + + def createContentSize(self): + return None + def _getContentSize(self): + if not hasattr(self, "_content_size"): + try: + self._content_size = self.createContentSize() + except HACHOIR_ERRORS, err: + error("Unable to compute %s content size: %s" % (self.__class__.__name__, err)) + self._content_size = None + return self._content_size + content_size = property(_getContentSize) + + def createFilenameSuffix(self): + """ + Create filename suffix: "." + first value of self.PARSER_TAGS["file_ext"], + or None if self.PARSER_TAGS["file_ext"] doesn't exist. + """ + file_ext = self.getParserTags().get("file_ext") + if isinstance(file_ext, (tuple, list)): + file_ext = file_ext[0] + return file_ext and '.' 
+ file_ext + def _getFilenameSuffix(self): + if not hasattr(self, "_filename_suffix"): + self._filename_extension = self.createFilenameSuffix() + return self._filename_extension + filename_suffix = property(_getFilenameSuffix) + + @classmethod + def getParserTags(cls): + tags = {} + for cls in reversed(getmro(cls)): + if hasattr(cls, "PARSER_TAGS"): + tags.update(cls.PARSER_TAGS) + return tags + + @classmethod + def print_(cls, out, verbose): + tags = cls.getParserTags() + print >>out, "- %s: %s" % (tags["id"], tags["description"]) + if verbose: + if "mime" in tags: + print >>out, " MIME type: %s" % (", ".join(tags["mime"])) + if "file_ext" in tags: + file_ext = ", ".join( + ".%s" % file_ext for file_ext in tags["file_ext"]) + print >>out, " File extension: %s" % file_ext + + autofix = property(lambda self: self._autofix and config.autofix) + +class Parser(HachoirParser, GenericParser): + def __init__(self, stream, **args): + GenericParser.__init__(self, stream) + HachoirParser.__init__(self, stream, **args) + diff --git a/libs/hachoir_parser/parser_list.py b/libs/hachoir_parser/parser_list.py new file mode 100644 index 0000000..3807155 --- /dev/null +++ b/libs/hachoir_parser/parser_list.py @@ -0,0 +1,216 @@ +import re +import types +from hachoir_core.error import error +from hachoir_core.i18n import _ +from hachoir_parser import Parser, HachoirParser +import sys + +### Parser list ################################################################ + +class ParserList(object): + VALID_CATEGORY = ("archive", "audio", "container", "file_system", + "game", "image", "misc", "program", "video") + ID_REGEX = re.compile("^[a-z0-9][a-z0-9_]{2,}$") + + def __init__(self): + self.parser_list = [] + self.bytag = { "id": {}, "category": {} } + + def translate(self, name, value): + if name in ("magic",): + return True + elif name == "min_size": + return - value < 0 or "Invalid minimum size (min_size)" + elif name == "description": + return isinstance(value, (str, unicode)) and 
bool(value) or "Invalid description" + elif name == "category": + if value not in self.VALID_CATEGORY: + return "Invalid category: %r" % value + elif name == "id": + if type(value) is not str or not self.ID_REGEX.match(value): + return "Invalid identifier: %r" % value + parser = self.bytag[name].get(value) + if parser: + return "Duplicate parser id: %s already used by %s" % \ + (value, parser[0].__name__) + # TODO: lists should be forbidden + if isinstance(value, list): + value = tuple(value) + elif not isinstance(value, tuple): + value = value, + return name, value + + def validParser(self, parser, tags): + if "id" not in tags: + return "No identifier" + if "description" not in tags: + return "No description" + # TODO: Allow simple strings for file_ext/mime ? + # (see also HachoirParser.createFilenameSuffix) + file_ext = tags.get("file_ext", ()) + if not isinstance(file_ext, (tuple, list)): + return "File extension is not a tuple or list" + mimes = tags.get("mime", ()) + if not isinstance(mimes, tuple): + return "MIME type is not a tuple" + for mime in mimes: + if not isinstance(mime, unicode): + return "MIME type %r is not an unicode string" % mime + + return "" + + def add(self, parser): + tags = parser.getParserTags() + err = self.validParser(parser, tags) + if err: + error("Skip parser %s: %s" % (parser.__name__, err)) + return + + _tags = [] + for tag in tags.iteritems(): + tag = self.translate(*tag) + if isinstance(tag, tuple): + _tags.append(tag) + elif tag is not True: + error("[%s] %s" % (parser.__name__, tag)) + return + + self.parser_list.append(parser) + + for name, values in _tags: + byname = self.bytag.setdefault(name,{}) + for value in values: + byname.setdefault(value,[]).append(parser) + + def __iter__(self): + return iter(self.parser_list) + + def print_(self, title=None, out=None, verbose=False, format="one-line"): + """Display a list of parser with its title + * out: output file + * title : title of the list to display + * format: "rest", 
"trac", "file-ext", "mime" or "one_line" (default) + """ + if out is None: + out = sys.stdout + + if format in ("file-ext", "mime"): + # Create file extension set + extensions = set() + for parser in self: + file_ext = parser.getParserTags().get(format, ()) + file_ext = list(file_ext) + try: + file_ext.remove("") + except ValueError: + pass + extensions |= set(file_ext) + + # Remove empty extension + extensions -= set(('',)) + + # Convert to list and sort by ASCII order + extensions = list(extensions) + extensions.sort() + + # Print list + text = ", ".join( str(item) for item in extensions ) + if format == "file-ext": + print >>out, "File extensions: %s." % text + print >>out + print >>out, "Total: %s file extensions." % len(extensions) + else: + print >>out, "MIME types: %s." % text + print >>out + print >>out, "Total: %s MIME types." % len(extensions) + return + + if format == "trac": + print >>out, "== List of parsers ==" + print >>out + print >>out, "Total: %s parsers" % len(self.parser_list) + print >>out + elif format == "one_line": + if title: + print >>out, title + else: + print >>out, _("List of Hachoir parsers.") + print >>out + + # Create parser list sorted by module + bycategory = self.bytag["category"] + for category in sorted(bycategory.iterkeys()): + if format == "one_line": + parser_list = [ parser.PARSER_TAGS["id"] for parser in bycategory[category] ] + parser_list.sort() + print >>out, "- %s: %s" % (category.title(), ", ".join(parser_list)) + else: + if format == "rest": + print >>out, category.replace("_", " ").title() + print >>out, "-" * len(category) + print >>out + elif format == "trac": + print >>out, "=== %s ===" % category.replace("_", " ").title() + print >>out + else: + print >>out, "[%s]" % category + parser_list = sorted(bycategory[category], + key=lambda parser: parser.PARSER_TAGS["id"]) + if format == "rest": + for parser in parser_list: + tags = parser.getParserTags() + print >>out, "* %s: %s" % (tags["id"], tags["description"]) + 
elif format == "trac": + for parser in parser_list: + tags = parser.getParserTags() + desc = tags["description"] + desc = re.sub(r"([A-Z][a-z]+[A-Z][^ ]+)", r"!\1", desc) + print >>out, " * %s: %s" % (tags["id"], desc) + else: + for parser in parser_list: + parser.print_(out, verbose) + print >>out + if format != "trac": + print >>out, "Total: %s parsers" % len(self.parser_list) + + +class HachoirParserList(ParserList): + _instance = None + + @classmethod + def getInstance(cls): + if cls._instance is None: + cls._instance = cls() + return cls._instance + + def __init__(self): + ParserList.__init__(self) + self._load() + + def _load(self): + """ + Load all parsers from "hachoir.parser" module. + + Return the list of loaded parsers. + """ + # Parser list is already loaded? + if self.parser_list: + return self.parser_list + + todo = [] + module = __import__("hachoir_parser") + for attrname in dir(module): + attr = getattr(module, attrname) + if isinstance(attr, types.ModuleType): + todo.append(attr) + + for module in todo: + for name in dir(module): + attr = getattr(module, name) + if isinstance(attr, type) \ + and issubclass(attr, HachoirParser) \ + and attr not in (Parser, HachoirParser): + self.add(attr) + assert 1 <= len(self.parser_list) + return self.parser_list + diff --git a/libs/hachoir_parser/program/__init__.py b/libs/hachoir_parser/program/__init__.py new file mode 100644 index 0000000..2e719f0 --- /dev/null +++ b/libs/hachoir_parser/program/__init__.py @@ -0,0 +1,6 @@ +from hachoir_parser.program.elf import ElfFile +from hachoir_parser.program.exe import ExeFile +from hachoir_parser.program.python import PythonCompiledFile +from hachoir_parser.program.java import JavaCompiledClassFile +from hachoir_parser.program.prc import PRCFile + diff --git a/libs/hachoir_parser/program/elf.py b/libs/hachoir_parser/program/elf.py new file mode 100644 index 0000000..3d5731e --- /dev/null +++ b/libs/hachoir_parser/program/elf.py @@ -0,0 +1,187 @@ +""" +ELF (Unix/BSD 
executable file format) parser. + +Author: Victor Stinner +Creation date: 08 may 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, UInt32, Enum, + String, Bytes) +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN + +class ElfHeader(FieldSet): + static_size = 52*8 + LITTLE_ENDIAN_ID = 1 + BIG_ENDIAN_ID = 2 + MACHINE_NAME = { + 1: u"AT&T WE 32100", + 2: u"SPARC", + 3: u"Intel 80386", + 4: u"Motorola 68000", + 5: u"Motorola 88000", + 7: u"Intel 80860", + 8: u"MIPS RS3000" + } + CLASS_NAME = { + 1: u"32 bits", + 2: u"64 bits" + } + TYPE_NAME = { + 0: u"No file type", + 1: u"Relocatable file", + 2: u"Executable file", + 3: u"Shared object file", + 4: u"Core file", + 0xFF00: u"Processor-specific (0xFF00)", + 0xFFFF: u"Processor-specific (0xFFFF)" + } + ENDIAN_NAME = { + LITTLE_ENDIAN_ID: "Little endian", + BIG_ENDIAN_ID: "Big endian", + } + + def createFields(self): + yield Bytes(self, "signature", 4, r'ELF signature ("\x7fELF")') + yield Enum(UInt8(self, "class", "Class"), self.CLASS_NAME) + yield Enum(UInt8(self, "endian", "Endian"), self.ENDIAN_NAME) + yield UInt8(self, "file_version", "File version") + yield String(self, "pad", 8, "Pad") + yield UInt8(self, "nb_ident", "Size of ident[]") + yield Enum(UInt16(self, "type", "File type"), self.TYPE_NAME) + yield Enum(UInt16(self, "machine", "Machine type"), self.MACHINE_NAME) + yield UInt32(self, "version", "ELF format version") + yield UInt32(self, "entry", "Number of entries") + yield UInt32(self, "phoff", "Program header offset") + yield UInt32(self, "shoff", "Section header offset") + yield UInt32(self, "flags", "Flags") + yield UInt16(self, "ehsize", "Elf header size (this header)") + yield UInt16(self, "phentsize", "Program header entry size") + yield UInt16(self, "phnum", "Program header entry count") + yield UInt16(self, "shentsize", "Section header entry size") + yield 
UInt16(self, "shnum", "Section header entre count") + yield UInt16(self, "shstrndx", "Section header strtab index") + + def isValid(self): + if self["signature"].value != "\x7FELF": + return "Wrong ELF signature" + if self["class"].value not in self.CLASS_NAME: + return "Unknown class" + if self["endian"].value not in self.ENDIAN_NAME: + return "Unknown endian (%s)" % self["endian"].value + return "" + +class SectionHeader32(FieldSet): + static_size = 40*8 + TYPE_NAME = { + 8: "BSS" + } + + def createFields(self): + yield UInt32(self, "name", "Name") + yield Enum(UInt32(self, "type", "Type"), self.TYPE_NAME) + yield UInt32(self, "flags", "Flags") + yield textHandler(UInt32(self, "VMA", "Virtual memory address"), hexadecimal) + yield textHandler(UInt32(self, "LMA", "Logical memory address (in file)"), hexadecimal) + yield textHandler(UInt32(self, "size", "Size"), hexadecimal) + yield UInt32(self, "link", "Link") + yield UInt32(self, "info", "Information") + yield UInt32(self, "addr_align", "Address alignment") + yield UInt32(self, "entry_size", "Entry size") + + def createDescription(self): + return "Section header (name: %s, type: %s)" % \ + (self["name"].value, self["type"].display) + +class ProgramHeader32(FieldSet): + TYPE_NAME = { + 3: "Dynamic library" + } + static_size = 32*8 + + def createFields(self): + yield Enum(UInt16(self, "type", "Type"), ProgramHeader32.TYPE_NAME) + yield UInt16(self, "flags", "Flags") + yield UInt32(self, "offset", "Offset") + yield textHandler(UInt32(self, "vaddr", "V. address"), hexadecimal) + yield textHandler(UInt32(self, "paddr", "P. 
address"), hexadecimal) + yield UInt32(self, "file_size", "File size") + yield UInt32(self, "mem_size", "Memory size") + yield UInt32(self, "align", "Alignment") + yield UInt32(self, "xxx", "???") + + def createDescription(self): + return "Program Header (%s)" % self["type"].display + +def sortSection(a, b): + return int(a["offset"] - b["offset"]) + +#class Sections(FieldSet): +# def createFields?(self, stream, parent, sections): +# for section in sections: +# ofs = section["offset"] +# size = section["file_size"] +# if size != 0: +# sub = stream.createSub(ofs, size) +# #yield DeflateFilter(self, "section[]", sub, size, Section, "Section")) +# chunk = self.doRead("section[]", "Section", (Section,), {"stream": sub}) +# else: +# chunk = self.doRead("section[]", "Section", (FormatChunk, "string[0]")) +# chunk.description = "ELF section (in file: %s..%s)" % (ofs, ofs+size) + +class ElfFile(Parser): + PARSER_TAGS = { + "id": "elf", + "category": "program", + "file_ext": ("so", ""), + "min_size": ElfHeader.static_size, # At least one program header + "mime": ( + u"application/x-executable", + u"application/x-object", + u"application/x-sharedlib", + u"application/x-executable-file", + u"application/x-coredump"), + "magic": (("\x7FELF", 0),), + "description": "ELF Unix/BSD program/library" + } + endian = LITTLE_ENDIAN + + def validate(self): + err = self["header"].isValid() + if err: + return err + return True + + def createFields(self): + # Choose the right endian depending on endian specified in header + if self.stream.readBits(5*8, 8, BIG_ENDIAN) == ElfHeader.BIG_ENDIAN_ID: + self.endian = BIG_ENDIAN + else: + self.endian = LITTLE_ENDIAN + + # Parse header and program headers + yield ElfHeader(self, "header", "Header") + for index in xrange(self["header/phnum"].value): + yield ProgramHeader32(self, "prg_header[]") + + if False: + raise ParserError("TODO: Parse sections...") + #sections = self.array("prg_header") + #size = self["header/shoff"].value - 
self.current_size//8 + #chunk = self.doRead("data", "Data", (DeflateFilter, stream, size, Sections, sections)) + #chunk.description = "Sections (use an evil hack to manage share same data on differents parts)" + #assert self.current_size//8 == self["header/shoff"].value + else: + raw = self.seekByte(self["header/shoff"].value, "raw[]", relative=False) + if raw: + yield raw + + for index in xrange(self["header/shnum"].value): + yield SectionHeader32(self, "section_header[]") + + def createDescription(self): + return "ELF Unix/BSD program/library: %s" % ( + self["header/class"].display) + diff --git a/libs/hachoir_parser/program/exe.py b/libs/hachoir_parser/program/exe.py new file mode 100644 index 0000000..5a7bc72 --- /dev/null +++ b/libs/hachoir_parser/program/exe.py @@ -0,0 +1,224 @@ +""" +Microsoft Windows Portable Executable (PE) file parser. + +Informations: +- Microsoft Portable Executable and Common Object File Format Specification: + http://www.microsoft.com/whdc/system/platform/firmware/PECOFF.mspx + +Author: Victor Stinner +Creation date: 2006-08-13 +""" + +from hachoir_parser import HachoirParser +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.field import (FieldSet, RootSeekableFieldSet, + UInt16, UInt32, String, + RawBytes, PaddingBytes) +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_parser.program.exe_ne import NE_Header +from hachoir_parser.program.exe_pe import PE_Header, PE_OptHeader, SectionHeader +from hachoir_parser.program.exe_res import PE_Resource, NE_VersionInfoNode + +MAX_NB_SECTION = 50 + +class MSDosHeader(FieldSet): + static_size = 64*8 + + def createFields(self): + yield String(self, "header", 2, "File header (MZ)", charset="ASCII") + yield UInt16(self, "size_mod_512", "File size in bytes modulo 512") + yield UInt16(self, "size_div_512", "File size in bytes divide by 512") + yield UInt16(self, "reloc_entries", "Number of relocation entries") + yield UInt16(self, "code_offset", "Offset to 
the code in the file (divided by 16)") + yield UInt16(self, "needed_memory", "Memory needed to run (divided by 16)") + yield UInt16(self, "max_memory", "Maximum memory needed to run (divided by 16)") + yield textHandler(UInt32(self, "init_ss_sp", "Initial value of SP:SS registers"), hexadecimal) + yield UInt16(self, "checksum", "Checksum") + yield textHandler(UInt32(self, "init_cs_ip", "Initial value of CS:IP registers"), hexadecimal) + yield UInt16(self, "reloc_offset", "Offset in file to relocation table") + yield UInt16(self, "overlay_number", "Overlay number") + yield PaddingBytes(self, "reserved[]", 8, "Reserved") + yield UInt16(self, "oem_id", "OEM id") + yield UInt16(self, "oem_info", "OEM info") + yield PaddingBytes(self, "reserved[]", 20, "Reserved") + yield UInt32(self, "next_offset", "Offset to next header (PE or NE)") + + def isValid(self): + if 512 <= self["size_mod_512"].value: + return "Invalid field 'size_mod_512' value" + if self["code_offset"].value < 4: + return "Invalid code offset" + looks_pe = self["size_div_512"].value < 4 + if looks_pe: + if self["checksum"].value != 0: + return "Invalid value of checksum" + if not (80 <= self["next_offset"].value <= 1024): + return "Invalid value of next_offset" + return "" + +class ExeFile(HachoirParser, RootSeekableFieldSet): + PARSER_TAGS = { + "id": "exe", + "category": "program", + "file_ext": ("exe", "dll", "ocx"), + "mime": (u"application/x-dosexec",), + "min_size": 64*8, + #"magic": (("MZ", 0),), + "magic_regex": (("MZ.[\0\1].{4}[^\0\1\2\3]", 0),), + "description": "Microsoft Windows Portable Executable" + } + endian = LITTLE_ENDIAN + + def __init__(self, stream, **args): + RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) + HachoirParser.__init__(self, stream, **args) + + def validate(self): + if self.stream.readBytes(0, 2) != 'MZ': + return "Wrong header" + err = self["msdos"].isValid() + if err: + return "Invalid MSDOS header: "+err + if self.isPE(): + if 
MAX_NB_SECTION < self["pe_header/nb_section"].value: + return "Invalid number of section (%s)" \ + % self["pe_header/nb_section"].value + return True + + def createFields(self): + yield MSDosHeader(self, "msdos", "MS-DOS program header") + + if self.isPE() or self.isNE(): + offset = self["msdos/next_offset"].value + self.seekByte(offset, relative=False) + + if self.isPE(): + for field in self.parsePortableExecutable(): + yield field + elif self.isNE(): + for field in self.parseNE_Executable(): + yield field + else: + offset = self["msdos/code_offset"].value * 16 + self.seekByte(offset, relative=False) + + def parseNE_Executable(self): + yield NE_Header(self, "ne_header") + + # FIXME: Compute resource offset instead of using searchBytes() + # Ugly hack to get find version info structure + start = self.current_size + addr = self.stream.searchBytes('VS_VERSION_INFO', start) + if addr: + self.seekBit(addr-32) + yield NE_VersionInfoNode(self, "info") + + def parsePortableExecutable(self): + # Read PE header + yield PE_Header(self, "pe_header") + + # Read PE optional header + size = self["pe_header/opt_hdr_size"].value + rsrc_rva = None + if size: + yield PE_OptHeader(self, "pe_opt_header", size=size*8) + if "pe_opt_header/resource/rva" in self: + rsrc_rva = self["pe_opt_header/resource/rva"].value + + # Read section headers + sections = [] + for index in xrange(self["pe_header/nb_section"].value): + section = SectionHeader(self, "section_hdr[]") + yield section + if section["phys_size"].value: + sections.append(section) + + # Read sections + sections.sort(key=lambda field: field["phys_off"].value) + for section in sections: + self.seekByte(section["phys_off"].value) + size = section["phys_size"].value + if size: + name = section.createSectionName() + if rsrc_rva is not None and section["rva"].value == rsrc_rva: + yield PE_Resource(self, name, section, size=size*8) + else: + yield RawBytes(self, name, size) + + def isPE(self): + if not hasattr(self, "_is_pe"): + 
self._is_pe = False + offset = self["msdos/next_offset"].value * 8 + if 2*8 <= offset \ + and (offset+PE_Header.static_size) <= self.size \ + and self.stream.readBytes(offset, 4) == 'PE\0\0': + self._is_pe = True + return self._is_pe + + def isNE(self): + if not hasattr(self, "_is_ne"): + self._is_ne = False + offset = self["msdos/next_offset"].value * 8 + if 64*8 <= offset \ + and (offset+NE_Header.static_size) <= self.size \ + and self.stream.readBytes(offset, 2) == 'NE': + self._is_ne = True + return self._is_ne + + def getResource(self): + # MS-DOS program: no resource + if not self.isPE(): + return None + + # Check if PE has resource or not + if "pe_opt_header/resource/size" in self: + if not self["pe_opt_header/resource/size"].value: + return None + if "section_rsrc" in self: + return self["section_rsrc"] + return None + + def createDescription(self): + if self.isPE(): + if self["pe_header/is_dll"].value: + text = u"Microsoft Windows DLL" + else: + text = u"Microsoft Windows Portable Executable" + info = [self["pe_header/cpu"].display] + if "pe_opt_header" in self: + hdr = self["pe_opt_header"] + info.append(hdr["subsystem"].display) + if self["pe_header/is_stripped"].value: + info.append(u"stripped") + return u"%s: %s" % (text, ", ".join(info)) + elif self.isNE(): + return u"New-style Executable (NE) for Microsoft MS Windows 3.x" + else: + return u"MS-DOS executable" + + def createContentSize(self): + if self.isPE(): + size = 0 + for index in xrange(self["pe_header/nb_section"].value): + section = self["section_hdr[%u]" % index] + section_size = section["phys_size"].value + if not section_size: + continue + section_size = (section_size + section["phys_off"].value) * 8 + if size: + size = max(size, section_size) + else: + size = section_size + if size: + return size + else: + return None + elif self.isNE(): + # TODO: Guess NE size + return None + else: + size = self["msdos/size_mod_512"].value + (self["msdos/size_div_512"].value-1) * 512 + if size < 0: + 
return None + return size*8 + diff --git a/libs/hachoir_parser/program/exe_ne.py b/libs/hachoir_parser/program/exe_ne.py new file mode 100644 index 0000000..cf62e88 --- /dev/null +++ b/libs/hachoir_parser/program/exe_ne.py @@ -0,0 +1,60 @@ +from hachoir_core.field import (FieldSet, + Bit, UInt8, UInt16, UInt32, Bytes, + PaddingBits, PaddingBytes, NullBits, NullBytes) +from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler + +class NE_Header(FieldSet): + static_size = 64*8 + def createFields(self): + yield Bytes(self, "signature", 2, "New executable signature (NE)") + yield UInt8(self, "link_ver", "Linker version number") + yield UInt8(self, "link_rev", "Linker revision number") + yield UInt16(self, "entry_table_ofst", "Offset to the entry table") + yield UInt16(self, "entry_table_size", "Length (in bytes) of the entry table") + yield PaddingBytes(self, "reserved[]", 4) + + yield Bit(self, "is_dll", "Is a dynamic-link library (DLL)?") + yield Bit(self, "is_win_app", "Is a Windows application?") + yield PaddingBits(self, "reserved[]", 9) + yield Bit(self, "first_seg_code", "First segment contains code that loads the application?") + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "link_error", "Load even if linker detects errors?") + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "is_lib", "Is a library module?") + + yield UInt16(self, "auto_data_seg", "Automatic data segment number") + yield filesizeHandler(UInt16(self, "local_heap_size", "Initial size (in bytes) of the local heap")) + yield filesizeHandler(UInt16(self, "stack_size", "Initial size (in bytes) of the stack")) + yield textHandler(UInt32(self, "cs_ip", "Value of CS:IP"), hexadecimal) + yield textHandler(UInt32(self, "ss_sp", "Value of SS:SP"), hexadecimal) + + yield UInt16(self, "nb_entry_seg_tab", "Number of entries in the segment table") + yield UInt16(self, "nb_entry_modref_tab", "Number of entries in the module-reference table") + yield 
filesizeHandler(UInt16(self, "size_nonres_name_tab", "Number of bytes in the nonresident-name table")) + yield UInt16(self, "seg_tab_ofs", "Segment table offset") + yield UInt16(self, "rsrc_ofs", "Resource offset") + + yield UInt16(self, "res_name_tab_ofs", "Resident-name table offset") + yield UInt16(self, "mod_ref_tab_ofs", "Module-reference table offset") + yield UInt16(self, "import_tab_ofs", "Imported-name table offset") + + yield UInt32(self, "non_res_name_tab_ofs", "Nonresident-name table offset") + yield UInt16(self, "nb_mov_ent_pt", "Number of movable entry points") + yield UInt16(self, "log2_sector_size", "Log2 of the segment sector size") + yield UInt16(self, "nb_rsrc_seg", "Number of resource segments") + + yield Bit(self, "unknown_os_format", "Operating system format is unknown") + yield PaddingBits(self, "reserved[]", 1) + yield Bit(self, "os_windows", "Operating system is Microsoft Windows") + yield NullBits(self, "reserved[]", 6) + yield Bit(self, "is_win20_prot", "Is Windows 2.x application running in version 3.x protected mode") + yield Bit(self, "is_win20_font", "Is Windows 2.x application supporting proportional fonts") + yield Bit(self, "fast_load", "Contains a fast-load area?") + yield NullBits(self, "reserved[]", 4) + + yield UInt16(self, "fastload_ofs", "Fast-load area offset (in sector)") + yield UInt16(self, "fastload_size", "Fast-load area length (in sector)") + + yield NullBytes(self, "reserved[]", 2) + yield textHandler(UInt16(self, "win_version", "Expected Windows version number"), hexadecimal) + diff --git a/libs/hachoir_parser/program/exe_pe.py b/libs/hachoir_parser/program/exe_pe.py new file mode 100644 index 0000000..d769e91 --- /dev/null +++ b/libs/hachoir_parser/program/exe_pe.py @@ -0,0 +1,221 @@ +from hachoir_core.field import (FieldSet, ParserError, + Bit, UInt8, UInt16, UInt32, TimestampUnix32, + Bytes, String, Enum, + PaddingBytes, PaddingBits, NullBytes, NullBits) +from hachoir_core.text_handler import textHandler, 
hexadecimal, filesizeHandler +from hachoir_core.error import HACHOIR_ERRORS + +class SectionHeader(FieldSet): + static_size = 40 * 8 + def createFields(self): + yield String(self, "name", 8, charset="ASCII", strip="\0 ") + yield filesizeHandler(UInt32(self, "mem_size", "Size in memory")) + yield textHandler(UInt32(self, "rva", "RVA (location) in memory"), hexadecimal) + yield filesizeHandler(UInt32(self, "phys_size", "Physical size (on disk)")) + yield filesizeHandler(UInt32(self, "phys_off", "Physical location (on disk)")) + yield PaddingBytes(self, "reserved", 12) + + # 0x0000000# + yield NullBits(self, "reserved[]", 4) + # 0x000000#0 + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "has_code", "Contains code") + yield Bit(self, "has_init_data", "Contains initialized data") + yield Bit(self, "has_uninit_data", "Contains uninitialized data") + # 0x00000#00 + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "has_comment", "Contains comments?") + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "remove", "Contents will not become part of image") + # 0x0000#000 + yield Bit(self, "has_comdata", "Contains comdat?") + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "no_defer_spec_exc", "Reset speculative exceptions handling bits in the TLB entries") + yield Bit(self, "gp_rel", "Content can be accessed relative to GP") + # 0x000#0000 + yield NullBits(self, "reserved[]", 4) + # 0x00#00000 + yield NullBits(self, "reserved[]", 4) + # 0x0#000000 + yield Bit(self, "ext_reloc", "Contains extended relocations?") + yield Bit(self, "discarded", "Can be discarded?") + yield Bit(self, "is_not_cached", "Is not cachable?") + yield Bit(self, "is_not_paged", "Is not pageable?") + # 0x#0000000 + yield Bit(self, "is_shareable", "Is shareable?") + yield Bit(self, "is_executable", "Is executable?") + yield Bit(self, "is_readable", "Is readable?") + yield Bit(self, "is_writable", "Is writable?") + + def rva2file(self, rva): + return self["phys_off"].value + 
(rva - self["rva"].value) + + def createDescription(self): + rva = self["rva"].value + size = self["mem_size"].value + info = [ + "rva=0x%08x..0x%08x" % (rva, rva+size), + "size=%s" % self["mem_size"].display, + ] + if self["is_executable"].value: + info.append("exec") + if self["is_readable"].value: + info.append("read") + if self["is_writable"].value: + info.append("write") + return 'Section "%s": %s' % (self["name"].value, ", ".join(info)) + + def createSectionName(self): + try: + name = str(self["name"].value.strip(".")) + if name: + return "section_%s" % name + except HACHOIR_ERRORS, err: + self.warning(unicode(err)) + return "section[]" + +class DataDirectory(FieldSet): + def createFields(self): + yield textHandler(UInt32(self, "rva", "Virtual address"), hexadecimal) + yield filesizeHandler(UInt32(self, "size")) + + def createDescription(self): + if self["size"].value: + return "Directory at %s (%s)" % ( + self["rva"].display, self["size"].display) + else: + return "(empty directory)" + +class PE_Header(FieldSet): + static_size = 24*8 + cpu_name = { + 0x0184: u"Alpha AXP", + 0x01c0: u"ARM", + 0x014C: u"Intel 80386", + 0x014D: u"Intel 80486", + 0x014E: u"Intel Pentium", + 0x0200: u"Intel IA64", + 0x0268: u"Motorola 68000", + 0x0266: u"MIPS", + 0x0284: u"Alpha AXP 64 bits", + 0x0366: u"MIPS with FPU", + 0x0466: u"MIPS16 with FPU", + 0x01f0: u"PowerPC little endian", + 0x0162: u"R3000", + 0x0166: u"MIPS little endian (R4000)", + 0x0168: u"R10000", + 0x01a2: u"Hitachi SH3", + 0x01a6: u"Hitachi SH4", + 0x0160: u"R3000 (MIPS), big endian", + 0x0162: u"R3000 (MIPS), little endian", + 0x0166: u"R4000 (MIPS), little endian", + 0x0168: u"R10000 (MIPS), little endian", + 0x0184: u"DEC Alpha AXP", + 0x01F0: u"IBM Power PC, little endian", + } + + def createFields(self): + yield Bytes(self, "header", 4, r"PE header signature (PE\0\0)") + if self["header"].value != "PE\0\0": + raise ParserError("Invalid PE header signature") + yield Enum(UInt16(self, "cpu", "CPU type"), 
self.cpu_name) + yield UInt16(self, "nb_section", "Number of sections") + yield TimestampUnix32(self, "creation_date", "Creation date") + yield UInt32(self, "ptr_to_sym", "Pointer to symbol table") + yield UInt32(self, "nb_symbols", "Number of symbols") + yield UInt16(self, "opt_hdr_size", "Optional header size") + + yield Bit(self, "reloc_stripped", "If true, don't contain base relocations.") + yield Bit(self, "exec_image", "Executable image?") + yield Bit(self, "line_nb_stripped", "COFF line numbers stripped?") + yield Bit(self, "local_sym_stripped", "COFF symbol table entries stripped?") + yield Bit(self, "aggr_ws", "Aggressively trim working set") + yield Bit(self, "large_addr", "Application can handle addresses greater than 2 GB") + yield NullBits(self, "reserved", 1) + yield Bit(self, "reverse_lo", "Little endian: LSB precedes MSB in memory") + yield Bit(self, "32bit", "Machine based on 32-bit-word architecture") + yield Bit(self, "is_stripped", "Debugging information removed?") + yield Bit(self, "swap", "If image is on removable media, copy and run from swap file") + yield PaddingBits(self, "reserved2", 1) + yield Bit(self, "is_system", "It's a system file") + yield Bit(self, "is_dll", "It's a dynamic-link library (DLL)") + yield Bit(self, "up", "File should be run only on a UP machine") + yield Bit(self, "reverse_hi", "Big endian: MSB precedes LSB in memory") + +class PE_OptHeader(FieldSet): + SUBSYSTEM_NAME = { + 1: u"Native", + 2: u"Windows GUI", + 3: u"Windows CUI", + 5: u"OS/2 CUI", + 7: u"POSIX CUI", + 8: u"Native Windows", + 9: u"Windows CE GUI", + 10: u"EFI application", + 11: u"EFI boot service driver", + 12: u"EFI runtime driver", + 13: u"EFI ROM", + 14: u"XBOX", + 16: u"Windows boot application", + } + DIRECTORY_NAME = { + 0: "export", + 1: "import", + 2: "resource", + 3: "exception", + 4: "certificate", + 5: "relocation", + 6: "debug", + 7: "description", + 8: "global_ptr", + 9: "tls", # Thread local storage + 10: "load_config", + 11: 
"bound_import", + 12: "import_address", + } + def createFields(self): + yield UInt16(self, "signature", "PE optional header signature (0x010b)") + # TODO: Support PE32+ (signature=0x020b) + if self["signature"].value != 0x010b: + raise ParserError("Invalid PE optional header signature") + yield UInt8(self, "maj_lnk_ver", "Major linker version") + yield UInt8(self, "min_lnk_ver", "Minor linker version") + yield filesizeHandler(UInt32(self, "size_code", "Size of code")) + yield filesizeHandler(UInt32(self, "size_init_data", "Size of initialized data")) + yield filesizeHandler(UInt32(self, "size_uninit_data", "Size of uninitialized data")) + yield textHandler(UInt32(self, "entry_point", "Address (RVA) of the code entry point"), hexadecimal) + yield textHandler(UInt32(self, "base_code", "Base (RVA) of code"), hexadecimal) + yield textHandler(UInt32(self, "base_data", "Base (RVA) of data"), hexadecimal) + yield textHandler(UInt32(self, "image_base", "Image base (RVA)"), hexadecimal) + yield filesizeHandler(UInt32(self, "sect_align", "Section alignment")) + yield filesizeHandler(UInt32(self, "file_align", "File alignment")) + yield UInt16(self, "maj_os_ver", "Major OS version") + yield UInt16(self, "min_os_ver", "Minor OS version") + yield UInt16(self, "maj_img_ver", "Major image version") + yield UInt16(self, "min_img_ver", "Minor image version") + yield UInt16(self, "maj_subsys_ver", "Major subsystem version") + yield UInt16(self, "min_subsys_ver", "Minor subsystem version") + yield NullBytes(self, "reserved", 4) + yield filesizeHandler(UInt32(self, "size_img", "Size of image")) + yield filesizeHandler(UInt32(self, "size_hdr", "Size of headers")) + yield textHandler(UInt32(self, "checksum"), hexadecimal) + yield Enum(UInt16(self, "subsystem"), self.SUBSYSTEM_NAME) + yield UInt16(self, "dll_flags") + yield filesizeHandler(UInt32(self, "size_stack_reserve")) + yield filesizeHandler(UInt32(self, "size_stack_commit")) + yield filesizeHandler(UInt32(self, 
"size_heap_reserve")) + yield filesizeHandler(UInt32(self, "size_heap_commit")) + yield UInt32(self, "loader_flags") + yield UInt32(self, "nb_directory", "Number of RVA and sizes") + for index in xrange(self["nb_directory"].value): + try: + name = self.DIRECTORY_NAME[index] + except KeyError: + name = "data_dir[%u]" % index + yield DataDirectory(self, name) + + def createDescription(self): + return "PE optional header: %s, entry point %s" % ( + self["subsystem"].display, + self["entry_point"].display) + diff --git a/libs/hachoir_parser/program/exe_res.py b/libs/hachoir_parser/program/exe_res.py new file mode 100644 index 0000000..850fcf0 --- /dev/null +++ b/libs/hachoir_parser/program/exe_res.py @@ -0,0 +1,445 @@ +""" +Parser for resource of Microsoft Windows Portable Executable (PE). + +Documentation: +- Wine project + VS_FIXEDFILEINFO structure, file include/winver.h + +Author: Victor Stinner +Creation date: 2007-01-19 +""" + +from hachoir_core.field import (FieldSet, ParserError, Enum, + Bit, Bits, SeekableFieldSet, + UInt16, UInt32, TimestampUnix32, + RawBytes, PaddingBytes, NullBytes, NullBits, + CString, String) +from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal +from hachoir_core.tools import createDict, paddingSize, alignValue, makePrintable +from hachoir_core.error import HACHOIR_ERRORS +from hachoir_parser.common.win32 import BitmapInfoHeader + +MAX_DEPTH = 5 +MAX_INDEX_PER_HEADER = 300 +MAX_NAME_PER_HEADER = MAX_INDEX_PER_HEADER + +class Version(FieldSet): + static_size = 32 + def createFields(self): + yield textHandler(UInt16(self, "minor", "Minor version number"), hexadecimal) + yield textHandler(UInt16(self, "major", "Major version number"), hexadecimal) + def createValue(self): + return self["major"].value + float(self["minor"].value) / 10000 + +MAJOR_OS_NAME = { + 1: "DOS", + 2: "OS/2 16-bit", + 3: "OS/2 32-bit", + 4: "Windows NT", +} + +MINOR_OS_BASE = 0 +MINOR_OS_NAME = { + 0: "Base", + 1: "Windows 16-bit", + 2: 
"Presentation Manager 16-bit", + 3: "Presentation Manager 32-bit", + 4: "Windows 32-bit", +} + +FILETYPE_DRIVER = 3 +FILETYPE_FONT = 4 +FILETYPE_NAME = { + 1: "Application", + 2: "DLL", + 3: "Driver", + 4: "Font", + 5: "VXD", + 7: "Static library", +} + +DRIVER_SUBTYPE_NAME = { + 1: "Printer", + 2: "Keyboard", + 3: "Language", + 4: "Display", + 5: "Mouse", + 6: "Network", + 7: "System", + 8: "Installable", + 9: "Sound", + 10: "Communications", +} + +FONT_SUBTYPE_NAME = { + 1: "Raster", + 2: "Vector", + 3: "TrueType", +} + +class VersionInfoBinary(FieldSet): + def createFields(self): + yield textHandler(UInt32(self, "magic", "File information magic (0xFEEF04BD)"), hexadecimal) + if self["magic"].value != 0xFEEF04BD: + raise ParserError("EXE resource: invalid file info magic") + yield Version(self, "struct_ver", "Structure version (1.0)") + yield Version(self, "file_ver_ms", "File version MS") + yield Version(self, "file_ver_ls", "File version LS") + yield Version(self, "product_ver_ms", "Product version MS") + yield Version(self, "product_ver_ls", "Product version LS") + yield textHandler(UInt32(self, "file_flags_mask"), hexadecimal) + + yield Bit(self, "debug") + yield Bit(self, "prerelease") + yield Bit(self, "patched") + yield Bit(self, "private_build") + yield Bit(self, "info_inferred") + yield Bit(self, "special_build") + yield NullBits(self, "reserved", 26) + + yield Enum(textHandler(UInt16(self, "file_os_major"), hexadecimal), MAJOR_OS_NAME) + yield Enum(textHandler(UInt16(self, "file_os_minor"), hexadecimal), MINOR_OS_NAME) + yield Enum(textHandler(UInt32(self, "file_type"), hexadecimal), FILETYPE_NAME) + field = textHandler(UInt32(self, "file_subfile"), hexadecimal) + if field.value == FILETYPE_DRIVER: + field = Enum(field, DRIVER_SUBTYPE_NAME) + elif field.value == FILETYPE_FONT: + field = Enum(field, FONT_SUBTYPE_NAME) + yield field + yield TimestampUnix32(self, "date_ms") + yield TimestampUnix32(self, "date_ls") + +class VersionInfoNode(FieldSet): + 
TYPE_STRING = 1 + TYPE_NAME = { + 0: "binary", + 1: "string", + } + + def __init__(self, parent, name, is_32bit=True): + FieldSet.__init__(self, parent, name) + self._size = alignValue(self["size"].value, 4) * 8 + self.is_32bit = is_32bit + + def createFields(self): + yield UInt16(self, "size", "Node size (in bytes)") + yield UInt16(self, "data_size") + yield Enum(UInt16(self, "type"), self.TYPE_NAME) + yield CString(self, "name", charset="UTF-16-LE") + + size = paddingSize(self.current_size//8, 4) + if size: + yield NullBytes(self, "padding[]", size) + size = self["data_size"].value + if size: + if self["type"].value == self.TYPE_STRING: + if self.is_32bit: + size *= 2 + yield String(self, "value", size, charset="UTF-16-LE", truncate="\0") + elif self["name"].value == "VS_VERSION_INFO": + yield VersionInfoBinary(self, "value", size=size*8) + if self["value/file_flags_mask"].value == 0: + self.is_32bit = False + else: + yield RawBytes(self, "value", size) + while 12 <= (self.size - self.current_size) // 8: + yield VersionInfoNode(self, "node[]", self.is_32bit) + size = (self.size - self.current_size) // 8 + if size: + yield NullBytes(self, "padding[]", size) + + + def createDescription(self): + text = "Version info node: %s" % self["name"].value + if self["type"].value == self.TYPE_STRING and "value" in self: + text += "=%s" % self["value"].value + return text + +def parseVersionInfo(parent): + yield VersionInfoNode(parent, "node[]") + +def parseIcon(parent): + yield BitmapInfoHeader(parent, "bmp_header") + size = (parent.size - parent.current_size) // 8 + if size: + yield RawBytes(parent, "raw", size) + +class WindowsString(FieldSet): + def createFields(self): + yield UInt16(self, "length", "Number of 16-bit characters") + size = self["length"].value * 2 + if size: + yield String(self, "text", size, charset="UTF-16-LE") + + def createValue(self): + if "text" in self: + return self["text"].value + else: + return u"" + + def createDisplay(self): + return 
makePrintable(self.value, "UTF-8", to_unicode=True, quote='"') + +def parseStringTable(parent): + while not parent.eof: + yield WindowsString(parent, "string[]") + +RESOURCE_TYPE = { + 1: ("cursor[]", "Cursor", None), + 2: ("bitmap[]", "Bitmap", None), + 3: ("icon[]", "Icon", parseIcon), + 4: ("menu[]", "Menu", None), + 5: ("dialog[]", "Dialog", None), + 6: ("string_table[]", "String table", parseStringTable), + 7: ("font_dir[]", "Font directory", None), + 8: ("font[]", "Font", None), + 9: ("accelerators[]", "Accelerators", None), + 10: ("raw_res[]", "Unformatted resource data", None), + 11: ("message_table[]", "Message table", None), + 12: ("group_cursor[]", "Group cursor", None), + 14: ("group_icon[]", "Group icon", None), + 16: ("version_info", "Version information", parseVersionInfo), +} + +class Entry(FieldSet): + static_size = 16*8 + + def __init__(self, parent, name, inode=None): + FieldSet.__init__(self, parent, name) + self.inode = inode + + def createFields(self): + yield textHandler(UInt32(self, "rva"), hexadecimal) + yield filesizeHandler(UInt32(self, "size")) + yield UInt32(self, "codepage") + yield NullBytes(self, "reserved", 4) + + def createDescription(self): + return "Entry #%u: offset=%s size=%s" % ( + self.inode["offset"].value, self["rva"].display, self["size"].display) + +class NameOffset(FieldSet): + def createFields(self): + yield UInt32(self, "name") + yield Bits(self, "offset", 31) + yield Bit(self, "is_name") + +class IndexOffset(FieldSet): + TYPE_DESC = createDict(RESOURCE_TYPE, 1) + + def __init__(self, parent, name, res_type=None): + FieldSet.__init__(self, parent, name) + self.res_type = res_type + + def createFields(self): + yield Enum(UInt32(self, "type"), self.TYPE_DESC) + yield Bits(self, "offset", 31) + yield Bit(self, "is_subdir") + + def createDescription(self): + if self["is_subdir"].value: + return "Sub-directory: %s at %s" % (self["type"].display, self["offset"].value) + else: + return "Index: ID %s at %s" % 
(self["type"].display, self["offset"].value) + +class ResourceContent(FieldSet): + def __init__(self, parent, name, entry, size=None): + FieldSet.__init__(self, parent, name, size=entry["size"].value*8) + self.entry = entry + res_type = self.getResType() + if res_type in RESOURCE_TYPE: + self._name, description, self._parser = RESOURCE_TYPE[res_type] + else: + self._parser = None + + def getResID(self): + return self.entry.inode["offset"].value + + def getResType(self): + return self.entry.inode.res_type + + def createFields(self): + if self._parser: + for field in self._parser(self): + yield field + else: + yield RawBytes(self, "content", self.size//8) + + def createDescription(self): + return "Resource #%u content: type=%s" % ( + self.getResID(), self.getResType()) + +class Header(FieldSet): + static_size = 16*8 + def createFields(self): + yield NullBytes(self, "options", 4) + yield TimestampUnix32(self, "creation_date") + yield UInt16(self, "maj_ver", "Major version") + yield UInt16(self, "min_ver", "Minor version") + yield UInt16(self, "nb_name", "Number of named entries") + yield UInt16(self, "nb_index", "Number of indexed entries") + + def createDescription(self): + text = "Resource header" + info = [] + if self["nb_name"].value: + info.append("%u name" % self["nb_name"].value) + if self["nb_index"].value: + info.append("%u index" % self["nb_index"].value) + if self["creation_date"].value: + info.append(self["creation_date"].display) + if info: + return "%s: %s" % (text, ", ".join(info)) + else: + return text + +class Name(FieldSet): + def createFields(self): + yield UInt16(self, "length") + size = min(self["length"].value, 255) + if size: + yield String(self, "name", size, charset="UTF-16LE") + +class Directory(FieldSet): + def __init__(self, parent, name, res_type=None): + FieldSet.__init__(self, parent, name) + nb_entries = self["header/nb_name"].value + self["header/nb_index"].value + self._size = Header.static_size + nb_entries * 64 + self.res_type = 
res_type + + def createFields(self): + yield Header(self, "header") + + if MAX_NAME_PER_HEADER < self["header/nb_name"].value: + raise ParserError("EXE resource: invalid number of name (%s)" + % self["header/nb_name"].value) + if MAX_INDEX_PER_HEADER < self["header/nb_index"].value: + raise ParserError("EXE resource: invalid number of index (%s)" + % self["header/nb_index"].value) + + hdr = self["header"] + for index in xrange(hdr["nb_name"].value): + yield NameOffset(self, "name[]") + for index in xrange(hdr["nb_index"].value): + yield IndexOffset(self, "index[]", self.res_type) + + def createDescription(self): + return self["header"].description + +class PE_Resource(SeekableFieldSet): + def __init__(self, parent, name, section, size): + SeekableFieldSet.__init__(self, parent, name, size=size) + self.section = section + + def parseSub(self, directory, name, depth): + indexes = [] + for index in directory.array("index"): + if index["is_subdir"].value: + indexes.append(index) + + #indexes.sort(key=lambda index: index["offset"].value) + for index in indexes: + self.seekByte(index["offset"].value) + if depth == 1: + res_type = index["type"].value + else: + res_type = directory.res_type + yield Directory(self, name, res_type) + + def createFields(self): + # Parse directories + depth = 0 + subdir = Directory(self, "root") + yield subdir + subdirs = [subdir] + alldirs = [subdir] + while subdirs: + depth += 1 + if MAX_DEPTH < depth: + self.error("EXE resource: depth too high (%s), stop parsing directories" % depth) + break + newsubdirs = [] + for index, subdir in enumerate(subdirs): + name = "directory[%u][%u][]" % (depth, index) + try: + for field in self.parseSub(subdir, name, depth): + if field.__class__ == Directory: + newsubdirs.append(field) + yield field + except HACHOIR_ERRORS, err: + self.error("Unable to create directory %s: %s" % (name, err)) + subdirs = newsubdirs + alldirs.extend(subdirs) + + # Create resource list + resources = [] + for directory in alldirs: 
+ for index in directory.array("index"): + if not index["is_subdir"].value: + resources.append(index) + + # Parse entries + entries = [] + for resource in resources: + offset = resource["offset"].value + if offset is None: + continue + self.seekByte(offset) + entry = Entry(self, "entry[]", inode=resource) + yield entry + entries.append(entry) + entries.sort(key=lambda entry: entry["rva"].value) + + # Parse resource content + for entry in entries: + try: + offset = self.section.rva2file(entry["rva"].value) + padding = self.seekByte(offset, relative=False) + if padding: + yield padding + yield ResourceContent(self, "content[]", entry) + except HACHOIR_ERRORS, err: + self.warning("Error when parsing entry %s: %s" % (entry.path, err)) + + size = (self.size - self.current_size) // 8 + if size: + yield PaddingBytes(self, "padding_end", size) + +class NE_VersionInfoNode(FieldSet): + TYPE_STRING = 1 + TYPE_NAME = { + 0: "binary", + 1: "string", + } + + def __init__(self, parent, name): + FieldSet.__init__(self, parent, name) + self._size = alignValue(self["size"].value, 4) * 8 + + def createFields(self): + yield UInt16(self, "size", "Node size (in bytes)") + yield UInt16(self, "data_size") + yield CString(self, "name", charset="ISO-8859-1") + + size = paddingSize(self.current_size//8, 4) + if size: + yield NullBytes(self, "padding[]", size) + size = self["data_size"].value + if size: + if self["name"].value == "VS_VERSION_INFO": + yield VersionInfoBinary(self, "value", size=size*8) + else: + yield String(self, "value", size, charset="ISO-8859-1") + while 12 <= (self.size - self.current_size) // 8: + yield NE_VersionInfoNode(self, "node[]") + size = (self.size - self.current_size) // 8 + if size: + yield NullBytes(self, "padding[]", size) + + + def createDescription(self): + text = "Version info node: %s" % self["name"].value +# if self["type"].value == self.TYPE_STRING and "value" in self: +# text += "=%s" % self["value"].value + return text + diff --git 
a/libs/hachoir_parser/program/java.py b/libs/hachoir_parser/program/java.py new file mode 100644 index 0000000..7329cbe --- /dev/null +++ b/libs/hachoir_parser/program/java.py @@ -0,0 +1,1097 @@ +""" +Compiled Java classes parser. + +Author: Thomas de Grenier de Latour (TGL) +Creation: 2006/11/01 +Last-update: 2006/11/06 + +Introduction: + * This parser is for compiled Java classes, aka .class files. What is nice + with this format is that it is well documented in the official Java VM specs. + * Some fields, and most field sets, have dynamic sizes, and there is no offset + to directly jump from an header to a given section, or anything like that. + It means that accessing a field at the end of the file requires that you've + already parsed almost the whole file. That's not very efficient, but it's + okay given the usual size of .class files (usually a few KB). + * Most fields are just indexes of some "constant pool" entries, which holds + most constant datas of the class. And constant pool entries reference other + constant pool entries, etc. Hence, a raw display of this fields only shows + integers and is not really understandable. Because of that, this parser + comes with two important custom field classes: + - CPInfo are constant pool entries. They have a type ("Utf8", "Methodref", + etc.), and some contents fields depending on this type. They also have a + "__str__()" method, which returns a syntetic view of this contents. + - CPIndex are constant pool indexes (UInt16). It is possible to specify + what type of CPInfo they are allowed to points to. They also have a + custom display method, usually printing something like "-> foo", where + foo is the str() of their target CPInfo. + +References: + * The Java Virtual Machine Specification, 2nd edition, chapter 4, in HTML: + http://java.sun.com/docs/books/vmspec/2nd-edition/html/ClassFile.doc.html + => That's the spec i've been implementing so far. I think it is format + version 46.0 (JDK 1.2). 
+ * The Java Virtual Machine Specification, 2nd edition, chapter 4, in PDF: + http://java.sun.com/docs/books/vmspec/2nd-edition/ClassFileFormat.pdf + => don't trust the URL, this PDF version is more recent than the HTML one. + It highligths some recent additions to the format (i don't know the + exact version though), which are not yet implemented in this parser. + * The Java Virtual Machine Specification, chapter 4: + http://java.sun.com/docs/books/vmspec/html/ClassFile.doc.html + => describes an older format, probably version 45.3 (JDK 1.1). + +TODO/FIXME: + * Google for some existing free .class files parsers, to get more infos on + the various formats differences, etc. + * Write/compile some good tests cases. + * Rework pretty-printing of CPIndex fields. This str() thing sinks. + * Add support of formats other than 46.0 (45.3 seems to already be ok, but + there are things to add for later formats). + * Make parsing robust: currently, the parser will die on asserts as soon as + something seems wrong. It should rather be tolerant, print errors/warnings, + and try its best to continue. Check how error-handling is done in other + parsers. + * Gettextize the whole thing. + * Check whether Float32/64 are really the same as Java floats/double. PEP-0754 + says that handling of +/-infinity and NaN is very implementation-dependent. + Also check how this values are displayed. + * Make the parser edition-proof. For instance, editing a constant-pool string + should update the length field of it's entry, etc. Sounds like a huge work. 
+""" + +from hachoir_parser import Parser +from hachoir_core.field import ( + ParserError, FieldSet, StaticFieldSet, + Enum, RawBytes, PascalString16, Float32, Float64, + Int8, UInt8, Int16, UInt16, Int32, UInt32, Int64, + Bit, NullBits ) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.tools import paddingSize + +############################################################################### +def parse_flags(flags, flags_dict, show_unknown_flags=True, separator=" "): + """ + Parses an integer representing a set of flags. The known flags are + stored with their bit-mask in a dictionnary. Returns a string. + """ + flags_list = [] + mask = 0x01 + while mask <= flags: + if flags & mask: + if mask in flags_dict: + flags_list.append(flags_dict[mask]) + elif show_unknown_flags: + flags_list.append("???") + mask = mask << 1 + return separator.join(flags_list) + + +############################################################################### +code_to_type_name = { + 'B': "byte", + 'C': "char", + 'D': "double", + 'F': "float", + 'I': "int", + 'J': "long", + 'S': "short", + 'Z': "boolean", + 'V': "void", +} + +def eat_descriptor(descr): + """ + Read head of a field/method descriptor. Returns a pair of strings, where + the first one is a human-readable string representation of the first found + type, and the second one is the tail of the parameter. 
+ """ + array_dim = 0 + while descr[0] == '[': + array_dim += 1 + descr = descr[1:] + if (descr[0] == 'L'): + try: end = descr.find(';') + except: raise ParserError("Not a valid descriptor string: " + descr) + type = descr[1:end] + descr = descr[end:] + else: + global code_to_type_name + try: + type = code_to_type_name[descr[0]] + except KeyError: + raise ParserError("Not a valid descriptor string: %s" % descr) + return (type.replace("/", ".") + array_dim * "[]", descr[1:]) + +def parse_field_descriptor(descr, name=None): + """ + Parse a field descriptor (single type), and returns it as human-readable + string representation. + """ + assert descr + (type, tail) = eat_descriptor(descr) + assert not tail + if name: + return type + " " + name + else: + return type + +def parse_method_descriptor(descr, name=None): + """ + Parse a method descriptor (params type and return type), and returns it + as human-readable string representation. + """ + assert descr and (descr[0] == '(') + descr = descr[1:] + params_list = [] + while descr[0] != ')': + (param, descr) = eat_descriptor(descr) + params_list.append(param) + (type, tail) = eat_descriptor(descr[1:]) + assert not tail + params = ", ".join(params_list) + if name: + return "%s %s(%s)" % (type, name, params) + else: + return "%s (%s)" % (type, params) + +def parse_any_descriptor(descr, name=None): + """ + Parse either a field or method descriptor, and returns it as human- + readable string representation. + """ + assert descr + if descr[0] == '(': + return parse_method_descriptor(descr, name) + else: + return parse_field_descriptor(descr, name) + + +############################################################################### +class FieldArray(FieldSet): + """ + Holds a fixed length array of fields which all have the same type. This + type may be variable-length. Each field will be named "foo[x]" (with x + starting at 0). 
+ """ + def __init__(self, parent, name, elements_class, length, + **elements_extra_args): + """Create a FieldArray of fields of class , + named "[x]". The **elements_extra_args will be passed to the + constructor of each field when yielded.""" + FieldSet.__init__(self, parent, name) + self.array_elements_class = elements_class + self.array_length = length + self.array_elements_extra_args = elements_extra_args + + def createFields(self): + for i in range(0, self.array_length): + yield self.array_elements_class(self, "%s[%d]" % (self.name, i), + **self.array_elements_extra_args) + +class ConstantPool(FieldSet): + """ + ConstantPool is similar to a FieldArray of CPInfo fields, but: + - numbering starts at 1 instead of zero + - some indexes are skipped (after Long or Double entries) + """ + def __init__(self, parent, name, length): + FieldSet.__init__(self, parent, name) + self.constant_pool_length = length + def createFields(self): + i = 1 + while i < self.constant_pool_length: + name = "%s[%d]" % (self.name, i) + yield CPInfo(self, name) + i += 1 + if self[name].constant_type in ("Long", "Double"): + i += 1 + + +############################################################################### +class CPIndex(UInt16): + """ + Holds index of a constant pool entry. + """ + def __init__(self, parent, name, description=None, target_types=None, + target_text_handler=(lambda x: x), allow_zero=False): + """ + Initialize a CPIndex. 
+ - target_type is the tuple of expected type for the target CPInfo + (if None, then there will be no type check) + - target_text_handler is a string transformation function used for + pretty printing the target str() result + - allow_zero states whether null index is allowed (sometimes, constant + pool index is optionnal) + """ + UInt16.__init__(self, parent, name, description) + if isinstance(target_types, str): + self.target_types = (target_types,) + else: + self.target_types = target_types + self.allow_zero = allow_zero + self.target_text_handler = target_text_handler + self.getOriginalDisplay = lambda: self.value + + def createDisplay(self): + cp_entry = self.get_cp_entry() + if self.allow_zero and not cp_entry: + return "ZERO" + assert cp_entry + return "-> " + self.target_text_handler(str(cp_entry)) + + def get_cp_entry(self): + """ + Returns the target CPInfo field. + """ + assert self.value < self["/constant_pool_count"].value + if self.allow_zero and not self.value: return None + cp_entry = self["/constant_pool/constant_pool[%d]" % self.value] + assert isinstance(cp_entry, CPInfo) + if self.target_types: + assert cp_entry.constant_type in self.target_types + return cp_entry + + +############################################################################### +class JavaOpcode(FieldSet): + OPSIZE = 0 + def __init__(self, parent, name, op, desc): + FieldSet.__init__(self, parent, name) + if self.OPSIZE != 0: self._size = self.OPSIZE*8 + self.op = op + self.desc = desc + def createDisplay(self): + return self.op + def createDescription(self): + return self.desc + def createValue(self): + return self.createDisplay() + +class OpcodeNoArgs(JavaOpcode): + OPSIZE = 1 + def createFields(self): + yield UInt8(self, "opcode") + +class OpcodeCPIndex(JavaOpcode): + OPSIZE = 3 + def createFields(self): + yield UInt8(self, "opcode") + yield CPIndex(self, "index") + def createDisplay(self): + return "%s(%i)"%(self.op, self["index"].value) + +class 
OpcodeCPIndexShort(JavaOpcode): + OPSIZE = 2 + def createFields(self): + yield UInt8(self, "opcode") + yield UInt8(self, "index") + def createDisplay(self): + return "%s(%i)"%(self.op, self["index"].value) + +class OpcodeIndex(JavaOpcode): + OPSIZE = 2 + def createFields(self): + yield UInt8(self, "opcode") + yield UInt8(self, "index") + def createDisplay(self): + return "%s(%i)"%(self.op, self["index"].value) + +class OpcodeShortJump(JavaOpcode): + OPSIZE = 3 + def createFields(self): + yield UInt8(self, "opcode") + yield Int16(self, "offset") + def createDisplay(self): + return "%s(%s)"%(self.op, self["offset"].value) + +class OpcodeLongJump(JavaOpcode): + OPSIZE = 5 + def createFields(self): + yield UInt8(self, "opcode") + yield Int32(self, "offset") + def createDisplay(self): + return "%s(%s)"%(self.op, self["offset"].value) + +class OpcodeSpecial_bipush(JavaOpcode): + OPSIZE = 2 + def createFields(self): + yield UInt8(self, "opcode") + yield Int8(self, "value") + def createDisplay(self): + return "%s(%s)"%(self.op, self["value"].value) + +class OpcodeSpecial_sipush(JavaOpcode): + OPSIZE = 3 + def createFields(self): + yield UInt8(self, "opcode") + yield Int16(self, "value") + def createDisplay(self): + return "%s(%s)"%(self.op, self["value"].value) + +class OpcodeSpecial_iinc(JavaOpcode): + OPSIZE = 3 + def createFields(self): + yield UInt8(self, "opcode") + yield UInt8(self, "index") + yield Int8(self, "value") + def createDisplay(self): + return "%s(%i,%i)"%(self.op, self["index"].value, self["value"].value) + +class OpcodeSpecial_wide(JavaOpcode): + def createFields(self): + yield UInt8(self, "opcode") + new_op = UInt8(self, "new_opcode") + yield new_op + op = new_op._description = JavaBytecode.OPCODE_TABLE.get(new_op.value, ["reserved", None, "Reserved"])[0] + yield UInt16(self, "index") + if op == "iinc": + yield Int16(self, "value") + self.createDisplay = lambda self: "%s(%i,%i)"%(self.op, self["index"].value, self["value"].value) + else: + 
self.createDisplay = lambda self: "%s(%i)"%(self.op, self["index"].value) + +class OpcodeSpecial_invokeinterface(JavaOpcode): + OPSIZE = 5 + def createFields(self): + yield UInt8(self, "opcode") + yield CPIndex(self, "index") + yield UInt8(self, "count") + yield UInt8(self, "zero", "Must be zero.") + def createDisplay(self): + return "%s(%i,%i,%i)"%(self.op, self["index"].value, self["count"].value, self["zero"].value) + +class OpcodeSpecial_newarray(JavaOpcode): + OPSIZE = 2 + def createFields(self): + yield UInt8(self, "opcode") + yield Enum(UInt8(self, "atype"), {4: "boolean", + 5: "char", + 6: "float", + 7: "double", + 8: "byte", + 9: "short", + 10:"int", + 11:"long"}) + def createDisplay(self): + return "%s(%s)"%(self.op, self["atype"].createDisplay()) + +class OpcodeSpecial_multianewarray(JavaOpcode): + OPSIZE = 4 + def createFields(self): + yield UInt8(self, "opcode") + yield CPIndex(self, "index") + yield UInt8(self, "dimensions") + def createDisplay(self): + return "%s(%i,%i)"%(self.op, self["index"].value, self["dimensions"].value) + +class OpcodeSpecial_tableswitch(JavaOpcode): + def createFields(self): + yield UInt8(self, "opcode") + pad = paddingSize(self.address+8, 32) + if pad: + yield NullBits(self, "padding", pad) + yield Int32(self, "default") + low = Int32(self, "low") + yield low + high = Int32(self, "high") + yield high + for i in range(high.value-low.value+1): + yield Int32(self, "offset[]") + def createDisplay(self): + return "%s(%i,%i,%i,...)"%(self.op, self["default"].value, self["low"].value, self["high"].value) + +class OpcodeSpecial_lookupswitch(JavaOpcode): + def createFields(self): + yield UInt8(self, "opcode") + pad = paddingSize(self.address+8, 32) + if pad: + yield NullBits(self, "padding", pad) + yield Int32(self, "default") + n = Int32(self, "npairs") + yield n + for i in range(n.value): + yield Int32(self, "match[]") + yield Int32(self, "offset[]") + def createDisplay(self): + return "%s(%i,%i,...)"%(self.op, 
self["default"].value, self["npairs"].value) + +class JavaBytecode(FieldSet): + OPCODE_TABLE = { +0x00: ("nop", OpcodeNoArgs, "performs no operation. Stack: [No change]"), +0x01: ("aconst_null", OpcodeNoArgs, "pushes a 'null' reference onto the stack. Stack: -> null"), +0x02: ("iconst_m1", OpcodeNoArgs, "loads the int value -1 onto the stack. Stack: -> -1"), +0x03: ("iconst_0", OpcodeNoArgs, "loads the int value 0 onto the stack. Stack: -> 0"), +0x04: ("iconst_1", OpcodeNoArgs, "loads the int value 1 onto the stack. Stack: -> 1"), +0x05: ("iconst_2", OpcodeNoArgs, "loads the int value 2 onto the stack. Stack: -> 2"), +0x06: ("iconst_3", OpcodeNoArgs, "loads the int value 3 onto the stack. Stack: -> 3"), +0x07: ("iconst_4", OpcodeNoArgs, "loads the int value 4 onto the stack. Stack: -> 4"), +0x08: ("iconst_5", OpcodeNoArgs, "loads the int value 5 onto the stack. Stack: -> 5"), +0x09: ("lconst_0", OpcodeNoArgs, "pushes the long 0 onto the stack. Stack: -> 0L"), +0x0a: ("lconst_1", OpcodeNoArgs, "pushes the long 1 onto the stack. Stack: -> 1L"), +0x0b: ("fconst_0", OpcodeNoArgs, "pushes '0.0f' onto the stack. Stack: -> 0.0f"), +0x0c: ("fconst_1", OpcodeNoArgs, "pushes '1.0f' onto the stack. Stack: -> 1.0f"), +0x0d: ("fconst_2", OpcodeNoArgs, "pushes '2.0f' onto the stack. Stack: -> 2.0f"), +0x0e: ("dconst_0", OpcodeNoArgs, "pushes the constant '0.0' onto the stack. Stack: -> 0.0"), +0x0f: ("dconst_1", OpcodeNoArgs, "pushes the constant '1.0' onto the stack. Stack: -> 1.0"), +0x10: ("bipush", OpcodeSpecial_bipush, "pushes the signed 8-bit integer argument onto the stack. Stack: -> value"), +0x11: ("sipush", OpcodeSpecial_sipush, "pushes the signed 16-bit integer argument onto the stack. Stack: -> value"), +0x12: ("ldc", OpcodeCPIndexShort, "pushes a constant from a constant pool (String, int, float or class type) onto the stack. Stack: -> value"), +0x13: ("ldc_w", OpcodeCPIndex, "pushes a constant from a constant pool (String, int, float or class type) onto the stack. 
Stack: -> value"), +0x14: ("ldc2_w", OpcodeCPIndex, "pushes a constant from a constant pool (double or long) onto the stack. Stack: -> value"), +0x15: ("iload", OpcodeIndex, "loads an int 'value' from a local variable '#index'. Stack: -> value"), +0x16: ("lload", OpcodeIndex, "loads a long value from a local variable '#index'. Stack: -> value"), +0x17: ("fload", OpcodeIndex, "loads a float 'value' from a local variable '#index'. Stack: -> value"), +0x18: ("dload", OpcodeIndex, "loads a double 'value' from a local variable '#index'. Stack: -> value"), +0x19: ("aload", OpcodeIndex, "loads a reference onto the stack from a local variable '#index'. Stack: -> objectref"), +0x1a: ("iload_0", OpcodeNoArgs, "loads an int 'value' from variable 0. Stack: -> value"), +0x1b: ("iload_1", OpcodeNoArgs, "loads an int 'value' from variable 1. Stack: -> value"), +0x1c: ("iload_2", OpcodeNoArgs, "loads an int 'value' from variable 2. Stack: -> value"), +0x1d: ("iload_3", OpcodeNoArgs, "loads an int 'value' from variable 3. Stack: -> value"), +0x1e: ("lload_0", OpcodeNoArgs, "load a long value from a local variable 0. Stack: -> value"), +0x1f: ("lload_1", OpcodeNoArgs, "load a long value from a local variable 1. Stack: -> value"), +0x20: ("lload_2", OpcodeNoArgs, "load a long value from a local variable 2. Stack: -> value"), +0x21: ("lload_3", OpcodeNoArgs, "load a long value from a local variable 3. Stack: -> value"), +0x22: ("fload_0", OpcodeNoArgs, "loads a float 'value' from local variable 0. Stack: -> value"), +0x23: ("fload_1", OpcodeNoArgs, "loads a float 'value' from local variable 1. Stack: -> value"), +0x24: ("fload_2", OpcodeNoArgs, "loads a float 'value' from local variable 2. Stack: -> value"), +0x25: ("fload_3", OpcodeNoArgs, "loads a float 'value' from local variable 3. Stack: -> value"), +0x26: ("dload_0", OpcodeNoArgs, "loads a double from local variable 0. Stack: -> value"), +0x27: ("dload_1", OpcodeNoArgs, "loads a double from local variable 1. 
Stack: -> value"), +0x28: ("dload_2", OpcodeNoArgs, "loads a double from local variable 2. Stack: -> value"), +0x29: ("dload_3", OpcodeNoArgs, "loads a double from local variable 3. Stack: -> value"), +0x2a: ("aload_0", OpcodeNoArgs, "loads a reference onto the stack from local variable 0. Stack: -> objectref"), +0x2b: ("aload_1", OpcodeNoArgs, "loads a reference onto the stack from local variable 1. Stack: -> objectref"), +0x2c: ("aload_2", OpcodeNoArgs, "loads a reference onto the stack from local variable 2. Stack: -> objectref"), +0x2d: ("aload_3", OpcodeNoArgs, "loads a reference onto the stack from local variable 3. Stack: -> objectref"), +0x2e: ("iaload", OpcodeNoArgs, "loads an int from an array. Stack: arrayref, index -> value"), +0x2f: ("laload", OpcodeNoArgs, "load a long from an array. Stack: arrayref, index -> value"), +0x30: ("faload", OpcodeNoArgs, "loads a float from an array. Stack: arrayref, index -> value"), +0x31: ("daload", OpcodeNoArgs, "loads a double from an array. Stack: arrayref, index -> value"), +0x32: ("aaload", OpcodeNoArgs, "loads onto the stack a reference from an array. Stack: arrayref, index -> value"), +0x33: ("baload", OpcodeNoArgs, "loads a byte or Boolean value from an array. Stack: arrayref, index -> value"), +0x34: ("caload", OpcodeNoArgs, "loads a char from an array. Stack: arrayref, index -> value"), +0x35: ("saload", OpcodeNoArgs, "load short from array. Stack: arrayref, index -> value"), +0x36: ("istore", OpcodeIndex, "store int 'value' into variable '#index'. Stack: value ->"), +0x37: ("lstore", OpcodeIndex, "store a long 'value' in a local variable '#index'. Stack: value ->"), +0x38: ("fstore", OpcodeIndex, "stores a float 'value' into a local variable '#index'. Stack: value ->"), +0x39: ("dstore", OpcodeIndex, "stores a double 'value' into a local variable '#index'. Stack: value ->"), +0x3a: ("astore", OpcodeIndex, "stores a reference into a local variable '#index'. 
Stack: objectref ->"), +0x3b: ("istore_0", OpcodeNoArgs, "store int 'value' into variable 0. Stack: value ->"), +0x3c: ("istore_1", OpcodeNoArgs, "store int 'value' into variable 1. Stack: value ->"), +0x3d: ("istore_2", OpcodeNoArgs, "store int 'value' into variable 2. Stack: value ->"), +0x3e: ("istore_3", OpcodeNoArgs, "store int 'value' into variable 3. Stack: value ->"), +0x3f: ("lstore_0", OpcodeNoArgs, "store a long 'value' in a local variable 0. Stack: value ->"), +0x40: ("lstore_1", OpcodeNoArgs, "store a long 'value' in a local variable 1. Stack: value ->"), +0x41: ("lstore_2", OpcodeNoArgs, "store a long 'value' in a local variable 2. Stack: value ->"), +0x42: ("lstore_3", OpcodeNoArgs, "store a long 'value' in a local variable 3. Stack: value ->"), +0x43: ("fstore_0", OpcodeNoArgs, "stores a float 'value' into local variable 0. Stack: value ->"), +0x44: ("fstore_1", OpcodeNoArgs, "stores a float 'value' into local variable 1. Stack: value ->"), +0x45: ("fstore_2", OpcodeNoArgs, "stores a float 'value' into local variable 2. Stack: value ->"), +0x46: ("fstore_3", OpcodeNoArgs, "stores a float 'value' into local variable 3. Stack: value ->"), +0x47: ("dstore_0", OpcodeNoArgs, "stores a double into local variable 0. Stack: value ->"), +0x48: ("dstore_1", OpcodeNoArgs, "stores a double into local variable 1. Stack: value ->"), +0x49: ("dstore_2", OpcodeNoArgs, "stores a double into local variable 2. Stack: value ->"), +0x4a: ("dstore_3", OpcodeNoArgs, "stores a double into local variable 3. Stack: value ->"), +0x4b: ("astore_0", OpcodeNoArgs, "stores a reference into local variable 0. Stack: objectref ->"), +0x4c: ("astore_1", OpcodeNoArgs, "stores a reference into local variable 1. Stack: objectref ->"), +0x4d: ("astore_2", OpcodeNoArgs, "stores a reference into local variable 2. Stack: objectref ->"), +0x4e: ("astore_3", OpcodeNoArgs, "stores a reference into local variable 3. 
Stack: objectref ->"), +0x4f: ("iastore", OpcodeNoArgs, "stores an int into an array. Stack: arrayref, index, value ->"), +0x50: ("lastore", OpcodeNoArgs, "store a long to an array. Stack: arrayref, index, value ->"), +0x51: ("fastore", OpcodeNoArgs, "stores a float in an array. Stack: arreyref, index, value ->"), +0x52: ("dastore", OpcodeNoArgs, "stores a double into an array. Stack: arrayref, index, value ->"), +0x53: ("aastore", OpcodeNoArgs, "stores into a reference to an array. Stack: arrayref, index, value ->"), +0x54: ("bastore", OpcodeNoArgs, "stores a byte or Boolean value into an array. Stack: arrayref, index, value ->"), +0x55: ("castore", OpcodeNoArgs, "stores a char into an array. Stack: arrayref, index, value ->"), +0x56: ("sastore", OpcodeNoArgs, "store short to array. Stack: arrayref, index, value ->"), +0x57: ("pop", OpcodeNoArgs, "discards the top value on the stack. Stack: value ->"), +0x58: ("pop2", OpcodeNoArgs, "discards the top two values on the stack (or one value, if it is a double or long). Stack: {value2, value1} ->"), +0x59: ("dup", OpcodeNoArgs, "duplicates the value on top of the stack. Stack: value -> value, value"), +0x5a: ("dup_x1", OpcodeNoArgs, "inserts a copy of the top value into the stack two values from the top. Stack: value2, value1 -> value1, value2, value1"), +0x5b: ("dup_x2", OpcodeNoArgs, "inserts a copy of the top value into the stack two (if value2 is double or long it takes up the entry of value3, too) or three values (if value2 is neither double nor long) from the top. Stack: value3, value2, value1 -> value1, value3, value2, value1"), +0x5c: ("dup2", OpcodeNoArgs, "duplicate top two stack words (two values, if value1 is not double nor long; a single value, if value1 is double or long). Stack: {value2, value1} -> {value2, value1}, {value2, value1}"), +0x5d: ("dup2_x1", OpcodeNoArgs, "duplicate two words and insert beneath third word. 
Stack: value3, {value2, value1} -> {value2, value1}, value3, {value2, value1}"), +0x5e: ("dup2_x2", OpcodeNoArgs, "duplicate two words and insert beneath fourth word. Stack: {value4, value3}, {value2, value1} -> {value2, value1}, {value4, value3}, {value2, value1}"), +0x5f: ("swap", OpcodeNoArgs, "swaps two top words on the stack (note that value1 and value2 must not be double or long). Stack: value2, value1 -> value1, value2"), +0x60: ("iadd", OpcodeNoArgs, "adds two ints together. Stack: value1, value2 -> result"), +0x61: ("ladd", OpcodeNoArgs, "add two longs. Stack: value1, value2 -> result"), +0x62: ("fadd", OpcodeNoArgs, "adds two floats. Stack: value1, value2 -> result"), +0x63: ("dadd", OpcodeNoArgs, "adds two doubles. Stack: value1, value2 -> result"), +0x64: ("isub", OpcodeNoArgs, "int subtract. Stack: value1, value2 -> result"), +0x65: ("lsub", OpcodeNoArgs, "subtract two longs. Stack: value1, value2 -> result"), +0x66: ("fsub", OpcodeNoArgs, "subtracts two floats. Stack: value1, value2 -> result"), +0x67: ("dsub", OpcodeNoArgs, "subtracts a double from another. Stack: value1, value2 -> result"), +0x68: ("imul", OpcodeNoArgs, "multiply two integers. Stack: value1, value2 -> result"), +0x69: ("lmul", OpcodeNoArgs, "multiplies two longs. Stack: value1, value2 -> result"), +0x6a: ("fmul", OpcodeNoArgs, "multiplies two floats. Stack: value1, value2 -> result"), +0x6b: ("dmul", OpcodeNoArgs, "multiplies two doubles. Stack: value1, value2 -> result"), +0x6c: ("idiv", OpcodeNoArgs, "divides two integers. Stack: value1, value2 -> result"), +0x6d: ("ldiv", OpcodeNoArgs, "divide two longs. Stack: value1, value2 -> result"), +0x6e: ("fdiv", OpcodeNoArgs, "divides two floats. Stack: value1, value2 -> result"), +0x6f: ("ddiv", OpcodeNoArgs, "divides two doubles. Stack: value1, value2 -> result"), +0x70: ("irem", OpcodeNoArgs, "logical int remainder. Stack: value1, value2 -> result"), +0x71: ("lrem", OpcodeNoArgs, "remainder of division of two longs. 
Stack: value1, value2 -> result"), +0x72: ("frem", OpcodeNoArgs, "gets the remainder from a division between two floats. Stack: value1, value2 -> result"), +0x73: ("drem", OpcodeNoArgs, "gets the remainder from a division between two doubles. Stack: value1, value2 -> result"), +0x74: ("ineg", OpcodeNoArgs, "negate int. Stack: value -> result"), +0x75: ("lneg", OpcodeNoArgs, "negates a long. Stack: value -> result"), +0x76: ("fneg", OpcodeNoArgs, "negates a float. Stack: value -> result"), +0x77: ("dneg", OpcodeNoArgs, "negates a double. Stack: value -> result"), +0x78: ("ishl", OpcodeNoArgs, "int shift left. Stack: value1, value2 -> result"), +0x79: ("lshl", OpcodeNoArgs, "bitwise shift left of a long 'value1' by 'value2' positions. Stack: value1, value2 -> result"), +0x7a: ("ishr", OpcodeNoArgs, "int shift right. Stack: value1, value2 -> result"), +0x7b: ("lshr", OpcodeNoArgs, "bitwise shift right of a long 'value1' by 'value2' positions. Stack: value1, value2 -> result"), +0x7c: ("iushr", OpcodeNoArgs, "int shift right. Stack: value1, value2 -> result"), +0x7d: ("lushr", OpcodeNoArgs, "bitwise shift right of a long 'value1' by 'value2' positions, unsigned. Stack: value1, value2 -> result"), +0x7e: ("iand", OpcodeNoArgs, "performs a logical and on two integers. Stack: value1, value2 -> result"), +0x7f: ("land", OpcodeNoArgs, "bitwise and of two longs. Stack: value1, value2 -> result"), +0x80: ("ior", OpcodeNoArgs, "logical int or. Stack: value1, value2 -> result"), +0x81: ("lor", OpcodeNoArgs, "bitwise or of two longs. Stack: value1, value2 -> result"), +0x82: ("ixor", OpcodeNoArgs, "int xor. Stack: value1, value2 -> result"), +0x83: ("lxor", OpcodeNoArgs, "bitwise exclusive or of two longs. Stack: value1, value2 -> result"), +0x84: ("iinc", OpcodeSpecial_iinc, "increment local variable '#index' by signed byte 'const'. Stack: [No change]"), +0x85: ("i2l", OpcodeNoArgs, "converts an int into a long. 
Stack: value -> result"), +0x86: ("i2f", OpcodeNoArgs, "converts an int into a float. Stack: value -> result"), +0x87: ("i2d", OpcodeNoArgs, "converts an int into a double. Stack: value -> result"), +0x88: ("l2i", OpcodeNoArgs, "converts a long to an int. Stack: value -> result"), +0x89: ("l2f", OpcodeNoArgs, "converts a long to a float. Stack: value -> result"), +0x8a: ("l2d", OpcodeNoArgs, "converts a long to a double. Stack: value -> result"), +0x8b: ("f2i", OpcodeNoArgs, "converts a float to an int. Stack: value -> result"), +0x8c: ("f2l", OpcodeNoArgs, "converts a float to a long. Stack: value -> result"), +0x8d: ("f2d", OpcodeNoArgs, "converts a float to a double. Stack: value -> result"), +0x8e: ("d2i", OpcodeNoArgs, "converts a double to an int. Stack: value -> result"), +0x8f: ("d2l", OpcodeNoArgs, "converts a double to a long. Stack: value -> result"), +0x90: ("d2f", OpcodeNoArgs, "converts a double to a float. Stack: value -> result"), +0x91: ("i2b", OpcodeNoArgs, "converts an int into a byte. Stack: value -> result"), +0x92: ("i2c", OpcodeNoArgs, "converts an int into a character. Stack: value -> result"), +0x93: ("i2s", OpcodeNoArgs, "converts an int into a short. Stack: value -> result"), +0x94: ("lcmp", OpcodeNoArgs, "compares two longs values. Stack: value1, value2 -> result"), +0x95: ("fcmpl", OpcodeNoArgs, "compares two floats. Stack: value1, value2 -> result"), +0x96: ("fcmpg", OpcodeNoArgs, "compares two floats. Stack: value1, value2 -> result"), +0x97: ("dcmpl", OpcodeNoArgs, "compares two doubles. Stack: value1, value2 -> result"), +0x98: ("dcmpg", OpcodeNoArgs, "compares two doubles. Stack: value1, value2 -> result"), +0x99: ("ifeq", OpcodeShortJump, "if 'value' is 0, branch to the 16-bit instruction offset argument. Stack: value ->"), +0x9a: ("ifne", OpcodeShortJump, "if 'value' is not 0, branch to the 16-bit instruction offset argument. 
Stack: value ->"), +0x9c: ("ifge", OpcodeShortJump, "if 'value' is greater than or equal to 0, branch to the 16-bit instruction offset argument. Stack: value ->"), +0x9d: ("ifgt", OpcodeShortJump, "if 'value' is greater than 0, branch to the 16-bit instruction offset argument. Stack: value ->"), +0x9e: ("ifle", OpcodeShortJump, "if 'value' is less than or equal to 0, branch to the 16-bit instruction offset argument. Stack: value ->"), +0x9f: ("if_icmpeq", OpcodeShortJump, "if ints are equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa0: ("if_icmpne", OpcodeShortJump, "if ints are not equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa1: ("if_icmplt", OpcodeShortJump, "if 'value1' is less than 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa2: ("if_icmpge", OpcodeShortJump, "if 'value1' is greater than or equal to 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa3: ("if_icmpgt", OpcodeShortJump, "if 'value1' is greater than 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa4: ("if_icmple", OpcodeShortJump, "if 'value1' is less than or equal to 'value2', branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa5: ("if_acmpeq", OpcodeShortJump, "if references are equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa6: ("if_acmpne", OpcodeShortJump, "if references are not equal, branch to the 16-bit instruction offset argument. Stack: value1, value2 ->"), +0xa7: ("goto", OpcodeShortJump, "goes to the 16-bit instruction offset argument. Stack: [no change]"), +0xa8: ("jsr", OpcodeShortJump, "jump to subroutine at the 16-bit instruction offset argument and place the return address on the stack. 
Stack: -> address"), +0xa9: ("ret", OpcodeIndex, "continue execution from address taken from a local variable '#index'. Stack: [No change]"), +0xaa: ("tableswitch", OpcodeSpecial_tableswitch, "continue execution from an address in the table at offset 'index'. Stack: index ->"), +0xab: ("lookupswitch", OpcodeSpecial_lookupswitch, "a target address is looked up from a table using a key and execution continues from the instruction at that address. Stack: key ->"), +0xac: ("ireturn", OpcodeNoArgs, "returns an integer from a method. Stack: value -> [empty]"), +0xad: ("lreturn", OpcodeNoArgs, "returns a long value. Stack: value -> [empty]"), +0xae: ("freturn", OpcodeNoArgs, "returns a float. Stack: value -> [empty]"), +0xaf: ("dreturn", OpcodeNoArgs, "returns a double from a method. Stack: value -> [empty]"), +0xb0: ("areturn", OpcodeNoArgs, "returns a reference from a method. Stack: objectref -> [empty]"), +0xb1: ("return", OpcodeNoArgs, "return void from method. Stack: -> [empty]"), +0xb2: ("getstatic", OpcodeCPIndex, "gets a static field 'value' of a class, where the field is identified by field reference in the constant pool. Stack: -> value"), +0xb3: ("putstatic", OpcodeCPIndex, "set static field to 'value' in a class, where the field is identified by a field reference in constant pool. Stack: value ->"), +0xb4: ("getfield", OpcodeCPIndex, "gets a field 'value' of an object 'objectref', where the field is identified by field reference in the constant pool. Stack: objectref -> value"), +0xb5: ("putfield", OpcodeCPIndex, "set field to 'value' in an object 'objectref', where the field is identified by a field reference in constant pool. Stack: objectref, value ->"), +0xb6: ("invokevirtual", OpcodeCPIndex, "invoke virtual method on object 'objectref', where the method is identified by method reference in constant pool. Stack: objectref, [arg1, arg2, ...] 
->"), +0xb7: ("invokespecial", OpcodeCPIndex, "invoke instance method on object 'objectref', where the method is identified by method reference in constant pool. Stack: objectref, [arg1, arg2, ...] ->"), +0xb8: ("invokestatic", OpcodeCPIndex, "invoke a static method, where the method is identified by method reference in the constant pool. Stack: [arg1, arg2, ...] ->"), +0xb9: ("invokeinterface", OpcodeSpecial_invokeinterface, "invokes an interface method on object 'objectref', where the interface method is identified by method reference in constant pool. Stack: objectref, [arg1, arg2, ...] ->"), +0xba: ("xxxunusedxxx", OpcodeNoArgs, "this opcode is reserved for historical reasons. Stack: "), +0xbb: ("new", OpcodeCPIndex, "creates new object of type identified by class reference in constant pool. Stack: -> objectref"), +0xbc: ("newarray", OpcodeSpecial_newarray, "creates new array with 'count' elements of primitive type given in the argument. Stack: count -> arrayref"), +0xbd: ("anewarray", OpcodeCPIndex, "creates a new array of references of length 'count' and component type identified by the class reference in the constant pool. Stack: count -> arrayref"), +0xbe: ("arraylength", OpcodeNoArgs, "gets the length of an array. Stack: arrayref -> length"), +0xbf: ("athrow", OpcodeNoArgs, "throws an error or exception (notice that the rest of the stack is cleared, leaving only a reference to the Throwable). Stack: objectref -> [empty], objectref"), +0xc0: ("checkcast", OpcodeCPIndex, "checks whether an 'objectref' is of a certain type, the class reference of which is in the constant pool. Stack: objectref -> objectref"), +0xc1: ("instanceof", OpcodeCPIndex, "determines if an object 'objectref' is of a given type, identified by class reference in constant pool. Stack: objectref -> result"), +0xc2: ("monitorenter", OpcodeNoArgs, "enter monitor for object (\"grab the lock\" - start of synchronized() section). 
Stack: objectref -> "), +0xc3: ("monitorexit", OpcodeNoArgs, "exit monitor for object (\"release the lock\" - end of synchronized() section). Stack: objectref -> "), +0xc4: ("wide", OpcodeSpecial_wide, "execute 'opcode', where 'opcode' is either iload, fload, aload, lload, dload, istore, fstore, astore, lstore, dstore, or ret, but assume the 'index' is 16 bit; or execute iinc, where the 'index' is 16 bits and the constant to increment by is a signed 16 bit short. Stack: [same as for corresponding instructions]"), +0xc5: ("multianewarray", OpcodeSpecial_multianewarray, "create a new array of 'dimensions' dimensions with elements of type identified by class reference in constant pool; the sizes of each dimension is identified by 'count1', ['count2', etc]. Stack: count1, [count2,...] -> arrayref"), +0xc6: ("ifnull", OpcodeShortJump, "if 'value' is null, branch to the 16-bit instruction offset argument. Stack: value ->"), +0xc7: ("ifnonnull", OpcodeShortJump, "if 'value' is not null, branch to the 16-bit instruction offset argument. Stack: value ->"), +0xc8: ("goto_w", OpcodeLongJump, "goes to another instruction at the 32-bit branch offset argument. Stack: [no change]"), +0xc9: ("jsr_w", OpcodeLongJump, "jump to subroutine at the 32-bit branch offset argument and place the return address on the stack. 
Stack: -> address"), +0xca: ("breakpoint", OpcodeNoArgs, "reserved for breakpoints in Java debuggers; should not appear in any class file."), +0xfe: ("impdep1", OpcodeNoArgs, "reserved for implementation-dependent operations within debuggers; should not appear in any class file."), +0xff: ("impdep2", OpcodeNoArgs, "reserved for implementation-dependent operations within debuggers; should not appear in any class file.")} + def __init__(self, parent, name, length): + FieldSet.__init__(self, parent, name) + self._size = length*8 + def createFields(self): + while self.current_size < self.size: + bytecode = ord(self.parent.stream.readBytes(self.absolute_address+self.current_size, 1)) + op, cls, desc = self.OPCODE_TABLE.get(bytecode,["", OpcodeNoArgs, "Reserved opcode."]) + yield cls(self, "bytecode[]", op, desc) + +############################################################################### +class CPInfo(FieldSet): + """ + Holds a constant pool entry. Entries all have a type, and various contents + fields depending on their type. 
+ """ + def createFields(self): + yield Enum(UInt8(self, "tag"), self.root.CONSTANT_TYPES) + if self["tag"].value not in self.root.CONSTANT_TYPES: + raise ParserError("Java: unknown constant type (%s)" % self["tag"].value) + self.constant_type = self.root.CONSTANT_TYPES[self["tag"].value] + if self.constant_type == "Utf8": + yield PascalString16(self, "bytes", charset="UTF-8") + elif self.constant_type == "Integer": + yield Int32(self, "bytes") + elif self.constant_type == "Float": + yield Float32(self, "bytes") + elif self.constant_type == "Long": + yield Int64(self, "bytes") + elif self.constant_type == "Double": + yield Float64(self, "bytes") + elif self.constant_type == "Class": + yield CPIndex(self, "name_index", "Class or interface name", target_types="Utf8") + elif self.constant_type == "String": + yield CPIndex(self, "string_index", target_types="Utf8") + elif self.constant_type == "Fieldref": + yield CPIndex(self, "class_index", "Field class or interface name", target_types="Class") + yield CPIndex(self, "name_and_type_index", target_types="NameAndType") + elif self.constant_type == "Methodref": + yield CPIndex(self, "class_index", "Method class name", target_types="Class") + yield CPIndex(self, "name_and_type_index", target_types="NameAndType") + elif self.constant_type == "InterfaceMethodref": + yield CPIndex(self, "class_index", "Method interface name", target_types="Class") + yield CPIndex(self, "name_and_type_index", target_types="NameAndType") + elif self.constant_type == "NameAndType": + yield CPIndex(self, "name_index", target_types="Utf8") + yield CPIndex(self, "descriptor_index", target_types="Utf8") + else: + raise ParserError("Not a valid constant pool element type: " + + self["tag"].value) + + def __str__(self): + """ + Returns a human-readable string representation of the constant pool + entry. It is used for pretty-printing of the CPIndex fields pointing + to it. 
+ """ + if self.constant_type == "Utf8": + return self["bytes"].value + elif self.constant_type in ("Integer", "Float", "Long", "Double"): + return self["bytes"].display + elif self.constant_type == "Class": + class_name = str(self["name_index"].get_cp_entry()) + return class_name.replace("/",".") + elif self.constant_type == "String": + return str(self["string_index"].get_cp_entry()) + elif self.constant_type == "Fieldref": + return "%s (from %s)" % (self["name_and_type_index"], self["class_index"]) + elif self.constant_type == "Methodref": + return "%s (from %s)" % (self["name_and_type_index"], self["class_index"]) + elif self.constant_type == "InterfaceMethodref": + return "%s (from %s)" % (self["name_and_type_index"], self["class_index"]) + elif self.constant_type == "NameAndType": + return parse_any_descriptor( + str(self["descriptor_index"].get_cp_entry()), + name=str(self["name_index"].get_cp_entry())) + else: + # FIXME: Return "" instead of raising an exception? + raise ParserError("Not a valid constant pool element type: " + + self["tag"].value) + + +############################################################################### +# field_info { +# u2 access_flags; +# u2 name_index; +# u2 descriptor_index; +# u2 attributes_count; +# attribute_info attributes[attributes_count]; +# } +class FieldInfo(FieldSet): + def createFields(self): + # Access flags (16 bits) + yield NullBits(self, "reserved[]", 8) + yield Bit(self, "transient") + yield Bit(self, "volatile") + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "final") + yield Bit(self, "static") + yield Bit(self, "protected") + yield Bit(self, "private") + yield Bit(self, "public") + + yield CPIndex(self, "name_index", "Field name", target_types="Utf8") + yield CPIndex(self, "descriptor_index", "Field descriptor", target_types="Utf8", + target_text_handler=parse_field_descriptor) + yield UInt16(self, "attributes_count", "Number of field attributes") + if self["attributes_count"].value > 0: + yield 
FieldArray(self, "attributes", AttributeInfo, + self["attributes_count"].value) + + +############################################################################### +# method_info { +# u2 access_flags; +# u2 name_index; +# u2 descriptor_index; +# u2 attributes_count; +# attribute_info attributes[attributes_count]; +# } +class MethodInfo(FieldSet): + def createFields(self): + # Access flags (16 bits) + yield NullBits(self, "reserved[]", 4) + yield Bit(self, "strict") + yield Bit(self, "abstract") + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "native") + yield NullBits(self, "reserved[]", 2) + yield Bit(self, "synchronized") + yield Bit(self, "final") + yield Bit(self, "static") + yield Bit(self, "protected") + yield Bit(self, "private") + yield Bit(self, "public") + + yield CPIndex(self, "name_index", "Method name", target_types="Utf8") + yield CPIndex(self, "descriptor_index", "Method descriptor", + target_types="Utf8", + target_text_handler=parse_method_descriptor) + yield UInt16(self, "attributes_count", "Number of method attributes") + if self["attributes_count"].value > 0: + yield FieldArray(self, "attributes", AttributeInfo, + self["attributes_count"].value) + + +############################################################################### +# attribute_info { +# u2 attribute_name_index; +# u4 attribute_length; +# u1 info[attribute_length]; +# } +# [...] 
+class AttributeInfo(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + self._size = (self["attribute_length"].value + 6) * 8 + + def createFields(self): + yield CPIndex(self, "attribute_name_index", "Attribute name", target_types="Utf8") + yield UInt32(self, "attribute_length", "Length of the attribute") + attr_name = str(self["attribute_name_index"].get_cp_entry()) + + # ConstantValue_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # u2 constantvalue_index; + # } + if attr_name == "ConstantValue": + if self["attribute_length"].value != 2: + raise ParserError("Java: Invalid attribute %s length (%s)" \ + % (self.path, self["attribute_length"].value)) + yield CPIndex(self, "constantvalue_index", + target_types=("Long","Float","Double","Integer","String")) + + # Code_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # u2 max_stack; + # u2 max_locals; + # u4 code_length; + # u1 code[code_length]; + # u2 exception_table_length; + # { u2 start_pc; + # u2 end_pc; + # u2 handler_pc; + # u2 catch_type; + # } exception_table[exception_table_length]; + # u2 attributes_count; + # attribute_info attributes[attributes_count]; + # } + elif attr_name == "Code": + yield UInt16(self, "max_stack") + yield UInt16(self, "max_locals") + yield UInt32(self, "code_length") + if self["code_length"].value > 0: + yield JavaBytecode(self, "code", self["code_length"].value) + yield UInt16(self, "exception_table_length") + if self["exception_table_length"].value > 0: + yield FieldArray(self, "exception_table", ExceptionTableEntry, + self["exception_table_length"].value) + yield UInt16(self, "attributes_count") + if self["attributes_count"].value > 0: + yield FieldArray(self, "attributes", AttributeInfo, + self["attributes_count"].value) + + # Exceptions_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # u2 number_of_exceptions; + # u2 exception_index_table[number_of_exceptions]; + # } + elif (attr_name == 
"Exceptions"): + yield UInt16(self, "number_of_exceptions") + yield FieldArray(self, "exception_index_table", CPIndex, + self["number_of_exceptions"].value, target_types="Class") + assert self["attribute_length"].value == \ + 2 + self["number_of_exceptions"].value * 2 + + # InnerClasses_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # u2 number_of_classes; + # { u2 inner_class_info_index; + # u2 outer_class_info_index; + # u2 inner_name_index; + # u2 inner_class_access_flags; + # } classes[number_of_classes]; + # } + elif (attr_name == "InnerClasses"): + yield UInt16(self, "number_of_classes") + if self["number_of_classes"].value > 0: + yield FieldArray(self, "classes", InnerClassesEntry, + self["number_of_classes"].value) + assert self["attribute_length"].value == \ + 2 + self["number_of_classes"].value * 8 + + # Synthetic_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # } + elif (attr_name == "Synthetic"): + assert self["attribute_length"].value == 0 + + # SourceFile_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # u2 sourcefile_index; + # } + elif (attr_name == "SourceFile"): + assert self["attribute_length"].value == 2 + yield CPIndex(self, "sourcefile_index", target_types="Utf8") + + # LineNumberTable_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # u2 line_number_table_length; + # { u2 start_pc; + # u2 line_number; + # } line_number_table[line_number_table_length]; + # } + elif (attr_name == "LineNumberTable"): + yield UInt16(self, "line_number_table_length") + if self["line_number_table_length"].value > 0: + yield FieldArray(self, "line_number_table", + LineNumberTableEntry, + self["line_number_table_length"].value) + assert self["attribute_length"].value == \ + 2 + self["line_number_table_length"].value * 4 + + # LocalVariableTable_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # u2 local_variable_table_length; + # { u2 start_pc; + # u2 length; + # u2 
name_index; + # u2 descriptor_index; + # u2 index; + # } local_variable_table[local_variable_table_length]; + # } + elif (attr_name == "LocalVariableTable"): + yield UInt16(self, "local_variable_table_length") + if self["local_variable_table_length"].value > 0: + yield FieldArray(self, "local_variable_table", + LocalVariableTableEntry, + self["local_variable_table_length"].value) + assert self["attribute_length"].value == \ + 2 + self["local_variable_table_length"].value * 10 + + # Deprecated_attribute { + # u2 attribute_name_index; + # u4 attribute_length; + # } + elif (attr_name == "Deprecated"): + assert self["attribute_length"].value == 0 + + # Unkown attribute type. They are allowed by the JVM specs, but we + # can't say much about them... + elif self["attribute_length"].value > 0: + yield RawBytes(self, "info", self["attribute_length"].value) + +class ExceptionTableEntry(FieldSet): + static_size = 48 + CPIndex.static_size + + def createFields(self): + yield textHandler(UInt16(self, "start_pc"), hexadecimal) + yield textHandler(UInt16(self, "end_pc"), hexadecimal) + yield textHandler(UInt16(self, "handler_pc"), hexadecimal) + yield CPIndex(self, "catch_type", target_types="Class") + +class InnerClassesEntry(StaticFieldSet): + format = ( + (CPIndex, "inner_class_info_index", + {"target_types": "Class", "allow_zero": True}), + (CPIndex, "outer_class_info_index", + {"target_types": "Class", "allow_zero": True}), + (CPIndex, "inner_name_index", + {"target_types": "Utf8", "allow_zero": True}), + + # Inner class access flags (16 bits) + (NullBits, "reserved[]", 5), + (Bit, "abstract"), + (Bit, "interface"), + (NullBits, "reserved[]", 3), + (Bit, "super"), + (Bit, "final"), + (Bit, "static"), + (Bit, "protected"), + (Bit, "private"), + (Bit, "public"), + ) + +class LineNumberTableEntry(StaticFieldSet): + format = ( + (UInt16, "start_pc"), + (UInt16, "line_number") + ) + +class LocalVariableTableEntry(StaticFieldSet): + format = ( + (UInt16, "start_pc"), + (UInt16, 
"length"), + (CPIndex, "name_index", {"target_types": "Utf8"}), + (CPIndex, "descriptor_index", {"target_types": "Utf8", + "target_text_handler": parse_field_descriptor}), + (UInt16, "index") + ) + + +############################################################################### +# ClassFile { +# u4 magic; +# u2 minor_version; +# u2 major_version; +# u2 constant_pool_count; +# cp_info constant_pool[constant_pool_count-1]; +# u2 access_flags; +# u2 this_class; +# u2 super_class; +# u2 interfaces_count; +# u2 interfaces[interfaces_count]; +# u2 fields_count; +# field_info fields[fields_count]; +# u2 methods_count; +# method_info methods[methods_count]; +# u2 attributes_count; +# attribute_info attributes[attributes_count]; +# } +class JavaCompiledClassFile(Parser): + """ + Root of the .class parser. + """ + + endian = BIG_ENDIAN + + PARSER_TAGS = { + "id": "java_class", + "category": "program", + "file_ext": ("class",), + "mime": (u"application/java-vm",), + "min_size": (32 + 3*16), + "description": "Compiled Java class" + } + + MAGIC = 0xCAFEBABE + KNOWN_VERSIONS = { + "45.3": "JDK 1.1", + "46.0": "JDK 1.2", + "47.0": "JDK 1.3", + "48.0": "JDK 1.4", + "49.0": "JDK 1.5", + "50.0": "JDK 1.6" + } + + # Constants go here since they will probably depend on the detected format + # version at some point. Though, if they happen to be really backward + # compatible, they may become module globals. + CONSTANT_TYPES = { + 1: "Utf8", + 3: "Integer", + 4: "Float", + 5: "Long", + 6: "Double", + 7: "Class", + 8: "String", + 9: "Fieldref", + 10: "Methodref", + 11: "InterfaceMethodref", + 12: "NameAndType" + } + + def validate(self): + if self["magic"].value != self.MAGIC: + return "Wrong magic signature!" 
+ version = "%d.%d" % (self["major_version"].value, self["minor_version"].value) + if version not in self.KNOWN_VERSIONS: + return "Unknown version (%s)" % version + return True + + def createDescription(self): + version = "%d.%d" % (self["major_version"].value, self["minor_version"].value) + if version in self.KNOWN_VERSIONS: + return "Compiled Java class, %s" % self.KNOWN_VERSIONS[version] + else: + return "Compiled Java class, version %s" % version + + def createFields(self): + yield textHandler(UInt32(self, "magic", "Java compiled class signature"), + hexadecimal) + yield UInt16(self, "minor_version", "Class format minor version") + yield UInt16(self, "major_version", "Class format major version") + yield UInt16(self, "constant_pool_count", "Size of the constant pool") + if self["constant_pool_count"].value > 1: + #yield FieldArray(self, "constant_pool", CPInfo, + # (self["constant_pool_count"].value - 1), first_index=1) + # Mmmh... can't use FieldArray actually, because ConstantPool + # requires some specific hacks (skipping some indexes after Long + # and Double entries). 
+ yield ConstantPool(self, "constant_pool", + (self["constant_pool_count"].value)) + + # Inner class access flags (16 bits) + yield NullBits(self, "reserved[]", 5) + yield Bit(self, "abstract") + yield Bit(self, "interface") + yield NullBits(self, "reserved[]", 3) + yield Bit(self, "super") + yield Bit(self, "final") + yield Bit(self, "static") + yield Bit(self, "protected") + yield Bit(self, "private") + yield Bit(self, "public") + + yield CPIndex(self, "this_class", "Class name", target_types="Class") + yield CPIndex(self, "super_class", "Super class name", target_types="Class") + yield UInt16(self, "interfaces_count", "Number of implemented interfaces") + if self["interfaces_count"].value > 0: + yield FieldArray(self, "interfaces", CPIndex, + self["interfaces_count"].value, target_types="Class") + yield UInt16(self, "fields_count", "Number of fields") + if self["fields_count"].value > 0: + yield FieldArray(self, "fields", FieldInfo, + self["fields_count"].value) + yield UInt16(self, "methods_count", "Number of methods") + if self["methods_count"].value > 0: + yield FieldArray(self, "methods", MethodInfo, + self["methods_count"].value) + yield UInt16(self, "attributes_count", "Number of attributes") + if self["attributes_count"].value > 0: + yield FieldArray(self, "attributes", AttributeInfo, + self["attributes_count"].value) + +# vim: set expandtab tabstop=4 shiftwidth=4 autoindent smartindent: diff --git a/libs/hachoir_parser/program/prc.py b/libs/hachoir_parser/program/prc.py new file mode 100644 index 0000000..f4db025 --- /dev/null +++ b/libs/hachoir_parser/program/prc.py @@ -0,0 +1,82 @@ +""" +PRC (Palm resource) parser. 
+ +Author: Sebastien Ponce +Creation date: 29 october 2008 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt16, UInt32, TimestampMac32, + String, RawBytes) +from hachoir_core.endian import BIG_ENDIAN + +class PRCHeader(FieldSet): + static_size = 78*8 + + def createFields(self): + yield String(self, "name", 32, "Name") + yield UInt16(self, "flags", "Flags") + yield UInt16(self, "version", "Version") + yield TimestampMac32(self, "create_time", "Creation time") + yield TimestampMac32(self, "mod_time", "Modification time") + yield TimestampMac32(self, "backup_time", "Backup time") + yield UInt32(self, "mod_num", "mod num") + yield UInt32(self, "app_info", "app info") + yield UInt32(self, "sort_info", "sort info") + yield UInt32(self, "type", "type") + yield UInt32(self, "id", "id") + yield UInt32(self, "unique_id_seed", "unique_id_seed") + yield UInt32(self, "next_record_list", "next_record_list") + yield UInt16(self, "num_records", "num_records") + +class ResourceHeader(FieldSet): + static_size = 10*8 + + def createFields(self): + yield String(self, "name", 4, "Name of the resource") + yield UInt16(self, "flags", "ID number of the resource") + yield UInt32(self, "offset", "Pointer to the resource data") + + def createDescription(self): + return "Resource Header (%s)" % self["name"] + +class PRCFile(Parser): + PARSER_TAGS = { + "id": "prc", + "category": "program", + "file_ext": ("prc", ""), + "min_size": ResourceHeader.static_size, # At least one program header + "mime": ( + u"application/x-pilot-prc", + u"application/x-palmpilot"), + "description": "Palm Resource File" + } + endian = BIG_ENDIAN + + def validate(self): + # FIXME: Implement the validation function! 
+ return False + + def createFields(self): + # Parse header and program headers + yield PRCHeader(self, "header", "Header") + lens = [] + firstOne = True + poff = 0 + for index in xrange(self["header/num_records"].value): + r = ResourceHeader(self, "res_header[]") + if firstOne: + firstOne = False + else: + lens.append(r["offset"].value - poff) + poff = r["offset"].value + yield r + lens.append(self.size/8 - poff) + yield UInt16(self, "placeholder", "Place holder bytes") + for i in range(len(lens)): + yield RawBytes(self, "res[]", lens[i], '"'+self["res_header["+str(i)+"]/name"].value+"\" Resource") + + def createDescription(self): + return "Palm Resource file" + diff --git a/libs/hachoir_parser/program/python.py b/libs/hachoir_parser/program/python.py new file mode 100644 index 0000000..6eea32b --- /dev/null +++ b/libs/hachoir_parser/program/python.py @@ -0,0 +1,334 @@ +""" +Python compiled source code parser. + +Informations: +- Python 2.4.2 source code: + files Python/marshal.c and Python/import.c + +Author: Victor Stinner +Creation: 25 march 2005 +""" + +DISASSEMBLE = False + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, UInt8, + UInt16, Int32, UInt32, Int64, ParserError, Float64, Enum, + Character, Bytes, RawBytes, PascalString8, TimestampUnix32) +from hachoir_core.endian import LITTLE_ENDIAN +from hachoir_core.bits import long2raw +from hachoir_core.text_handler import textHandler, hexadecimal +from hachoir_core.i18n import ngettext +if DISASSEMBLE: + from dis import dis + + def disassembleBytecode(field): + bytecode = field.value + dis(bytecode) + +# --- String and string reference --- +def parseString(parent): + yield UInt32(parent, "length", "Length") + length = parent["length"].value + if parent.name == "lnotab": + bytecode_offset=0 + line_number=parent['../firstlineno'].value + for i in range(0,length,2): + bc_off_delta=UInt8(parent, 'bytecode_offset_delta[]') + yield bc_off_delta + bytecode_offset+=bc_off_delta.value + 
bc_off_delta._description='Bytecode Offset %i'%bytecode_offset + line_number_delta=UInt8(parent, 'line_number_delta[]') + yield line_number_delta + line_number+=line_number_delta.value + line_number_delta._description='Line Number %i'%line_number + elif 0 < length: + yield RawBytes(parent, "text", length, "Content") + if DISASSEMBLE and parent.name == "compiled_code": + disassembleBytecode(parent["text"]) + +def parseStringRef(parent): + yield textHandler(UInt32(parent, "ref"), hexadecimal) +def createStringRefDesc(parent): + return "String ref: %s" % parent["ref"].display + +# --- Integers --- +def parseInt32(parent): + yield Int32(parent, "value") + +def parseInt64(parent): + yield Int64(parent, "value") + +def parseLong(parent): + yield Int32(parent, "digit_count") + for index in xrange( abs(parent["digit_count"].value) ): + yield UInt16(parent, "digit[]") + + +# --- Float and complex --- +def parseFloat(parent): + yield PascalString8(parent, "value") +def parseBinaryFloat(parent): + yield Float64(parent, "value") +def parseComplex(parent): + yield PascalString8(parent, "real") + yield PascalString8(parent, "complex") +def parseBinaryComplex(parent): + yield Float64(parent, "real") + yield Float64(parent, "complex") + + +# --- Tuple and list --- +def parseTuple(parent): + yield Int32(parent, "count", "Item count") + count = parent["count"].value + if count < 0: + raise ParserError("Invalid tuple/list count") + for index in xrange(count): + yield Object(parent, "item[]") + +def createTupleDesc(parent): + count = parent["count"].value + items = ngettext("%s item", "%s items", count) % count + return "%s: %s" % (parent.code_info[2], items) + + +# --- Dict --- +def parseDict(parent): + """ + Format is: (key1, value1, key2, value2, ..., keyn, valuen, NULL) + where each keyi and valuei is an object. 
+ """ + parent.count = 0 + while True: + key = Object(parent, "key[]") + yield key + if key["bytecode"].value == "0": + break + yield Object(parent, "value[]") + parent.count += 1 + +def createDictDesc(parent): + return "Dict: %s" % (ngettext("%s key", "%s keys", parent.count) % parent.count) + +# --- Code --- +def parseCode(parent): + if 0x3000000 <= parent.root.getVersion(): + yield UInt32(parent, "arg_count", "Argument count") + yield UInt32(parent, "kwonlyargcount", "Keyword only argument count") + yield UInt32(parent, "nb_locals", "Number of local variables") + yield UInt32(parent, "stack_size", "Stack size") + yield UInt32(parent, "flags") + elif 0x2030000 <= parent.root.getVersion(): + yield UInt32(parent, "arg_count", "Argument count") + yield UInt32(parent, "nb_locals", "Number of local variables") + yield UInt32(parent, "stack_size", "Stack size") + yield UInt32(parent, "flags") + else: + yield UInt16(parent, "arg_count", "Argument count") + yield UInt16(parent, "nb_locals", "Number of local variables") + yield UInt16(parent, "stack_size", "Stack size") + yield UInt16(parent, "flags") + yield Object(parent, "compiled_code") + yield Object(parent, "consts") + yield Object(parent, "names") + yield Object(parent, "varnames") + if 0x2000000 <= parent.root.getVersion(): + yield Object(parent, "freevars") + yield Object(parent, "cellvars") + yield Object(parent, "filename") + yield Object(parent, "name") + if 0x2030000 <= parent.root.getVersion(): + yield UInt32(parent, "firstlineno", "First line number") + else: + yield UInt16(parent, "firstlineno", "First line number") + yield Object(parent, "lnotab") + +class Object(FieldSet): + bytecode_info = { + # Don't contains any data + '0': ("null", None, "NULL", None), + 'N': ("none", None, "None", None), + 'F': ("false", None, "False", None), + 'T': ("true", None, "True", None), + 'S': ("stop_iter", None, "StopIter", None), + '.': ("ellipsis", None, "ELLIPSIS", None), + '?': ("unknown", None, "Unknown", None), + + 
'i': ("int32", parseInt32, "Int32", None), + 'I': ("int64", parseInt64, "Int64", None), + 'f': ("float", parseFloat, "Float", None), + 'g': ("bin_float", parseBinaryFloat, "Binary float", None), + 'x': ("complex", parseComplex, "Complex", None), + 'y': ("bin_complex", parseBinaryComplex, "Binary complex", None), + 'l': ("long", parseLong, "Long", None), + 's': ("string", parseString, "String", None), + 't': ("interned", parseString, "Interned", None), + 'u': ("unicode", parseString, "Unicode", None), + 'R': ("string_ref", parseStringRef, "String ref", createStringRefDesc), + '(': ("tuple", parseTuple, "Tuple", createTupleDesc), + '[': ("list", parseTuple, "List", createTupleDesc), + '<': ("set", parseTuple, "Set", createTupleDesc), + '>': ("frozenset", parseTuple, "Frozen set", createTupleDesc), + '{': ("dict", parseDict, "Dict", createDictDesc), + 'c': ("code", parseCode, "Code", None), + } + + def __init__(self, parent, name, **kw): + FieldSet.__init__(self, parent, name, **kw) + code = self["bytecode"].value + if code not in self.bytecode_info: + raise ParserError('Unknown bytecode: "%s"' % code) + self.code_info = self.bytecode_info[code] + if not name: + self._name = self.code_info[0] + if code == "l": + self.createValue = self.createValueLong + elif code in ("i", "I", "f", "g"): + self.createValue = lambda: self["value"].value + elif code == "T": + self.createValue = lambda: True + elif code == "F": + self.createValue = lambda: False + elif code in ("x", "y"): + self.createValue = self.createValueComplex + elif code in ("s", "t", "u"): + self.createValue = self.createValueString + self.createDisplay = self.createDisplayString + if code == 't': + if not hasattr(self.root,'string_table'): + self.root.string_table=[] + self.root.string_table.append(self) + elif code == 'R': + if hasattr(self.root,'string_table'): + self.createValue = self.createValueStringRef + + def createValueString(self): + if "text" in self: + return self["text"].value + else: + return "" + 
+ def createDisplayString(self): + if "text" in self: + return self["text"].display + else: + return "(empty)" + + def createValueLong(self): + is_negative = self["digit_count"].value < 0 + count = abs(self["digit_count"].value) + total = 0 + for index in xrange(count-1, -1, -1): + total <<= 15 + total += self["digit[%u]" % index].value + if is_negative: + total = -total + return total + + def createValueStringRef(self): + return self.root.string_table[self['ref'].value].value + + def createDisplayStringRef(self): + return self.root.string_table[self['ref'].value].display + + def createValueComplex(self): + return complex( + float(self["real"].value), + float(self["complex"].value)) + + def createFields(self): + yield Character(self, "bytecode", "Bytecode") + parser = self.code_info[1] + if parser: + for field in parser(self): + yield field + + def createDescription(self): + create = self.code_info[3] + if create: + return create(self) + else: + return self.code_info[2] + +class PythonCompiledFile(Parser): + PARSER_TAGS = { + "id": "python", + "category": "program", + "file_ext": ("pyc", "pyo"), + "min_size": 9*8, + "description": "Compiled Python script (.pyc/.pyo files)" + } + endian = LITTLE_ENDIAN + + # Dictionnary which associate the pyc signature (32-bit integer) + # to a Python version string (eg. "m\xf2\r\n" => "Python 2.4b1"). 
+ # This list comes from CPython source code, see "MAGIC" + # and "pyc_magic" in file Python/import.c + MAGIC = { + # Python 1.x + 20121: ("1.5", 0x1050000), + + # Python 2.x + 50823: ("2.0", 0x2000000), + 60202: ("2.1", 0x2010000), + 60717: ("2.2", 0x2020000), + 62011: ("2.3a0", 0x2030000), + 62021: ("2.3a0", 0x2030000), + 62041: ("2.4a0", 0x2040000), + 62051: ("2.4a3", 0x2040000), + 62061: ("2.4b1", 0x2040000), + 62071: ("2.5a0", 0x2050000), + 62081: ("2.5a0 (ast-branch)", 0x2050000), + 62091: ("2.5a0 (with)", 0x2050000), + 62092: ("2.5a0 (WITH_CLEANUP opcode)", 0x2050000), + 62101: ("2.5b3", 0x2050000), + 62111: ("2.5b3", 0x2050000), + 62121: ("2.5c1", 0x2050000), + 62131: ("2.5c2", 0x2050000), + + # Python 3.x + 3000: ("3.0 (3000)", 0x3000000), + 3010: ("3.0 (3010)", 0x3000000), + 3020: ("3.0 (3020)", 0x3000000), + 3030: ("3.0 (3030)", 0x3000000), + 3040: ("3.0 (3040)", 0x3000000), + 3050: ("3.0 (3050)", 0x3000000), + 3060: ("3.0 (3060)", 0x3000000), + 3070: ("3.0 (3070)", 0x3000000), + 3080: ("3.0 (3080)", 0x3000000), + 3090: ("3.0 (3090)", 0x3000000), + 3100: ("3.0 (3100)", 0x3000000), + 3102: ("3.0 (3102)", 0x3000000), + 3110: ("3.0a4", 0x3000000), + 3130: ("3.0a5", 0x3000000), + 3131: ("3.0a5 unicode", 0x3000000), + } + + # Dictionnary which associate the pyc signature (4-byte long string) + # to a Python version string (eg. 
"m\xf2\r\n" => "2.4b1") + STR_MAGIC = dict( \ + (long2raw(magic | (ord('\r')<<16) | (ord('\n')<<24), LITTLE_ENDIAN), value[0]) \ + for magic, value in MAGIC.iteritems()) + + def validate(self): + signature = self.stream.readBits(0, 16, self.endian) + if signature not in self.MAGIC: + return "Unknown version (%s)" % signature + if self.stream.readBytes(2*8, 2) != "\r\n": + return r"Wrong signature (\r\n)" + if self.stream.readBytes(8*8, 1) != 'c': + return "First object bytecode is not code" + return True + + def getVersion(self): + if not hasattr(self, "version"): + signature = self.stream.readBits(0, 16, self.endian) + self.version = self.MAGIC[signature][1] + return self.version + + def createFields(self): + yield Enum(Bytes(self, "signature", 4, "Python file signature and version"), self.STR_MAGIC) + yield TimestampUnix32(self, "timestamp", "Timestamp") + yield Object(self, "content") + diff --git a/libs/hachoir_parser/template.py b/libs/hachoir_parser/template.py new file mode 100644 index 0000000..836215c --- /dev/null +++ b/libs/hachoir_parser/template.py @@ -0,0 +1,54 @@ +""" +====================== 8< ============================ +This file is an Hachoir parser template. Make a copy +of it, and adapt it to your needs. + +You have to replace all "TODO" with you code. +====================== 8< ============================ + +TODO parser. + +Author: TODO TODO +Creation date: YYYY-mm-DD +""" + +# TODO: Just keep what you need +from hachoir_parser import Parser +from hachoir_core.field import (ParserError, + UInt8, UInt16, UInt32, String, RawBytes) +from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN + +class TODOFile(Parser): + PARSER_TAGS = { + "id": "TODO", + "category": "TODO", # "archive", "audio", "container", ... 
+ "file_ext": ("TODO",), # TODO: Example ("bmp",) to parse the file "image.bmp" + "mime": (u"TODO"), # TODO: Example: "image/png" + "min_size": 0, # TODO: Minimum file size (x bits, or x*8 in bytes) + "description": "TODO", # TODO: Example: "A bitmap picture" + } + +# TODO: Choose between little or big endian +# endian = LITTLE_ENDIAN +# endian = BIG_ENDIAN + + def validate(self): + # TODO: Check that file looks like your format + # Example: check first two bytes + # return (self.stream.readBytes(0, 2) == 'BM') + return False + + def createFields(self): + # TODO: Write your parser using this model: + # yield UInt8(self, "name1", "description1") + # yield UInt16(self, "name2", "description2") + # yield UInt32(self, "name3", "description3") + # yield String(self, "name4", 1, "description4") # TODO: add ", charset="ASCII")" + # yield String(self, "name5", 1, "description5", charset="ASCII") + # yield String(self, "name6", 1, "description6", charset="ISO-8859-1") + + # Read rest of the file (if any) + # TODO: You may remove this code + if self.current_size < self._size: + yield self.seekBit(self._size, "end") + diff --git a/libs/hachoir_parser/version.py b/libs/hachoir_parser/version.py new file mode 100644 index 0000000..6571743 --- /dev/null +++ b/libs/hachoir_parser/version.py @@ -0,0 +1,5 @@ +__version__ = "1.3.5" +PACKAGE = "hachoir-parser" +WEBSITE = "http://bitbucket.org/haypo/hachoir/wiki/hachoir-parser" +LICENSE = 'GNU GPL v2' + diff --git a/libs/hachoir_parser/video/__init__.py b/libs/hachoir_parser/video/__init__.py new file mode 100644 index 0000000..26f787e --- /dev/null +++ b/libs/hachoir_parser/video/__init__.py @@ -0,0 +1,6 @@ +from hachoir_parser.video.asf import AsfFile +from hachoir_parser.video.flv import FlvFile +from hachoir_parser.video.mov import MovFile +from hachoir_parser.video.mpeg_video import MPEGVideoFile +from hachoir_parser.video.mpeg_ts import MPEG_TS + diff --git a/libs/hachoir_parser/video/amf.py b/libs/hachoir_parser/video/amf.py 
new file mode 100644 index 0000000..496c5c1 --- /dev/null +++ b/libs/hachoir_parser/video/amf.py @@ -0,0 +1,110 @@ +""" +AMF metadata (inside Flash video, FLV file) parser. + +Documentation: + + - flashticle: Python project to read Flash (formats SWF, FLV and AMF) + http://undefined.org/python/#flashticle + +Author: Victor Stinner +Creation date: 4 november 2006 +""" + +from hachoir_core.field import (FieldSet, ParserError, + UInt8, UInt16, UInt32, PascalString16, Float64) +from hachoir_core.tools import timestampUNIX + +def parseUTF8(parent): + yield PascalString16(parent, "value", charset="UTF-8") + +def parseDouble(parent): + yield Float64(parent, "value") + +def parseBool(parent): + yield UInt8(parent, "value") + +def parseArray(parent): + yield UInt32(parent, "count") + for index in xrange(parent["count"].value): + yield AMFObject(parent, "item[]") + +def parseObjectAttributes(parent): + while True: + item = Attribute(parent, "attr[]") + yield item + if item["key"].value == "": + break + +def parseMixedArray(parent): + yield UInt32(parent, "count") + for index in xrange(parent["count"].value + 1): + item = Attribute(parent, "item[]") + yield item + if not item['key'].value: + break + +def parseDate(parent): + yield Float64(parent, "timestamp_microsec") + yield UInt16(parent, "timestamp_sec") + +def parseNothing(parent): + raise StopIteration() + +class AMFObject(FieldSet): + CODE_DATE = 11 + tag_info = { + # http://osflash.org/amf/astypes + 0: (parseDouble, "Double"), + 1: (parseBool, "Boolean"), + 2: (parseUTF8, "UTF-8 string"), + 3: (parseObjectAttributes, "Object attributes"), + #MOVIECLIP = '\x04', + #NULL = '\x05', + #UNDEFINED = '\x06', + #REFERENCE = '\x07', + 8: (parseMixedArray, "Mixed array"), + 9: (parseNothing, "End of object"), + 10: (parseArray, "Array"), + CODE_DATE: (parseDate, "Date"), + #LONGUTF8 = '\x0c', + #UNSUPPORTED = '\x0d', + ## Server-to-client only + #RECORDSET = '\x0e', + #XML = '\x0f', + #TYPEDOBJECT = '\x10', + } + + def 
__init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + code = self["type"].value + try: + self.parser, desc = self.tag_info[code] + if code == self.CODE_DATE: + self.createValue = self.createValueDate + except KeyError: + raise ParserError("AMF: Unable to parse type %s" % code) + + def createFields(self): + yield UInt8(self, "type") + for field in self.parser(self): + yield field + + def createValueDate(self): + value = (self["timestamp_microsec"].value * 0.001) \ + - (self["timestamp_sec"].value * 60) + return timestampUNIX(value) + +class Attribute(AMFObject): + def __init__(self, *args): + AMFObject.__init__(self, *args) + self._description = None + + def createFields(self): + yield PascalString16(self, "key", charset="UTF-8") + yield UInt8(self, "type") + for field in self.parser(self): + yield field + + def createDescription(self): + return 'Attribute "%s"' % self["key"].value + diff --git a/libs/hachoir_parser/video/asf.py b/libs/hachoir_parser/video/asf.py new file mode 100644 index 0000000..39205ea --- /dev/null +++ b/libs/hachoir_parser/video/asf.py @@ -0,0 +1,356 @@ +""" +Advanced Streaming Format (ASF) parser, format used by Windows Media Video +(WMF) and Windows Media Audio (WMA). 
Information:
"reserved[]", 7) + yield UInt32(self, "width") + yield UInt32(self, "height") + yield PaddingBytes(self, "reserved[]", 2) + yield UInt16(self, "depth") + yield Enum(String(self, "codec", 4, charset="ASCII"), video_fourcc_name) + yield NullBytes(self, "padding", 20) + else: + yield UInt32(self, "width") + yield UInt32(self, "height") + yield PaddingBytes(self, "reserved[]", 1) + yield UInt16(self, "format_data_size") + if self["format_data_size"].value < 40: + raise ParserError("Unknown format data size") + yield BitmapInfoHeader(self, "bmp_info", use_fourcc=True) + +class FileProperty(FieldSet): + guid = "8CABDCA1-A947-11CF-8EE4-00C00C205365" + def createFields(self): + yield GUID(self, "guid") + yield filesizeHandler(UInt64(self, "file_size")) + yield TimestampWin64(self, "creation_date") + yield UInt64(self, "pckt_count") + yield TimedeltaWin64(self, "play_duration") + yield TimedeltaWin64(self, "send_duration") + yield UInt64(self, "preroll") + yield Bit(self, "broadcast", "Is broadcast?") + yield Bit(self, "seekable", "Seekable stream?") + yield PaddingBits(self, "reserved[]", 30) + yield filesizeHandler(UInt32(self, "min_pckt_size")) + yield filesizeHandler(UInt32(self, "max_pckt_size")) + yield displayHandler(UInt32(self, "max_bitrate"), humanBitRate) + +class HeaderExtension(FieldSet): + guid = "5FBF03B5-A92E-11CF-8EE3-00C00C205365" + def createFields(self): + yield GUID(self, "reserved[]") + yield UInt16(self, "reserved[]") + yield UInt32(self, "size") + if self["size"].value: + yield RawBytes(self, "data", self["size"].value) + +class Header(FieldSet): + guid = "75B22630-668E-11CF-A6D9-00AA0062CE6C" + def createFields(self): + yield UInt32(self, "obj_count") + yield PaddingBytes(self, "reserved[]", 2) + for index in xrange(self["obj_count"].value): + yield Object(self, "object[]") + +class Metadata(FieldSet): + guid = "75B22633-668E-11CF-A6D9-00AA0062CE6C" + names = ("title", "author", "copyright", "xxx", "yyy") + def createFields(self): + for index in 
xrange(5): + yield UInt16(self, "size[]") + for name, size in izip(self.names, self.array("size")): + if size.value: + yield String(self, name, size.value, charset="UTF-16-LE", strip=" \0") + +class Descriptor(FieldSet): + """ + See ExtendedContentDescription class. + """ + TYPE_BYTE_ARRAY = 1 + TYPE_NAME = { + 0: "Unicode", + 1: "Byte array", + 2: "BOOL (32 bits)", + 3: "DWORD (32 bits)", + 4: "QWORD (64 bits)", + 5: "WORD (16 bits)" + } + def createFields(self): + yield PascalString16(self, "name", "Name", charset="UTF-16-LE", strip="\0") + yield Enum(UInt16(self, "type"), self.TYPE_NAME) + yield UInt16(self, "value_length") + type = self["type"].value + size = self["value_length"].value + name = "value" + if type == 0 and (size % 2) == 0: + yield String(self, name, size, charset="UTF-16-LE", strip="\0") + elif type in (2, 3): + yield UInt32(self, name) + elif type == 4: + yield UInt64(self, name) + else: + yield RawBytes(self, name, size) + +class ExtendedContentDescription(FieldSet): + guid = "D2D0A440-E307-11D2-97F0-00A0C95EA850" + def createFields(self): + yield UInt16(self, "count") + for index in xrange(self["count"].value): + yield Descriptor(self, "descriptor[]") + +class Codec(FieldSet): + """ + See CodecList class. 
+ """ + type_name = { + 1: "video", + 2: "audio" + } + def createFields(self): + yield Enum(UInt16(self, "type"), self.type_name) + yield UInt16(self, "name_len", "Name length in character (byte=len*2)") + if self["name_len"].value: + yield String(self, "name", self["name_len"].value*2, "Name", charset="UTF-16-LE", strip=" \0") + yield UInt16(self, "desc_len", "Description length in character (byte=len*2)") + if self["desc_len"].value: + yield String(self, "desc", self["desc_len"].value*2, "Description", charset="UTF-16-LE", strip=" \0") + yield UInt16(self, "info_len") + if self["info_len"].value: + yield RawBytes(self, "info", self["info_len"].value) + +class CodecList(FieldSet): + guid = "86D15240-311D-11D0-A3A4-00A0C90348F6" + + def createFields(self): + yield GUID(self, "reserved[]") + yield UInt32(self, "count") + for index in xrange(self["count"].value): + yield Codec(self, "codec[]") + +class SimpleIndexEntry(FieldSet): + """ + See SimpleIndex class. + """ + def createFields(self): + yield UInt32(self, "pckt_number") + yield UInt16(self, "pckt_count") + +class SimpleIndex(FieldSet): + guid = "33000890-E5B1-11CF-89F4-00A0C90349CB" + + def createFields(self): + yield GUID(self, "file_id") + yield TimedeltaWin64(self, "entry_interval") + yield UInt32(self, "max_pckt_count") + yield UInt32(self, "entry_count") + for index in xrange(self["entry_count"].value): + yield SimpleIndexEntry(self, "entry[]") + +class BitRate(FieldSet): + """ + See BitRateList class. 
+ """ + def createFields(self): + yield Bits(self, "stream_index", 7) + yield PaddingBits(self, "reserved", 9) + yield displayHandler(UInt32(self, "avg_bitrate"), humanBitRate) + +class BitRateList(FieldSet): + guid = "7BF875CE-468D-11D1-8D82-006097C9A2B2" + + def createFields(self): + yield UInt16(self, "count") + for index in xrange(self["count"].value): + yield BitRate(self, "bit_rate[]") + +class Data(FieldSet): + guid = "75B22636-668E-11CF-A6D9-00AA0062CE6C" + + def createFields(self): + yield GUID(self, "file_id") + yield UInt64(self, "packet_count") + yield PaddingBytes(self, "reserved", 2) + size = (self.size - self.current_size) / 8 + yield RawBytes(self, "data", size) + +class StreamProperty(FieldSet): + guid = "B7DC0791-A9B7-11CF-8EE6-00C00C205365" + def createFields(self): + yield GUID(self, "type") + yield GUID(self, "error_correction") + yield UInt64(self, "time_offset") + yield UInt32(self, "data_len") + yield UInt32(self, "error_correct_len") + yield Bits(self, "stream_index", 7) + yield Bits(self, "reserved[]", 8) + yield Bit(self, "encrypted", "Content is encrypted?") + yield UInt32(self, "reserved[]") + size = self["data_len"].value + if size: + tag = self["type"].value + if tag in Object.TAG_INFO: + name, parser = Object.TAG_INFO[tag][0:2] + yield parser(self, name, size=size*8) + else: + yield RawBytes(self, "data", size) + size = self["error_correct_len"].value + if size: + yield RawBytes(self, "error_correct", size) + +class Object(FieldSet): + # This list is converted to a dictionnary later where the key is the GUID + TAG_INFO = ( + ("header", Header, "Header object"), + ("file_prop", FileProperty, "File property"), + ("header_ext", HeaderExtension, "Header extension"), + ("codec_list", CodecList, "Codec list"), + ("simple_index", SimpleIndex, "Simple index"), + ("data", Data, "Data object"), + ("stream_prop[]", StreamProperty, "Stream properties"), + ("bit_rates", BitRateList, "Bit rate list"), + ("ext_desc", ExtendedContentDescription, 
"Extended content description"), + ("metadata", Metadata, "Metadata"), + ("video_header", VideoHeader, "Video"), + ("audio_header", AudioHeader, "Audio"), + ("bitrate_mutex", BitrateMutualExclusion, "Bitrate mutual exclusion"), + ) + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + + tag = self["guid"].value + if tag not in self.TAG_INFO: + self.handler = None + return + info = self.TAG_INFO[tag] + self._name = info[0] + self.handler = info[1] + + def createFields(self): + yield GUID(self, "guid") + yield filesizeHandler(UInt64(self, "size")) + + size = self["size"].value - self.current_size/8 + if 0 < size: + if self.handler: + yield self.handler(self, "content", size=size*8) + else: + yield RawBytes(self, "content", size) + +tag_info_list = Object.TAG_INFO +Object.TAG_INFO = dict( (parser[1].guid, parser) for parser in tag_info_list ) + +class AsfFile(Parser): + MAGIC = "\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C" + PARSER_TAGS = { + "id": "asf", + "category": "video", + "file_ext": ("wmv", "wma", "asf"), + "mime": (u"video/x-ms-asf", u"video/x-ms-wmv", u"audio/x-ms-wma"), + "min_size": 24*8, + "description": "Advanced Streaming Format (ASF), used for WMV (video) and WMA (audio)", + "magic": ((MAGIC, 0),), + } + FILE_TYPE = { + "video/x-ms-wmv": (".wmv", u"Window Media Video (wmv)"), + "video/x-ms-asf": (".asf", u"ASF container"), + "audio/x-ms-wma": (".wma", u"Window Media Audio (wma)"), + } + endian = LITTLE_ENDIAN + + def validate(self): + magic = self.MAGIC + if self.stream.readBytes(0, len(magic)) != magic: + return "Invalid magic" + header = self[0] + if not(30 <= header["size"].value <= MAX_HEADER_SIZE): + return "Invalid header size (%u)" % header["size"].value + return True + + def createMimeType(self): + audio = False + for prop in self.array("header/content/stream_prop"): + guid = prop["content/type"].value + if guid == VideoHeader.guid: + return u"video/x-ms-wmv" + if guid == AudioHeader.guid: + audio = 
True + if audio: + return u"audio/x-ms-wma" + else: + return u"video/x-ms-asf" + + def createFields(self): + while not self.eof: + yield Object(self, "object[]") + + def createDescription(self): + return self.FILE_TYPE[self.mime_type][1] + + def createFilenameSuffix(self): + return self.FILE_TYPE[self.mime_type][0] + + def createContentSize(self): + if self[0].name != "header": + return None + return self["header/content/file_prop/content/file_size"].value * 8 + diff --git a/libs/hachoir_parser/video/flv.py b/libs/hachoir_parser/video/flv.py new file mode 100644 index 0000000..5edbe7a --- /dev/null +++ b/libs/hachoir_parser/video/flv.py @@ -0,0 +1,157 @@ +""" +FLV video parser. + +Documentation: + + - FLV File format: http://osflash.org/flv + - libavformat from ffmpeg project + - flashticle: Python project to read Flash (SWF and FLV with AMF metadata) + http://undefined.org/python/#flashticle + +Author: Victor Stinner +Creation date: 4 november 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, + UInt8, UInt24, UInt32, NullBits, NullBytes, + Bit, Bits, String, RawBytes, Enum) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_parser.audio.mpeg_audio import Frame +from hachoir_parser.video.amf import AMFObject +from hachoir_core.tools import createDict + +SAMPLING_RATE = { + 0: ( 5512, "5.5 kHz"), + 1: (11025, "11 kHz"), + 2: (22050, "22.1 kHz"), + 3: (44100, "44.1 kHz"), +} +SAMPLING_RATE_VALUE = createDict(SAMPLING_RATE, 0) +SAMPLING_RATE_TEXT = createDict(SAMPLING_RATE, 1) + +AUDIO_CODEC_MP3 = 2 +AUDIO_CODEC_NAME = { + 0: u"Uncompressed", + 1: u"ADPCM", + 2: u"MP3", + 5: u"Nellymoser 8kHz mono", + 6: u"Nellymoser", +} + +VIDEO_CODEC_NAME = { + 2: u"Sorensen H.263", + 3: u"Screen video", + 4: u"On2 VP6", +} + +FRAME_TYPE = { + 1: u"keyframe", + 2: u"inter frame", + 3: u"disposable inter frame", +} + +class Header(FieldSet): + def createFields(self): + yield String(self, "signature", 3, "FLV format signature", 
charset="ASCII") + yield UInt8(self, "version") + + yield NullBits(self, "reserved[]", 5) + yield Bit(self, "type_flags_audio") + yield NullBits(self, "reserved[]", 1) + yield Bit(self, "type_flags_video") + + yield UInt32(self, "data_offset") + +def parseAudio(parent, size): + yield Enum(Bits(parent, "codec", 4, "Audio codec"), AUDIO_CODEC_NAME) + yield Enum(Bits(parent, "sampling_rate", 2, "Sampling rate"), SAMPLING_RATE_TEXT) + yield Bit(parent, "is_16bit", "16-bit or 8-bit per sample") + yield Bit(parent, "is_stereo", "Stereo or mono channel") + + size -= 1 + if 0 < size: + if parent["codec"].value == AUDIO_CODEC_MP3 : + yield Frame(parent, "music_data", size=size*8) + else: + yield RawBytes(parent, "music_data", size) + +def parseVideo(parent, size): + yield Enum(Bits(parent, "frame_type", 4, "Frame type"), FRAME_TYPE) + yield Enum(Bits(parent, "codec", 4, "Video codec"), VIDEO_CODEC_NAME) + if 1 < size: + yield RawBytes(parent, "data", size-1) + +def parseAMF(parent, size): + while parent.current_size < parent.size: + yield AMFObject(parent, "entry[]") + +class Chunk(FieldSet): + tag_info = { + 8: ("audio[]", parseAudio, ""), + 9: ("video[]", parseVideo, ""), + 18: ("metadata", parseAMF, ""), + } + + def __init__(self, *args, **kw): + FieldSet.__init__(self, *args, **kw) + self._size = (11 + self["size"].value) * 8 + tag = self["tag"].value + if tag in self.tag_info: + self._name, self.parser, self._description = self.tag_info[tag] + else: + self.parser = None + + def createFields(self): + yield UInt8(self, "tag") + yield UInt24(self, "size", "Content size") + yield UInt24(self, "timestamp", "Timestamp in millisecond") + yield NullBytes(self, "reserved", 4) + size = self["size"].value + if size: + if self.parser: + for field in self.parser(self, size): + yield field + else: + yield RawBytes(self, "content", size) + + def getSampleRate(self): + try: + return SAMPLING_RATE_VALUE[self["sampling_rate"].value] + except LookupError: + return None + +class 
FlvFile(Parser): + PARSER_TAGS = { + "id": "flv", + "category": "video", + "file_ext": ("flv",), + "mime": (u"video/x-flv",), + "min_size": 9*4, + "magic": ( + # Signature, version=1, flags=5 (video+audio), header size=9 + ("FLV\1\x05\0\0\0\x09", 0), + # Signature, version=1, flags=5 (video), header size=9 + ("FLV\1\x01\0\0\0\x09", 0), + ), + "description": u"Macromedia Flash video" + } + endian = BIG_ENDIAN + + def validate(self): + if self.stream.readBytes(0, 3) != "FLV": + return "Wrong file signature" + if self["header/data_offset"].value != 9: + return "Unknown data offset in main header" + return True + + def createFields(self): + yield Header(self, "header") + yield UInt32(self, "prev_size[]", "Size of previous chunk") + while not self.eof: + yield Chunk(self, "chunk[]") + yield UInt32(self, "prev_size[]", "Size of previous chunk") + + def createDescription(self): + return u"Macromedia Flash video version %s" % self["header/version"].value + diff --git a/libs/hachoir_parser/video/fourcc.py b/libs/hachoir_parser/video/fourcc.py new file mode 100644 index 0000000..9d8ee69 --- /dev/null +++ b/libs/hachoir_parser/video/fourcc.py @@ -0,0 +1,415 @@ +# +# fourcc are codes to specify the encoding method a audio or video string +# in RIFF file (.avi and .wav). 
+# +# The following lists come from mmpython project: +# file: mmpython/video/fourcc.py +# url: http://sourceforge.net/projects/mmpython/ +# + +# List of codecs with no compression (compression rate=1.0) +UNCOMPRESSED_AUDIO = set((1,3,6)) + +audio_codec_name = { +0x0000: u'Microsoft Unknown Wave Format', +0x0001: u'Microsoft Pulse Code Modulation (PCM)', +0x0002: u'Microsoft ADPCM', +0x0003: u'IEEE Float', +0x0004: u'Compaq Computer VSELP', +0x0005: u'IBM CVSD', +0x0006: u'Microsoft A-Law', +0x0007: u'Microsoft mu-Law', +0x0010: u'OKI ADPCM', +0x0011: u'Intel DVI/IMA ADPCM', +0x0012: u'Videologic MediaSpace ADPCM', +0x0013: u'Sierra Semiconductor ADPCM', +0x0014: u'Antex Electronics G.723 ADPCM', +0x0015: u'DSP Solutions DigiSTD', +0x0016: u'DSP Solutions DigiFIX', +0x0017: u'Dialogic OKI ADPCM', +0x0018: u'MediaVision ADPCM', +0x0019: u'Hewlett-Packard CU', +0x0020: u'Yamaha ADPCM', +0x0021: u'Speech Compression Sonarc', +0x0022: u'DSP Group TrueSpeech', +0x0023: u'Echo Speech EchoSC1', +0x0024: u'Audiofile AF36', +0x0025: u'Audio Processing Technology APTX', +0x0026: u'AudioFile AF10', +0x0027: u'Prosody 1612', +0x0028: u'LRC', +0x0030: u'Dolby AC2', +0x0031: u'Microsoft GSM 6.10', +0x0032: u'MSNAudio', +0x0033: u'Antex Electronics ADPCME', +0x0034: u'Control Resources VQLPC', +0x0035: u'DSP Solutions DigiREAL', +0x0036: u'DSP Solutions DigiADPCM', +0x0037: u'Control Resources CR10', +0x0038: u'Natural MicroSystems VBXADPCM', +0x0039: u'Crystal Semiconductor IMA ADPCM', +0x003A: u'EchoSC3', +0x003B: u'Rockwell ADPCM', +0x003C: u'Rockwell Digit LK', +0x003D: u'Xebec', +0x0040: u'Antex Electronics G.721 ADPCM', +0x0041: u'G.728 CELP', +0x0042: u'MSG723', +0x0050: u'Microsoft MPEG', +0x0052: u'RT24', +0x0053: u'PAC', +0x0055: u'MPEG Layer 3', +0x0059: u'Lucent G.723', +0x0060: u'Cirrus', +0x0061: u'ESPCM', +0x0062: u'Voxware', +0x0063: u'Canopus Atrac', +0x0064: u'G.726 ADPCM', +0x0065: u'G.722 ADPCM', +0x0066: u'DSAT', +0x0067: u'DSAT Display', +0x0069: u'Voxware 
Byte Aligned', +0x0070: u'Voxware AC8', +0x0071: u'Voxware AC10', +0x0072: u'Voxware AC16', +0x0073: u'Voxware AC20', +0x0074: u'Voxware MetaVoice', +0x0075: u'Voxware MetaSound', +0x0076: u'Voxware RT29HW', +0x0077: u'Voxware VR12', +0x0078: u'Voxware VR18', +0x0079: u'Voxware TQ40', +0x0080: u'Softsound', +0x0081: u'Voxware TQ60', +0x0082: u'MSRT24', +0x0083: u'G.729A', +0x0084: u'MVI MV12', +0x0085: u'DF G.726', +0x0086: u'DF GSM610', +0x0088: u'ISIAudio', +0x0089: u'Onlive', +0x0091: u'SBC24', +0x0092: u'Dolby AC3 SPDIF', +0x0097: u'ZyXEL ADPCM', +0x0098: u'Philips LPCBB', +0x0099: u'Packed', +0x0100: u'Rhetorex ADPCM', +0x0101: u'IBM mu-law', +0x0102: u'IBM A-law', +0x0103: u'IBM AVC Adaptive Differential Pulse Code Modulation (ADPCM)', +0x0111: u'Vivo G.723', +0x0112: u'Vivo Siren', +0x0123: u'Digital G.723', +0x0140: u'Windows Media Video V8', +0x0161: u'Windows Media Audio V7 / V8 / V9', +0x0162: u'Windows Media Audio Professional V9', +0x0163: u'Windows Media Audio Lossless V9', +0x0200: u'Creative Labs ADPCM', +0x0202: u'Creative Labs Fastspeech8', +0x0203: u'Creative Labs Fastspeech10', +0x0220: u'Quarterdeck', +0x0300: u'FM Towns Snd', +0x0300: u'Fujitsu FM Towns Snd', +0x0400: u'BTV Digital', +0x0680: u'VME VMPCM', +0x1000: u'Olivetti GSM', +0x1001: u'Olivetti ADPCM', +0x1002: u'Olivetti CELP', +0x1003: u'Olivetti SBC', +0x1004: u'Olivetti OPR', +0x1100: u'Lernout & Hauspie LH Codec', +0x1400: u'Norris', +0x1401: u'AT&T ISIAudio', +0x1500: u'Soundspace Music Compression', +0x2000: u'AC3', +0x7A21: u'GSM-AMR (CBR, no SID)', +0x7A22: u'GSM-AMR (VBR, including SID)', +0xFFFF: u'Development codec' +} + +video_fourcc_name = { +'3IV1': u'3ivx v1', +'3IV2': u'3ivx v2', +'AASC': u'Autodesk Animator', +'ABYR': u'Kensington ?ABYR?', +'AEMI': u'Array VideoONE MPEG1-I Capture', +'AFLC': u'Autodesk Animator FLC', +'AFLI': u'Autodesk Animator FLI', +'AMPG': u'Array VideoONE MPEG', +'ANIM': u'Intel RDX (ANIM)', +'AP41': u'AngelPotion Definitive', +'ASV1': u'Asus 
Video v1', +'ASV2': u'Asus Video v2', +'ASVX': u'Asus Video 2.0 (audio)', +'AUR2': u'Aura 2 Codec - YUV 4:2:2', +'AURA': u'Aura 1 Codec - YUV 4:1:1', +'BINK': u'RAD Game Tools Bink Video', +'BT20': u'Conexant Prosumer Video', +'BTCV': u'Conexant Composite Video Codec', +'BW10': u'Data Translation Broadway MPEG Capture', +'CC12': u'Intel YUV12', +'CDVC': u'Canopus DV', +'CFCC': u'Digital Processing Systems DPS Perception', +'CGDI': u'Microsoft Office 97 Camcorder Video', +'CHAM': u'Winnov Caviara Champagne', +'CJPG': u'Creative WebCam JPEG', +'CLJR': u'Cirrus Logic YUV 4 pixels', +'CMYK': u'Common Data Format in Printing', +'CPLA': u'Weitek 4:2:0 YUV Planar', +'CRAM': u'Microsoft Video 1 (CRAM)', +'CVID': u'Radius Cinepak', +'CWLT': u'?CWLT?', +'CWLT': u'Microsoft Color WLT DIB', +'CYUV': u'Creative Labs YUV', +'CYUY': u'ATI YUV', +'D261': u'H.261', +'D263': u'H.263', +'DIV3': u'DivX v3 MPEG-4 Low-Motion', +'DIV4': u'DivX v3 MPEG-4 Fast-Motion', +'DIV5': u'?DIV5?', +'DIVX': u'DivX v4', +'divx': u'DivX', +'DMB1': u'Matrox Rainbow Runner hardware MJPEG', +'DMB2': u'Paradigm MJPEG', +'DSVD': u'?DSVD?', +'DUCK': u'Duck True Motion 1.0', +'DVAN': u'?DVAN?', +'DVE2': u'InSoft DVE-2 Videoconferencing', +'dvsd': u'DV', +'DVSD': u'DV', +'DVX1': u'DVX1000SP Video Decoder', +'DVX2': u'DVX2000S Video Decoder', +'DVX3': u'DVX3000S Video Decoder', +'DX50': u'DivX v5', +'DXT1': u'Microsoft DirectX Compressed Texture (DXT1)', +'DXT2': u'Microsoft DirectX Compressed Texture (DXT2)', +'DXT3': u'Microsoft DirectX Compressed Texture (DXT3)', +'DXT4': u'Microsoft DirectX Compressed Texture (DXT4)', +'DXT5': u'Microsoft DirectX Compressed Texture (DXT5)', +'DXTC': u'Microsoft DirectX Compressed Texture (DXTC)', +'EKQ0': u'Elsa ?EKQ0?', +'ELK0': u'Elsa ?ELK0?', +'ESCP': u'Eidos Escape', +'ETV1': u'eTreppid Video ETV1', +'ETV2': u'eTreppid Video ETV2', +'ETVC': u'eTreppid Video ETVC', +'FLJP': u'D-Vision Field Encoded Motion JPEG', +'FRWA': u'SoftLab-Nsk Forward Motion JPEG w/ alpha 
channel', +'FRWD': u'SoftLab-Nsk Forward Motion JPEG', +'FVF1': u'Iterated Systems Fractal Video Frame', +'GLZW': u'Motion LZW (gabest@freemail.hu)', +'GPEG': u'Motion JPEG (gabest@freemail.hu)', +'GWLT': u'Microsoft Greyscale WLT DIB', +'H260': u'Intel ITU H.260 Videoconferencing', +'H261': u'Intel ITU H.261 Videoconferencing', +'H262': u'Intel ITU H.262 Videoconferencing', +'H263': u'Intel ITU H.263 Videoconferencing', +'H264': u'Intel ITU H.264 Videoconferencing', +'H265': u'Intel ITU H.265 Videoconferencing', +'H266': u'Intel ITU H.266 Videoconferencing', +'H267': u'Intel ITU H.267 Videoconferencing', +'H268': u'Intel ITU H.268 Videoconferencing', +'H269': u'Intel ITU H.269 Videoconferencing', +'HFYU': u'Huffman Lossless Codec', +'HMCR': u'Rendition Motion Compensation Format (HMCR)', +'HMRR': u'Rendition Motion Compensation Format (HMRR)', +'i263': u'Intel ITU H.263 Videoconferencing (i263)', +'I420': u'Intel Indeo 4', +'IAN ': u'Intel RDX', +'ICLB': u'InSoft CellB Videoconferencing', +'IGOR': u'Power DVD', +'IJPG': u'Intergraph JPEG', +'ILVC': u'Intel Layered Video', +'ILVR': u'ITU-T H.263+', +'IPDV': u'I-O Data Device Giga AVI DV Codec', +'IR21': u'Intel Indeo 2.1', +'IRAW': u'Intel YUV Uncompressed', +'IV30': u'Ligos Indeo 3.0', +'IV31': u'Ligos Indeo 3.1', +'IV32': u'Ligos Indeo 3.2', +'IV33': u'Ligos Indeo 3.3', +'IV34': u'Ligos Indeo 3.4', +'IV35': u'Ligos Indeo 3.5', +'IV36': u'Ligos Indeo 3.6', +'IV37': u'Ligos Indeo 3.7', +'IV38': u'Ligos Indeo 3.8', +'IV39': u'Ligos Indeo 3.9', +'IV40': u'Ligos Indeo Interactive 4.0', +'IV41': u'Ligos Indeo Interactive 4.1', +'IV42': u'Ligos Indeo Interactive 4.2', +'IV43': u'Ligos Indeo Interactive 4.3', +'IV44': u'Ligos Indeo Interactive 4.4', +'IV45': u'Ligos Indeo Interactive 4.5', +'IV46': u'Ligos Indeo Interactive 4.6', +'IV47': u'Ligos Indeo Interactive 4.7', +'IV48': u'Ligos Indeo Interactive 4.8', +'IV49': u'Ligos Indeo Interactive 4.9', +'IV50': u'Ligos Indeo Interactive 5.0', +'JBYR': u'Kensington ?JBYR?', 
+'JPEG': u'Still Image JPEG DIB', +'JPGL': u'Webcam JPEG Light?', +'KMVC': u'Karl Morton\'s Video Codec', +'LEAD': u'LEAD Video Codec', +'Ljpg': u'LEAD MJPEG Codec', +'M261': u'Microsoft H.261', +'M263': u'Microsoft H.263', +'M4S2': u'Microsoft MPEG-4 (M4S2)', +'m4s2': u'Microsoft MPEG-4 (m4s2)', +'MC12': u'ATI Motion Compensation Format (MC12)', +'MCAM': u'ATI Motion Compensation Format (MCAM)', +'MJ2C': u'Morgan Multimedia Motion JPEG2000', +'mJPG': u'IBM Motion JPEG w/ Huffman Tables', +'MJPG': u'Motion JPEG DIB', +'MP42': u'Microsoft MPEG-4 (low-motion)', +'MP43': u'Microsoft MPEG-4 (fast-motion)', +'MP4S': u'Microsoft MPEG-4 (MP4S)', +'mp4s': u'Microsoft MPEG-4 (mp4s)', +'MPEG': u'MPEG 1 Video I-Frame', +'MPG4': u'Microsoft MPEG-4 Video High Speed Compressor', +'MPGI': u'Sigma Designs MPEG', +'MRCA': u'FAST Multimedia Mrcodec', +'MRCA': u'Martin Regen Codec', +'MRLE': u'Microsoft RLE', +'MRLE': u'Run Length Encoding', +'MSVC': u'Microsoft Video 1', +'MTX1': u'Matrox ?MTX1?', +'MTX2': u'Matrox ?MTX2?', +'MTX3': u'Matrox ?MTX3?', +'MTX4': u'Matrox ?MTX4?', +'MTX5': u'Matrox ?MTX5?', +'MTX6': u'Matrox ?MTX6?', +'MTX7': u'Matrox ?MTX7?', +'MTX8': u'Matrox ?MTX8?', +'MTX9': u'Matrox ?MTX9?', +'MV12': u'?MV12?', +'MWV1': u'Aware Motion Wavelets', +'nAVI': u'?nAVI?', +'NTN1': u'Nogatech Video Compression 1', +'NVS0': u'nVidia GeForce Texture (NVS0)', +'NVS1': u'nVidia GeForce Texture (NVS1)', +'NVS2': u'nVidia GeForce Texture (NVS2)', +'NVS3': u'nVidia GeForce Texture (NVS3)', +'NVS4': u'nVidia GeForce Texture (NVS4)', +'NVS5': u'nVidia GeForce Texture (NVS5)', +'NVT0': u'nVidia GeForce Texture (NVT0)', +'NVT1': u'nVidia GeForce Texture (NVT1)', +'NVT2': u'nVidia GeForce Texture (NVT2)', +'NVT3': u'nVidia GeForce Texture (NVT3)', +'NVT4': u'nVidia GeForce Texture (NVT4)', +'NVT5': u'nVidia GeForce Texture (NVT5)', +'PDVC': u'I-O Data Device Digital Video Capture DV codec', +'PGVV': u'Radius Video Vision', +'PHMO': u'IBM Photomotion', +'PIM1': u'Pegasus Imaging 
?PIM1?', +'PIM2': u'Pegasus Imaging ?PIM2?', +'PIMJ': u'Pegasus Imaging Lossless JPEG', +'PVEZ': u'Horizons Technology PowerEZ', +'PVMM': u'PacketVideo Corporation MPEG-4', +'PVW2': u'Pegasus Imaging Wavelet Compression', +'QPEG': u'Q-Team QPEG 1.0', +'qpeq': u'Q-Team QPEG 1.1', +'RGBT': u'Computer Concepts 32-bit support', +'RLE ': u'Microsoft Run Length Encoder', +'RLE4': u'Run Length Encoded 4', +'RLE8': u'Run Length Encoded 8', +'RT21': u'Intel Indeo 2.1', +'RT21': u'Intel Real Time Video 2.1', +'rv20': u'RealVideo G2', +'rv30': u'RealVideo 8', +'RVX ': u'Intel RDX (RVX )', +'s422': u'Tekram VideoCap C210 YUV 4:2:2', +'SDCC': u'Sun Communication Digital Camera Codec', +'SFMC': u'CrystalNet Surface Fitting Method', +'SMSC': u'Radius SMSC', +'SMSD': u'Radius SMSD', +'smsv': u'WorldConnect Wavelet Video', +'SPIG': u'Radius Spigot', +'SPLC': u'Splash Studios ACM Audio Codec', +'SQZ2': u'Microsoft VXTreme Video Codec V2', +'STVA': u'ST CMOS Imager Data (Bayer)', +'STVB': u'ST CMOS Imager Data (Nudged Bayer)', +'STVC': u'ST CMOS Imager Data (Bunched)', +'STVX': u'ST CMOS Imager Data (Extended CODEC Data Format)', +'STVY': u'ST CMOS Imager Data (Extended CODEC Data Format with Correction Data)', +'SV10': u'Sorenson Video R1', +'SVQ1': u'Sorenson Video', +'SVQ1': u'Sorenson Video R3', +'TLMS': u'TeraLogic Motion Intraframe Codec (TLMS)', +'TLST': u'TeraLogic Motion Intraframe Codec (TLST)', +'TM20': u'Duck TrueMotion 2.0', +'TM2X': u'Duck TrueMotion 2X', +'TMIC': u'TeraLogic Motion Intraframe Codec (TMIC)', +'TMOT': u'Horizons Technology TrueMotion S', +'tmot': u'Horizons TrueMotion Video Compression', +'TR20': u'Duck TrueMotion RealTime 2.0', +'TSCC': u'TechSmith Screen Capture Codec', +'TV10': u'Tecomac Low-Bit Rate Codec', +'TY0N': u'Trident ?TY0N?', +'TY2C': u'Trident ?TY2C?', +'TY2N': u'Trident ?TY2N?', +'UCOD': u'eMajix.com ClearVideo', +'ULTI': u'IBM Ultimotion', +'UYVY': u'UYVY 4:2:2 byte ordering', +'V261': u'Lucent VX2000S', +'V422': u'24 bit YUV 4:2:2 
Format', +'V655': u'16 bit YUV 4:2:2 Format', +'VCR1': u'ATI VCR 1.0', +'VCR2': u'ATI VCR 2.0', +'VCR3': u'ATI VCR 3.0', +'VCR4': u'ATI VCR 4.0', +'VCR5': u'ATI VCR 5.0', +'VCR6': u'ATI VCR 6.0', +'VCR7': u'ATI VCR 7.0', +'VCR8': u'ATI VCR 8.0', +'VCR9': u'ATI VCR 9.0', +'VDCT': u'Video Maker Pro DIB', +'VDOM': u'VDOnet VDOWave', +'VDOW': u'VDOnet VDOLive (H.263)', +'VDTZ': u'Darim Vison VideoTizer YUV', +'VGPX': u'VGPixel Codec', +'VIDS': u'Vitec Multimedia YUV 4:2:2 CCIR 601 for V422', +'VIDS': u'YUV 4:2:2 CCIR 601 for V422', +'VIFP': u'?VIFP?', +'VIVO': u'Vivo H.263 v2.00', +'vivo': u'Vivo H.263', +'VIXL': u'Miro Video XL', +'VLV1': u'Videologic VLCAP.DRV', +'VP30': u'On2 VP3.0', +'VP31': u'On2 VP3.1', +'VX1K': u'VX1000S Video Codec', +'VX2K': u'VX2000S Video Codec', +'VXSP': u'VX1000SP Video Codec', +'WBVC': u'Winbond W9960', +'WHAM': u'Microsoft Video 1 (WHAM)', +'WINX': u'Winnov Software Compression', +'WJPG': u'AverMedia Winbond JPEG', +'WMV1': u'Windows Media Video V7', +'WMV2': u'Windows Media Video V8', +'WMV3': u'Windows Media Video V9', +'WNV1': u'Winnov Hardware Compression', +'x263': u'Xirlink H.263', +'XLV0': u'NetXL Video Decoder', +'XMPG': u'Xing MPEG (I-Frame only)', +'XVID': u'XviD MPEG-4', +'XXAN': u'?XXAN?', +'Y211': u'YUV 2:1:1 Packed', +'Y411': u'YUV 4:1:1 Packed', +'Y41B': u'YUV 4:1:1 Planar', +'Y41P': u'PC1 4:1:1', +'Y41T': u'PC1 4:1:1 with transparency', +'Y42B': u'YUV 4:2:2 Planar', +'Y42T': u'PCI 4:2:2 with transparency', +'Y8 ': u'Grayscale video', +'YC12': u'Intel YUV 12 codec', +'YC12': u'Intel YUV12 Codec', +'YUV8': u'Winnov Caviar YUV8', +'YUV9': u'Intel YUV9', +'YUY2': u'Uncompressed YUV 4:2:2', +'YUYV': u'Canopus YUV', +'YV12': u'YVU12 Planar', +'YVU9': u'Intel YVU9 Planar', +'YVYU': u'YVYU 4:2:2 byte ordering', +'ZLIB': u'?ZLIB?', +'ZPEG': u'Metheus Video Zipper' +} + diff --git a/libs/hachoir_parser/video/mov.py b/libs/hachoir_parser/video/mov.py new file mode 100644 index 0000000..cd79ce1 --- /dev/null +++ 
b/libs/hachoir_parser/video/mov.py @@ -0,0 +1,327 @@ +""" +Apple Quicktime Movie (file extension ".mov") parser. + +Documents: +- Parsing and Writing QuickTime Files in Java (by Chris Adamson, 02/19/2003) + http://www.onjava.com/pub/a/onjava/2003/02/19/qt_file_format.html +- QuickTime File Format (official technical reference) + http://developer.apple.com/documentation/QuickTime/QTFF/qtff.pdf +- Apple QuickTime: + http://wiki.multimedia.cx/index.php?title=Apple_QuickTime +- File type (ftyp): + http://www.ftyps.com/ + +Author: Victor Stinner +Creation: 2 august 2006 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (ParserError, FieldSet, MissingField, + UInt8, Int16, UInt16, UInt32, TimestampMac32, + String, PascalString8, CString, + RawBytes, PaddingBytes) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal + +class QTFloat32(FieldSet): + static_size = 32 + def createFields(self): + yield Int16(self, "int_part") + yield UInt16(self, "float_part") + def createValue(self): + return self["int_part"].value + float(self["float_part"].value) / 65535 + def createDescription(self): + return str(self.value) + +class AtomList(FieldSet): + def createFields(self): + while not self.eof: + yield Atom(self, "atom[]") + +class TrackHeader(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + + # TODO: sum of : + # TrackEnabled = 1; + # TrackInMovie = 2; + # TrackInPreview = 4; + # TrackInPoster = 8 + yield RawBytes(self, "flags", 3) + + yield TimestampMac32(self, "creation_date") + yield TimestampMac32(self, "lastmod_date") + yield UInt32(self, "track_id") + yield PaddingBytes(self, "reserved[]", 8) + yield UInt32(self, "duration") + yield PaddingBytes(self, "reserved[]", 8) + yield Int16(self, "video_layer", "Middle is 0, negative in front") + yield PaddingBytes(self, "other", 2) + yield QTFloat32(self, "geom_a", "Width scale") + yield QTFloat32(self, 
"geom_b", "Width rotate") + yield QTFloat32(self, "geom_u", "Width angle") + yield QTFloat32(self, "geom_c", "Height rotate") + yield QTFloat32(self, "geom_d", "Height scale") + yield QTFloat32(self, "geom_v", "Height angle") + yield QTFloat32(self, "geom_x", "Position X") + yield QTFloat32(self, "geom_y", "Position Y") + yield QTFloat32(self, "geom_w", "Divider scale") + yield QTFloat32(self, "frame_size_width") + yield QTFloat32(self, "frame_size_height") + +class HDLR(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield String(self, "subtype", 8) + yield String(self, "manufacturer", 4) + yield UInt32(self, "res_flags") + yield UInt32(self, "res_flags_mask") + if self.root.is_mpeg4: + yield CString(self, "name") + else: + yield PascalString8(self, "name") + +class MediaHeader(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield TimestampMac32(self, "creation_date") + yield TimestampMac32(self, "lastmod_date") + yield UInt32(self, "time_scale") + yield UInt32(self, "duration") + yield UInt16(self, "mac_lang") + yield Int16(self, "quality") + +class ELST(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield UInt32(self, "nb_edits") + yield UInt32(self, "length") + yield UInt32(self, "start") + yield QTFloat32(self, "playback_speed") + +class Load(FieldSet): + def createFields(self): + yield UInt32(self, "start") + yield UInt32(self, "length") + yield UInt32(self, "flags") # PreloadAlways = 1 or TrackEnabledPreload = 2 + yield UInt32(self, "hints") # KeepInBuffer = 0x00000004; HighQuality = 0x00000100; SingleFieldVideo = 0x00100000 + +class MovieHeader(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield TimestampMac32(self, 
"creation_date") + yield TimestampMac32(self, "lastmod_date") + yield UInt32(self, "time_scale") + yield UInt32(self, "duration") + yield QTFloat32(self, "play_speed") + yield UInt16(self, "volume") + yield PaddingBytes(self, "reserved[]", 10) + yield QTFloat32(self, "geom_a", "Width scale") + yield QTFloat32(self, "geom_b", "Width rotate") + yield QTFloat32(self, "geom_u", "Width angle") + yield QTFloat32(self, "geom_c", "Height rotate") + yield QTFloat32(self, "geom_d", "Height scale") + yield QTFloat32(self, "geom_v", "Height angle") + yield QTFloat32(self, "geom_x", "Position X") + yield QTFloat32(self, "geom_y", "Position Y") + yield QTFloat32(self, "geom_w", "Divider scale") + yield UInt32(self, "preview_start") + yield UInt32(self, "preview_length") + yield UInt32(self, "still_poster") + yield UInt32(self, "sel_start") + yield UInt32(self, "sel_length") + yield UInt32(self, "current_time") + yield UInt32(self, "next_track") + +class FileType(FieldSet): + def createFields(self): + yield String(self, "brand", 4, "Major brand") + yield UInt32(self, "version", "Version") + while not self.eof: + yield String(self, "compat_brand[]", 4, "Compatible brand") + +class META(FieldSet): + def createFields(self): + yield UInt32(self, "unk") + yield AtomList(self, "tags") + +class STCO(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield UInt32(self, "count", description="Total entries in offset table") + for i in xrange(self['count'].value): + yield UInt32(self, "chunk_offset[]") + +class SampleDescription(FieldSet): + def createFields(self): + yield UInt32(self, "size", "Sample Description Size") + yield RawBytes(self, "format", 4, "Data Format (codec)") + yield RawBytes(self, "reserved", 6, "Reserved") + yield UInt16(self, "index", "Data Reference Index") + yield UInt16(self, "version") + yield UInt16(self, "revision_level") + yield RawBytes(self, "vendor_id", 4) + yield UInt32(self, 
"temporal_quality") + yield UInt32(self, "spatial_quality") + yield UInt16(self, "width", "Width (pixels)") + yield UInt16(self, "height", "Height (pixels)") + yield UInt32(self, "horizontal_resolution") + yield UInt32(self, "vertical resolution") + yield UInt32(self, "data_size") + yield UInt16(self, "frame_count") + size = self['size'].value - self.current_size//8 + if size > 0: + yield RawBytes(self, "extra_data", size) + +class STSD(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield UInt32(self, "count", description="Total entries in table") + for i in xrange(self['count'].value): + yield SampleDescription(self, "sample_description[]") + +class STSS(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield UInt32(self, "count", description="Number of sync samples") + for i in xrange(self['count'].value): + yield UInt32(self, "sync_sample[]") + +class STSZ(FieldSet): + def createFields(self): + yield textHandler(UInt8(self, "version"), hexadecimal) + yield RawBytes(self, "flags", 3) + yield UInt32(self, "uniform_size", description="Uniform size of each sample (0 if non-uniform)") + yield UInt32(self, "count", description="Number of samples") + if self['uniform_size'].value == 0: + for i in xrange(self['count'].value): + yield UInt32(self, "sample_size[]") + +class Atom(FieldSet): + tag_info = { + # TODO: Use dictionary of dictionaries, like Matroska parser does + # "elst" is a child of "edts", but not of "moov" for example + "moov": (AtomList, "movie", "Movie"), + "trak": (AtomList, "track", "Track"), + "mdia": (AtomList, "media", "Media"), + "edts": (AtomList, "edts", ""), + "minf": (AtomList, "minf", ""), + "stbl": (AtomList, "stbl", "Sample Table"), + "stco": (STCO, "stsd", "Sample Table Chunk Offset"), + "stsd": (STSD, "stsd", "Sample Table Sample Description"), + "stss": (STSS, "stss", "Sample 
Table Sync Samples"), + "stsz": (STSZ, "stsz", "Sample Table Sizes"), + "dinf": (AtomList, "dinf", ""), + "udta": (AtomList, "udta", ""), + "ilst": (AtomList, "ilst", ""), + "trkn": (AtomList, "trkn", "Metadata: Track number"), + "disk": (AtomList, "disk", "Metadata: Disk number"), + "tmpo": (AtomList, "tempo", "Metadata: Tempo"), + "cpil": (AtomList, "cpil", "Metadata: Compilation"), + "gnre": (AtomList, "gnre", "Metadata: Genre"), + "\xa9alb": (AtomList, "album", "Metadata: Album name"), + "\xa9ART": (AtomList, "artist", "Metadata: Artist name"), + "\xa9cmt": (AtomList, "comment", "Metadata: Comment"), + "\xa9nam": (AtomList, "name", "Metadata: Track name"), + "\xa9too": (AtomList, "tool", "Metadata: Creator program"), + "\xa9wrt": (AtomList, "composer", "Metadata: Composer name"), + "\xa9day": (AtomList, "date", "Metadata: Date of creation"), + "covr": (AtomList, "cover", "Metadata: Cover art"), + "----": (AtomList, "misc", "Metadata: Miscellaneous"), + "meta": (META, "meta", "File metadata"), + "elst": (ELST, "edts", ""), + "tkhd": (TrackHeader, "track_hdr", "Track header"), + "hdlr": (HDLR, "hdlr", ""), + "mdhd": (MediaHeader, "media_hdr", "Media header"), + "load": (Load, "load", ""), + "mvhd": (MovieHeader, "movie_hdr", "Movie header"), + "ftyp": (FileType, "file_type", "File type"), + } + tag_handler = [ item[0] for item in tag_info ] + tag_desc = [ item[1] for item in tag_info ] + + def createFields(self): + yield UInt32(self, "size") + yield RawBytes(self, "tag", 4) + size = self["size"].value + if size == 1: + raise ParserError("Extended size is not supported!") + #yield UInt64(self, "size64") + size = self["size64"].value + elif size == 0: + #size = (self.root.size - self.root.current_size - self.current_size) / 8 + if self._size is None: + size = (self.parent.size - self.current_size) / 8 - 8 + else: + size = (self.size - self.current_size) / 8 + else: + size = size - 8 + if 0 < size: + tag = self["tag"].value + if tag in self.tag_info: + handler, 
name, desc = self.tag_info[tag] + yield handler(self, name, desc, size=size*8) + else: + yield RawBytes(self, "data", size) + + def createDescription(self): + return "Atom: %s" % self["tag"].value + +class MovFile(Parser): + PARSER_TAGS = { + "id": "mov", + "category": "video", + "file_ext": ("mov", "qt", "mp4", "m4v", "m4a", "m4p", "m4b"), + "mime": (u"video/quicktime", u'video/mp4'), + "min_size": 8*8, + "magic": (("moov", 4*8),), + "description": "Apple QuickTime movie" + } + BRANDS = { + # File type brand => MIME type + 'mp41': u'video/mp4', + 'mp42': u'video/mp4', + } + endian = BIG_ENDIAN + + def __init__(self, *args, **kw): + Parser.__init__(self, *args, **kw) + self.is_mpeg4 = False + + def validate(self): + # TODO: Write better code, erk! + size = self.stream.readBits(0, 32, self.endian) + if size < 8: + return "Invalid first atom size" + tag = self.stream.readBytes(4*8, 4) + return tag in ("ftyp", "moov", "free") + + def createFields(self): + while not self.eof: + yield Atom(self, "atom[]") + + def createMimeType(self): + first = self[0] + try: + # Read brands in the file type + if first['tag'].value != "ftyp": + return None + file_type = first["file_type"] + brand = file_type["brand"].value + if brand in self.BRANDS: + return self.BRANDS[brand] + for field in file_type.array("compat_brand"): + brand = field.value + if brand in self.BRANDS: + return self.BRANDS[brand] + except MissingField: + pass + return None + diff --git a/libs/hachoir_parser/video/mpeg_ts.py b/libs/hachoir_parser/video/mpeg_ts.py new file mode 100644 index 0000000..56b9bc8 --- /dev/null +++ b/libs/hachoir_parser/video/mpeg_ts.py @@ -0,0 +1,102 @@ +""" +MPEG-2 Transport Stream parser. 
+ +Documentation: +- MPEG-2 Transmission + http://erg.abdn.ac.uk/research/future-net/digital-video/mpeg2-trans.html + +Author: Victor Stinner +Creation date: 13 january 2007 +""" + +from hachoir_parser import Parser +from hachoir_core.field import (FieldSet, ParserError, MissingField, + UInt8, Enum, Bit, Bits, RawBytes) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.text_handler import textHandler, hexadecimal + +class Packet(FieldSet): + def __init__(self, *args): + FieldSet.__init__(self, *args) + if self["has_error"].value: + self._size = 204*8 + else: + self._size = 188*8 + + PID = { + 0x0000: "Program Association Table (PAT)", + 0x0001: "Conditional Access Table (CAT)", + # 0x0002..0x000f: reserved + # 0x0010..0x1FFE: network PID, program map PID, elementary PID, etc. + # TODO: Check above values + #0x0044: "video", + #0x0045: "audio", + 0x1FFF: "Null packet", + } + + def createFields(self): + yield textHandler(UInt8(self, "sync", 8), hexadecimal) + if self["sync"].value != 0x47: + raise ParserError("MPEG-2 TS: Invalid synchronization byte") + yield Bit(self, "has_error") + yield Bit(self, "payload_unit_start") + yield Bit(self, "priority") + yield Enum(textHandler(Bits(self, "pid", 13, "Program identifier"), hexadecimal), self.PID) + yield Bits(self, "scrambling_control", 2) + yield Bit(self, "has_adaptation") + yield Bit(self, "has_payload") + yield Bits(self, "counter", 4) + yield RawBytes(self, "payload", 184) + if self["has_error"].value: + yield RawBytes(self, "error_correction", 16) + + def createDescription(self): + text = "Packet: PID %s" % self["pid"].display + if self["payload_unit_start"].value: + text += ", start of payload" + return text + + def isValid(self): + if not self["has_payload"].value and not self["has_adaptation"].value: + return u"No payload and no adaptation" + pid = self["pid"].value + if (0x0002 <= pid <= 0x000f) or (0x2000 <= pid): + return u"Invalid program identifier (%s)" % self["pid"].display + return "" + 
+class MPEG_TS(Parser): + PARSER_TAGS = { + "id": "mpeg_ts", + "category": "video", + "file_ext": ("ts",), + "min_size": 188*8, + "description": u"MPEG-2 Transport Stream" + } + endian = BIG_ENDIAN + + def validate(self): + sync = self.stream.searchBytes("\x47", 0, 204*8) + if sync is None: + return "Unable to find synchronization byte" + for index in xrange(5): + try: + packet = self["packet[%u]" % index] + except (ParserError, MissingField): + if index and self.eof: + return True + else: + return "Unable to get packet #%u" % index + err = packet.isValid() + if err: + return "Packet #%u is invalid: %s" % (index, err) + return True + + def createFields(self): + sync = self.stream.searchBytes("\x47", 0, 204*8) + if sync is None: + raise ParserError("Unable to find synchronization byte") + elif sync: + yield RawBytes(self, "incomplete_packet", sync//8) + while not self.eof: + yield Packet(self, "packet[]") + diff --git a/libs/hachoir_parser/video/mpeg_video.py b/libs/hachoir_parser/video/mpeg_video.py new file mode 100644 index 0000000..5a5d51c --- /dev/null +++ b/libs/hachoir_parser/video/mpeg_video.py @@ -0,0 +1,576 @@ +""" +Moving Picture Experts Group (MPEG) video version 1 and 2 parser. + +Information: +- http://www.mpucoder.com/DVD/ +- http://dvd.sourceforge.net/dvdinfo/ +- http://www.mit.jyu.fi/mweber/leffakone/software/parsempegts/ +- http://homepage.mac.com/rnc/EditMpegHeaderIFO.html +- http://standards.iso.org/ittf/PubliclyAvailableStandards/c025029_ISO_IEC_TR_11172-5_1998(E)_Software_Simulation.zip + This is a sample encoder/decoder implementation for MPEG-1. 
+ +Author: Victor Stinner +Creation date: 15 september 2006 +""" + +from hachoir_parser import Parser +from hachoir_parser.audio.mpeg_audio import MpegAudioFile +from hachoir_core.field import (FieldSet, + FieldError, ParserError, + Bit, Bits, Bytes, RawBits, PaddingBits, NullBits, + UInt8, UInt16, + RawBytes, PaddingBytes, + Enum) +from hachoir_core.endian import BIG_ENDIAN +from hachoir_core.stream import StringInputStream +from hachoir_core.text_handler import textHandler, hexadecimal + +class FragmentGroup: + def __init__(self, parser): + self.items = [] + self.parser = parser + self.args = {} + + def add(self, item): + self.items.append(item) + + def createInputStream(self): + # FIXME: Use lazy stream creation + data = [] + for item in self.items: + if 'rawdata' in item: + data.append( item["rawdata"].value ) + data = "".join(data) + + # FIXME: Use smarter code to send arguments + tags = {"class": self.parser, "args": self.args} + tags = tags.iteritems() + return StringInputStream(data, "", tags=tags) + +class CustomFragment(FieldSet): + def __init__(self, parent, name, size, parser, description=None, group=None): + FieldSet.__init__(self, parent, name, description, size=size) + if not group: + group = FragmentGroup(parser) + self.group = group + self.group.add(self) + + def createFields(self): + yield RawBytes(self, "rawdata", self.size//8) + + def _createInputStream(self, **args): + return self.group.createInputStream() + +class Timestamp(FieldSet): + static_size = 36 + + def createValue(self): + return (self["c"].value << 30) + (self["b"].value << 15) + self["a"].value + + def createFields(self): + yield Bits(self, "c", 3) + yield Bit(self, "sync[]") # =True + yield Bits(self, "b", 15) + yield Bit(self, "sync[]") # =True + yield Bits(self, "a", 15) + yield Bit(self, "sync[]") # =True + +class SCR(FieldSet): + static_size = 35 + + def createFields(self): + yield Bits(self, "scr_a", 3) + yield Bit(self, "sync[]") # =True + yield Bits(self, "scr_b", 15) + 
yield Bit(self, "sync[]") # =True + yield Bits(self, "scr_c", 15) + +class PackHeader(FieldSet): + def createFields(self): + if self.stream.readBits(self.absolute_address, 2, self.endian) == 1: + # MPEG version 2 + yield Bits(self, "sync[]", 2) + yield SCR(self, "scr") + yield Bit(self, "sync[]") + yield Bits(self, "scr_ext", 9) + yield Bit(self, "sync[]") + yield Bits(self, "mux_rate", 22) + yield Bits(self, "sync[]", 2) + yield PaddingBits(self, "reserved", 5, pattern=1) + yield Bits(self, "stuffing_length", 3) + count = self["stuffing_length"].value + if count: + yield PaddingBytes(self, "stuffing", count, pattern="\xff") + else: + # MPEG version 1 + yield Bits(self, "sync[]", 4) + yield Bits(self, "scr_a", 3) + yield Bit(self, "sync[]") + yield Bits(self, "scr_b", 15) + yield Bit(self, "sync[]") + yield Bits(self, "scr_c", 15) + yield Bits(self, "sync[]", 2) + yield Bits(self, "mux_rate", 22) + yield Bit(self, "sync[]") + + def validate(self): + if self["mux_rate"].value == 0: + return "Invalid mux rate" + sync0 = self["sync[0]"] + if (sync0.size == 2 and sync0.value == 1): + # MPEG2 + pass + if not self["sync[1]"].value \ + or not self["sync[2]"].value \ + or self["sync[3]"].value != 3: + return "Invalid synchronisation bits" + elif (sync0.size == 4 and sync0.value == 2): + # MPEG1 + if not self["sync[1]"].value \ + or not self["sync[2]"].value \ + or self["sync[3]"].value != 3 \ + or not self["sync[4]"].value: + return "Invalid synchronisation bits" + else: + return "Unknown version" + return True + +class SystemHeader(FieldSet): + def createFields(self): + yield Bits(self, "marker[]", 1) + yield Bits(self, "rate_bound", 22) + yield Bits(self, "marker[]", 1) + yield Bits(self, "audio_bound", 6) + yield Bit(self, "fixed_bitrate") + yield Bit(self, "csps", description="Constrained system parameter stream") + yield Bit(self, "audio_lock") + yield Bit(self, "video_lock") + yield Bits(self, "marker[]", 1) + yield Bits(self, "video_bound", 5) + length = 
self['../length'].value-5 + if length: + yield RawBytes(self, "raw[]", length) + +class defaultParser(FieldSet): + def createFields(self): + yield RawBytes(self, "data", self["../length"].value) + +class Padding(FieldSet): + def createFields(self): + yield PaddingBytes(self, "data", self["../length"].value) + +class VideoExtension2(FieldSet): + def createFields(self): + yield Bit(self, "sync[]") # =True + yield Bits(self, "ext_length", 7) + yield NullBits(self, "reserved[]", 8) + size = self["ext_length"].value + if size: + yield RawBytes(self, "ext_bytes", size) + +class VideoExtension1(FieldSet): + def createFields(self): + yield Bit(self, "has_private") + yield Bit(self, "has_pack_lgth") + yield Bit(self, "has_pack_seq") + yield Bit(self, "has_pstd_buffer") + yield Bits(self, "sync[]", 3) # =7 + yield Bit(self, "has_extension2") + + if self["has_private"].value: + yield RawBytes(self, "private", 16) + + if self["has_pack_lgth"].value: + yield UInt8(self, "pack_lgth") + + if self["has_pack_seq"].value: + yield Bit(self, "sync[]") # =True + yield Bits(self, "pack_seq_counter", 7) + yield Bit(self, "sync[]") # =True + yield Bit(self, "mpeg12_id") + yield Bits(self, "orig_stuffing_length", 6) + + if self["has_pstd_buffer"].value: + yield Bits(self, "sync[]", 2) # =1 + yield Enum(Bit(self, "pstd_buffer_scale"), + {True: "128 bytes", False: "1024 bytes"}) + yield Bits(self, "pstd_size", 13) + +class VideoSeqHeader(FieldSet): + ASPECT=["forbidden", "1.0000 (VGA etc.)", "0.6735", + "0.7031 (16:9, 625line)", "0.7615", "0.8055", + "0.8437 (16:9, 525line)", "0.8935", + "0.9157 (CCIR601, 625line)", "0.9815", "1.0255", "1.0695", + "1.0950 (CCIR601, 525line)", "1.1575", "1.2015", "reserved"] + FRAMERATE=["forbidden", "23.976 fps", "24 fps", "25 fps", "29.97 fps", + "30 fps", "50 fps", "59.94 fps", "60 fps"] + def createFields(self): + yield Bits(self, "width", 12) + yield Bits(self, "height", 12) + yield Enum(Bits(self, "aspect", 4), self.ASPECT) + yield Enum(Bits(self, 
"frame_rate", 4), self.FRAMERATE) + yield Bits(self, "bit_rate", 18, "Bit rate in units of 50 bytes") + yield Bits(self, "sync[]", 1) # =1 + yield Bits(self, "vbv_size", 10, "Video buffer verifier size, in units of 16768") + yield Bit(self, "constrained_params_flag") + yield Bit(self, "has_intra_quantizer") + if self["has_intra_quantizer"].value: + for i in range(64): + yield Bits(self, "intra_quantizer[]", 8) + yield Bit(self, "has_non_intra_quantizer") + if self["has_non_intra_quantizer"].value: + for i in range(64): + yield Bits(self, "non_intra_quantizer[]", 8) + +class GroupStart(FieldSet): + def createFields(self): + yield Bit(self, "drop_frame") + yield Bits(self, "time_hh", 5) + yield Bits(self, "time_mm", 6) + yield PaddingBits(self, "time_pad[]", 1) + yield Bits(self, "time_ss", 6) + yield Bits(self, "time_ff", 6) + yield Bit(self, "closed_group") + yield Bit(self, "broken_group") + yield PaddingBits(self, "pad[]", 5) + +class PacketElement(FieldSet): + def createFields(self): + yield Bits(self, "sync[]", 2) # =2 + if self["sync[0]"].value != 2: + raise ParserError("Unknown video elementary data") + yield Bits(self, "is_scrambled", 2) + yield Bits(self, "priority", 1) + yield Bit(self, "alignment") + yield Bit(self, "is_copyrighted") + yield Bit(self, "is_original") + yield Bit(self, "has_pts", "Presentation Time Stamp") + yield Bit(self, "has_dts", "Decode Time Stamp") + yield Bit(self, "has_escr", "Elementary Stream Clock Reference") + yield Bit(self, "has_es_rate", "Elementary Stream rate") + yield Bit(self, "dsm_trick_mode") + yield Bit(self, "has_copy_info") + yield Bit(self, "has_prev_crc", "If True, previous PES packet CRC follows") + yield Bit(self, "has_extension") + yield UInt8(self, "size") + + # Time stamps + if self["has_pts"].value: + yield Bits(self, "sync[]", 4) # =2, or 3 if has_dts=True + yield Timestamp(self, "pts") + if self["has_dts"].value: + if not(self["has_pts"].value): + raise ParserError("Invalid PTS/DTS values") + yield 
Bits(self, "sync[]", 4) # =1 + yield Timestamp(self, "dts") + + if self["has_escr"].value: + yield Bits(self, "sync[]", 2) # =0 + yield SCR(self, "escr") + + if self["has_es_rate"].value: + yield Bit(self, "sync[]") # =True + yield Bits(self, "es_rate", 14) # in units of 50 bytes/second + yield Bit(self, "sync[]") # =True + + if self["has_copy_info"].value: + yield Bit(self, "sync[]") # =True + yield Bits(self, "copy_info", 7) + + if self["has_prev_crc"].value: + yield textHandler(UInt16(self, "prev_crc"), hexadecimal) + + # --- Extension --- + if self["has_extension"].value: + yield VideoExtension1(self, "extension") + if self["extension/has_extension2"].value: + yield VideoExtension2(self, "extension2") + +class VideoExtension(FieldSet): + EXT_TYPE = {1:'Sequence',2:'Sequence Display',8:'Picture Coding'} + def createFields(self): + yield Enum(Bits(self, "ext_type", 4), self.EXT_TYPE) + ext_type=self['ext_type'].value + if ext_type==1: + # Sequence extension + yield Bits(self, 'profile_and_level', 8) + yield Bit(self, 'progressive_sequence') + yield Bits(self, 'chroma_format', 2) + yield Bits(self, 'horiz_size_ext', 2) + yield Bits(self, 'vert_size_ext', 2) + yield Bits(self, 'bit_rate_ext', 12) + yield Bits(self, 'pad[]', 1) + yield Bits(self, 'vbv_buffer_size_ext', 8) + yield Bit(self, 'low_delay') + yield Bits(self, 'frame_rate_ext_n', 2) + yield Bits(self, 'frame_rate_ext_d', 5) + elif ext_type==2: + # Sequence Display extension + yield Bits(self, 'video_format', 3) + yield Bit(self, 'color_desc_present') + if self['color_desc_present'].value: + yield UInt8(self, 'color_primaries') + yield UInt8(self, 'transfer_characteristics') + yield UInt8(self, 'matrix_coeffs') + yield Bits(self, 'display_horiz_size', 14) + yield Bits(self, 'pad[]', 1) + yield Bits(self, 'display_vert_size', 14) + yield NullBits(self, 'pad[]', 3) + elif ext_type==8: + yield Bits(self, 'f_code[0][0]', 4, description="forward horizontal") + yield Bits(self, 'f_code[0][1]', 4, 
description="forward vertical") + yield Bits(self, 'f_code[1][0]', 4, description="backward horizontal") + yield Bits(self, 'f_code[1][1]', 4, description="backward vertical") + yield Bits(self, 'intra_dc_precision', 2) + yield Bits(self, 'picture_structure', 2) + yield Bit(self, 'top_field_first') + yield Bit(self, 'frame_pred_frame_dct') + yield Bit(self, 'concealment_motion_vectors') + yield Bit(self, 'q_scale_type') + yield Bit(self, 'intra_vlc_format') + yield Bit(self, 'alternate_scan') + yield Bit(self, 'repeat_first_field') + yield Bit(self, 'chroma_420_type') + yield Bit(self, 'progressive_frame') + yield Bit(self, 'composite_display') + if self['composite_display'].value: + yield Bit(self, 'v_axis') + yield Bits(self, 'field_sequence', 3) + yield Bit(self, 'sub_carrier') + yield Bits(self, 'burst_amplitude', 7) + yield Bits(self, 'sub_carrier_phase', 8) + yield NullBits(self, 'pad[]', 2) + else: + yield NullBits(self, 'pad[]', 6) + else: + yield RawBits(self, "raw[]", 4) + +class VideoPicture(FieldSet): + CODING_TYPE = ["forbidden","intra-coded (I)", + "predictive-coded (P)", + "bidirectionally-predictive-coded (B)", + "dc intra-coded (D)", "reserved", + "reserved", "reserved"] + def createFields(self): + yield Bits(self, "temporal_ref", 10) + yield Enum(Bits(self, "coding_type", 3), self.CODING_TYPE) + yield Bits(self, "vbv_delay", 16) + if self['coding_type'].value in (2,3): + # predictive coding + yield Bit(self, 'full_pel_fwd_vector') + yield Bits(self, 'forward_f_code', 3) + if self['coding_type'].value == 3: + # bidi predictive coding + yield Bit(self, 'full_pel_back_vector') + yield Bits(self, 'backward_f_code', 3) + yield Bits(self, "padding", 8-(self.current_size % 8)) + +class VideoSlice(FieldSet): + def createFields(self): + yield Bits(self, "quantizer_scale", 5) + start=self.absolute_address+self.current_size+3 + pos=self.stream.searchBytes('\0\0\1',start,start+1024*1024*8) # seek forward by at most 1MB + if pos is None: pos=self.root.size + 
yield RawBits(self, "data", pos-start+3) + +class VideoChunk(FieldSet): + tag_info = { + 0x00: ("pict_start[]", VideoPicture, "Picture start"), + 0xB2: ("data_start[]", None, "Data start"), + 0xB3: ("seq_hdr[]", VideoSeqHeader,"Sequence header"), + 0xB4: ("seq_err[]", None, "Sequence error"), + 0xB5: ("ext_start[]", VideoExtension,"Extension start"), + 0xB7: ("seq_end[]", None, "Sequence end"), + 0xB8: ("group_start[]", GroupStart, "Group start"), + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + tag = self["tag"].value + if tag in self.tag_info: + self._name, self.parser, self._description = self.tag_info[tag] + if not self.parser: + self.parser = defaultParser + elif 0x01 <= tag <= 0xaf: + self._name, self.parser, self._description = ('slice[]', VideoSlice, 'Picture slice') + else: + self.parser = defaultParser + + def createFields(self): + yield Bytes(self, "sync", 3) + yield textHandler(UInt8(self, "tag"), hexadecimal) + if self.parser and self['tag'].value != 0xb7: + yield self.parser(self, "content") + +class VideoStream(Parser): + endian = BIG_ENDIAN + def createFields(self): + while self.current_size < self.size: + pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB + if pos is not None: + padsize = pos-self.current_size + if padsize: + yield PaddingBytes(self, "pad[]", padsize//8) + yield VideoChunk(self, "chunk[]") + +class Stream(FieldSet): + def createFields(self): + padding=0 + position=0 + while True: + next=ord(self.parent.stream.readBytes(self.absolute_address+self.current_size+position, 1)) + if next == 0xff: + padding+=1 + position+=8 + elif padding: + yield PaddingBytes(self, "pad[]", padding) + padding=None + position=0 + elif 0x40 <= next <= 0x7f: + yield Bits(self, "scale_marker", 2) # 1 + yield Bit(self, "scale") + scale=self['scale'].value + if scale: + scaleval=1024 + else: + scaleval=128 + yield textHandler(Bits(self, "size", 13), lambda 
field:str(field.value*scaleval)) + elif 0x00 <= next <= 0x3f: + yield Bits(self, "ts_marker", 2) # 0 + yield Bit(self, "has_pts") + yield Bit(self, "has_dts") + if self['has_pts'].value: + yield Timestamp(self, "pts") + if self['has_dts'].value: + yield PaddingBits(self, "pad[]", 4) + yield Timestamp(self, "dts") + if self.current_size % 8 == 4: + yield PaddingBits(self, "pad[]", 4) + break + elif 0x80 <= next <= 0xbf: + # MPEG-2 extension + yield PacketElement(self, "pkt") + break + else: + # 0xc0 - 0xfe: unknown + break + length = self["../length"].value - self.current_size//8 + if length: + tag=self['../tag'].value + group=self.root.streamgroups[tag] + parname=self.parent._name + if parname.startswith('audio'): + frag = CustomFragment(self, "data", length*8, MpegAudioFile, group=group) + elif parname.startswith('video'): + frag = CustomFragment(self, "data", length*8, VideoStream, group=group) + else: + frag = CustomFragment(self, "data", length*8, None, group=group) + self.root.streamgroups[tag]=frag.group + yield frag + +class Chunk(FieldSet): + ISO_END_CODE = 0xB9 + tag_info = { + 0xB9: ("end", None, "End"), + 0xBA: ("pack_start[]", PackHeader, "Pack start"), + 0xBB: ("system_start[]", SystemHeader, "System start"), + # streams + 0xBD: ("private[]", Stream, "Private elementary"), + 0xBE: ("padding[]", Stream, "Padding"), + # 0xC0 to 0xFE handled specially + 0xFF: ("directory[]", Stream, "Program Stream Directory"), + } + + def __init__(self, *args): + FieldSet.__init__(self, *args) + if not hasattr(self.root,'streamgroups'): + self.root.streamgroups={} + for tag in range(0xBC, 0x100): + self.root.streamgroups[tag]=None + tag = self["tag"].value + if tag in self.tag_info: + self._name, self.parser, self._description = self.tag_info[tag] + elif 0xBC <= tag <= 0xFF: + if 0xC0 <= tag < 0xE0: + # audio + streamid = tag-0xC0 + self._name, self.parser, self._description = ("audio[%i][]"%streamid, Stream, "Audio Stream %i Packet"%streamid) + elif 0xE0 <= tag < 0xF0: 
+ # video + streamid = tag-0xE0 + self._name, self.parser, self._description = ("video[%i][]"%streamid, Stream, "Video Stream %i Packet"%streamid) + else: + self._name, self.parser, self._description = ("stream[]", Stream, "Data Stream Packet") + else: + self.parser = defaultParser + + if not self.parser: + self.parser = defaultParser + elif self.parser != PackHeader and "length" in self: + self._size = (6 + self["length"].value) * 8 + + def createFields(self): + yield Bytes(self, "sync", 3) + yield textHandler(UInt8(self, "tag"), hexadecimal) + if self.parser: + if self.parser != PackHeader: + yield UInt16(self, "length") + if not self["length"].value: + return + yield self.parser(self, "content") + + def createDescription(self): + return "Chunk: tag %s" % self["tag"].display + +class MPEGVideoFile(Parser): + PARSER_TAGS = { + "id": "mpeg_video", + "category": "video", + "file_ext": ("mpeg", "mpg", "mpe", "vob"), + "mime": (u"video/mpeg", u"video/mp2p"), + "min_size": 12*8, +#TODO: "magic": xxx, + "description": "MPEG video, version 1 or 2" + } + endian = BIG_ENDIAN + version = None + + def createFields(self): + while self.current_size < self.size: + pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB + if pos is not None: + padsize = pos-self.current_size + if padsize: + yield PaddingBytes(self, "pad[]", padsize//8) + chunk=Chunk(self, "chunk[]") + try: + # force chunk to be processed, so that CustomFragments are complete + chunk['content/data'] + except: pass + yield chunk + + def validate(self): + try: + pack = self[0] + except FieldError: + return "Unable to create first chunk" + if pack.name != "pack_start[0]": + return "Invalid first chunk" + if pack["sync"].value != "\0\0\1": + return "Invalid synchronisation" + return pack["content"].validate() + + def getVersion(self): + if not self.version: + if self["pack_start[0]/content/sync[0]"].size == 2: + self.version = 2 + else: + self.version = 1 + 
return self.version + + def createDescription(self): + if self.getVersion() == 2: + return "MPEG-2 video" + else: + return "MPEG-1 video" + diff --git a/libs/imdb/Character.py b/libs/imdb/Character.py new file mode 100644 index 0000000..2126494 --- /dev/null +++ b/libs/imdb/Character.py @@ -0,0 +1,197 @@ +""" +Character module (imdb package). + +This module provides the Character class, used to store information about +a given character. + +Copyright 2007-2010 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from copy import deepcopy + +from imdb.utils import analyze_name, build_name, flatten, _Container, cmpPeople + + +class Character(_Container): + """A Character. + + Every information about a character can be accessed as: + characterObject['information'] + to get a list of the kind of information stored in a + Character object, use the keys() method; some useful aliases + are defined (as "also known as" for the "akas" key); + see the keys_alias dictionary. + """ + # The default sets of information retrieved. + default_info = ('main', 'filmography', 'biography') + + # Aliases for some not-so-intuitive keys. 
+ keys_alias = {'mini biography': 'biography', + 'bio': 'biography', + 'character biography': 'biography', + 'character biographies': 'biography', + 'biographies': 'biography', + 'character bio': 'biography', + 'aka': 'akas', + 'also known as': 'akas', + 'alternate names': 'akas', + 'personal quotes': 'quotes', + 'keys': 'keywords', + 'keyword': 'keywords'} + + keys_tomodify_list = ('biography', 'quotes') + + cmpFunct = cmpPeople + + def _init(self, **kwds): + """Initialize a Character object. + + *characterID* -- the unique identifier for the character. + *name* -- the name of the Character, if not in the data dictionary. + *myName* -- the nickname you use for this character. + *myID* -- your personal id for this character. + *data* -- a dictionary used to initialize the object. + *notes* -- notes about the given character. + *accessSystem* -- a string representing the data access system used. + *titlesRefs* -- a dictionary with references to movies. + *namesRefs* -- a dictionary with references to persons. + *charactersRefs* -- a dictionary with references to characters. + *modFunct* -- function called returning text fields. + """ + name = kwds.get('name') + if name and not self.data.has_key('name'): + self.set_name(name) + self.characterID = kwds.get('characterID', None) + self.myName = kwds.get('myName', u'') + + def _reset(self): + """Reset the Character object.""" + self.characterID = None + self.myName = u'' + + def set_name(self, name): + """Set the name of the character.""" + # XXX: convert name to unicode, if it's a plain string? + d = analyze_name(name, canonical=0) + self.data.update(d) + + def _additional_keys(self): + """Valid keys to append to the data.keys() list.""" + addkeys = [] + if self.data.has_key('name'): + addkeys += ['long imdb name'] + if self.data.has_key('headshot'): + addkeys += ['full-size headshot'] + return addkeys + + def _getitem(self, key): + """Handle special keys.""" + ## XXX: can a character have an imdbIndex? 
+ if self.data.has_key('name'): + if key == 'long imdb name': + return build_name(self.data) + if key == 'full-size headshot' and self.data.has_key('headshot'): + return self._re_fullsizeURL.sub('', self.data.get('headshot', '')) + return None + + def getID(self): + """Return the characterID.""" + return self.characterID + + def __nonzero__(self): + """The Character is "false" if the self.data does not contain a name.""" + # XXX: check the name and the characterID? + if self.data.get('name'): return 1 + return 0 + + def __contains__(self, item): + """Return true if this Character was portrayed in the given Movie + or it was impersonated by the given Person.""" + from Movie import Movie + from Person import Person + if isinstance(item, Person): + for m in flatten(self.data, yieldDictKeys=1, scalar=Movie): + if item.isSame(m.currentRole): + return 1 + elif isinstance(item, Movie): + for m in flatten(self.data, yieldDictKeys=1, scalar=Movie): + if item.isSame(m): + return 1 + return 0 + + def isSameName(self, other): + """Return true if two character have the same name + and/or characterID.""" + if not isinstance(other, self.__class__): + return 0 + if self.data.has_key('name') and \ + other.data.has_key('name') and \ + build_name(self.data, canonical=0) == \ + build_name(other.data, canonical=0): + return 1 + if self.accessSystem == other.accessSystem and \ + self.characterID is not None and \ + self.characterID == other.characterID: + return 1 + return 0 + isSameCharacter = isSameName + + def __deepcopy__(self, memo): + """Return a deep copy of a Character instance.""" + c = Character(name=u'', characterID=self.characterID, + myName=self.myName, myID=self.myID, + data=deepcopy(self.data, memo), + notes=self.notes, accessSystem=self.accessSystem, + titlesRefs=deepcopy(self.titlesRefs, memo), + namesRefs=deepcopy(self.namesRefs, memo), + charactersRefs=deepcopy(self.charactersRefs, memo)) + c.current_info = list(self.current_info) + c.set_mod_funct(self.modFunct) + 
return c + + def __repr__(self): + """String representation of a Character object.""" + r = '' % (self.characterID, + self.accessSystem, + self.get('name')) + if isinstance(r, unicode): r = r.encode('utf_8', 'replace') + return r + + def __str__(self): + """Simply print the short name.""" + return self.get('name', u'').encode('utf_8', 'replace') + + def __unicode__(self): + """Simply print the short title.""" + return self.get('name', u'') + + def summary(self): + """Return a string with a pretty-printed summary for the character.""" + if not self: return u'' + s = u'Character\n=====\nName: %s\n' % \ + self.get('name', u'') + bio = self.get('biography') + if bio: + s += u'Biography: %s\n' % bio[0] + filmo = self.get('filmography') + if filmo: + a_list = [x.get('long imdb canonical title', u'') + for x in filmo[:5]] + s += u'Last movies with this character: %s.\n' % u'; '.join(a_list) + return s + + diff --git a/libs/imdb/Company.py b/libs/imdb/Company.py new file mode 100644 index 0000000..5e05c84 --- /dev/null +++ b/libs/imdb/Company.py @@ -0,0 +1,195 @@ +""" +company module (imdb package). + +This module provides the company class, used to store information about +a given company. + +Copyright 2008-2009 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from copy import deepcopy + +from imdb.utils import analyze_company_name, build_company_name, \ + flatten, _Container, cmpCompanies + + +class Company(_Container): + """A company. + + Every information about a company can be accessed as: + companyObject['information'] + to get a list of the kind of information stored in a + company object, use the keys() method; some useful aliases + are defined (as "also known as" for the "akas" key); + see the keys_alias dictionary. + """ + # The default sets of information retrieved. + default_info = ('main',) + + # Aliases for some not-so-intuitive keys. + keys_alias = { + 'distributor': 'distributors', + 'special effects company': 'special effects companies', + 'other company': 'miscellaneous companies', + 'miscellaneous company': 'miscellaneous companies', + 'other companies': 'miscellaneous companies', + 'misc companies': 'miscellaneous companies', + 'misc company': 'miscellaneous companies', + 'production company': 'production companies'} + + keys_tomodify_list = () + + cmpFunct = cmpCompanies + + def _init(self, **kwds): + """Initialize a company object. + + *companyID* -- the unique identifier for the company. + *name* -- the name of the company, if not in the data dictionary. + *myName* -- the nickname you use for this company. + *myID* -- your personal id for this company. + *data* -- a dictionary used to initialize the object. + *notes* -- notes about the given company. + *accessSystem* -- a string representing the data access system used. + *titlesRefs* -- a dictionary with references to movies. + *namesRefs* -- a dictionary with references to persons. + *charactersRefs* -- a dictionary with references to companies. + *modFunct* -- function called returning text fields. 
+ """ + name = kwds.get('name') + if name and not self.data.has_key('name'): + self.set_name(name) + self.companyID = kwds.get('companyID', None) + self.myName = kwds.get('myName', u'') + + def _reset(self): + """Reset the company object.""" + self.companyID = None + self.myName = u'' + + def set_name(self, name): + """Set the name of the company.""" + # XXX: convert name to unicode, if it's a plain string? + # Company diverges a bit from other classes, being able + # to directly handle its "notes". AND THAT'S PROBABLY A BAD IDEA! + oname = name = name.strip() + notes = u'' + if name.endswith(')'): + fparidx = name.find('(') + if fparidx != -1: + notes = name[fparidx:] + name = name[:fparidx].rstrip() + if self.notes: + name = oname + d = analyze_company_name(name) + self.data.update(d) + if notes and not self.notes: + self.notes = notes + + def _additional_keys(self): + """Valid keys to append to the data.keys() list.""" + if self.data.has_key('name'): + return ['long imdb name'] + return [] + + def _getitem(self, key): + """Handle special keys.""" + ## XXX: can a company have an imdbIndex? + if self.data.has_key('name'): + if key == 'long imdb name': + return build_company_name(self.data) + return None + + def getID(self): + """Return the companyID.""" + return self.companyID + + def __nonzero__(self): + """The company is "false" if the self.data does not contain a name.""" + # XXX: check the name and the companyID? 
+ if self.data.get('name'): return 1 + return 0 + + def __contains__(self, item): + """Return true if this company and the given Movie are related.""" + from Movie import Movie + if isinstance(item, Movie): + for m in flatten(self.data, yieldDictKeys=1, scalar=Movie): + if item.isSame(m): + return 1 + return 0 + + def isSameName(self, other): + """Return true if two company have the same name + and/or companyID.""" + if not isinstance(other, self.__class__): + return 0 + if self.data.has_key('name') and \ + other.data.has_key('name') and \ + build_company_name(self.data) == \ + build_company_name(other.data): + return 1 + if self.accessSystem == other.accessSystem and \ + self.companyID is not None and \ + self.companyID == other.companyID: + return 1 + return 0 + isSameCompany = isSameName + + def __deepcopy__(self, memo): + """Return a deep copy of a company instance.""" + c = Company(name=u'', companyID=self.companyID, + myName=self.myName, myID=self.myID, + data=deepcopy(self.data, memo), + notes=self.notes, accessSystem=self.accessSystem, + titlesRefs=deepcopy(self.titlesRefs, memo), + namesRefs=deepcopy(self.namesRefs, memo), + charactersRefs=deepcopy(self.charactersRefs, memo)) + c.current_info = list(self.current_info) + c.set_mod_funct(self.modFunct) + return c + + def __repr__(self): + """String representation of a Company object.""" + r = '' % (self.companyID, + self.accessSystem, + self.get('long imdb name')) + if isinstance(r, unicode): r = r.encode('utf_8', 'replace') + return r + + def __str__(self): + """Simply print the short name.""" + return self.get('name', u'').encode('utf_8', 'replace') + + def __unicode__(self): + """Simply print the short title.""" + return self.get('name', u'') + + def summary(self): + """Return a string with a pretty-printed summary for the company.""" + if not self: return u'' + s = u'Company\n=======\nName: %s\n' % \ + self.get('name', u'') + for k in ('distributor', 'production company', 'miscellaneous company', + 
'special effects company'): + d = self.get(k, [])[:5] + if not d: continue + s += u'Last movies from this company (%s): %s.\n' % \ + (k, u'; '.join([x.get('long imdb title', u'') for x in d])) + return s + + diff --git a/libs/imdb/Movie.py b/libs/imdb/Movie.py new file mode 100644 index 0000000..37ae49e --- /dev/null +++ b/libs/imdb/Movie.py @@ -0,0 +1,398 @@ +""" +Movie module (imdb package). + +This module provides the Movie class, used to store information about +a given movie. + +Copyright 2004-2010 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from copy import deepcopy + +from imdb import articles +from imdb.utils import analyze_title, build_title, canonicalTitle, \ + flatten, _Container, cmpMovies + + +class Movie(_Container): + """A Movie. + + Every information about a movie can be accessed as: + movieObject['information'] + to get a list of the kind of information stored in a + Movie object, use the keys() method; some useful aliases + are defined (as "casting" for the "casting director" key); see + the keys_alias dictionary. + """ + # The default sets of information retrieved. + default_info = ('main', 'plot') + + # Aliases for some not-so-intuitive keys. 
+ keys_alias = { + 'tv schedule': 'airing', + 'user rating': 'rating', + 'plot summary': 'plot', + 'plot summaries': 'plot', + 'directed by': 'director', + 'created by': 'creator', + 'writing credits': 'writer', + 'produced by': 'producer', + 'original music by': 'original music', + 'non-original music by': 'non-original music', + 'music': 'original music', + 'cinematography by': 'cinematographer', + 'cinematography': 'cinematographer', + 'film editing by': 'editor', + 'film editing': 'editor', + 'editing': 'editor', + 'actors': 'cast', + 'actresses': 'cast', + 'casting by': 'casting director', + 'casting': 'casting director', + 'art direction by': 'art direction', + 'set decoration by': 'set decoration', + 'costume design by': 'costume designer', + 'costume design': 'costume designer', + 'makeup department': 'make up', + 'makeup': 'make up', + 'make-up': 'make up', + 'production management': 'production manager', + 'production company': 'production companies', + 'second unit director or assistant director': + 'assistant director', + 'second unit director': 'assistant director', + 'sound department': 'sound crew', + 'costume and wardrobe department': 'costume department', + 'special effects by': 'special effects', + 'visual effects by': 'visual effects', + 'special effects company': 'special effects companies', + 'stunts': 'stunt performer', + 'other crew': 'miscellaneous crew', + 'misc crew': 'miscellaneous crew', + 'miscellaneouscrew': 'miscellaneous crew', + 'crewmembers': 'miscellaneous crew', + 'crew members': 'miscellaneous crew', + 'other companies': 'miscellaneous companies', + 'misc companies': 'miscellaneous companies', + 'miscellaneous company': 'miscellaneous companies', + 'misc company': 'miscellaneous companies', + 'other company': 'miscellaneous companies', + 'aka': 'akas', + 'also known as': 'akas', + 'country': 'countries', + 'production country': 'countries', + 'production countries': 'countries', + 'genre': 'genres', + 'runtime': 'runtimes', + 
'lang': 'languages', + 'color': 'color info', + 'cover': 'cover url', + 'full-size cover': 'full-size cover url', + 'seasons': 'number of seasons', + 'language': 'languages', + 'certificate': 'certificates', + 'certifications': 'certificates', + 'certification': 'certificates', + 'miscellaneous links': 'misc links', + 'miscellaneous': 'misc links', + 'soundclips': 'sound clips', + 'videoclips': 'video clips', + 'photographs': 'photo sites', + 'distributor': 'distributors', + 'distribution': 'distributors', + 'distribution companies': 'distributors', + 'distribution company': 'distributors', + 'guest': 'guests', + 'guest appearances': 'guests', + 'tv guests': 'guests', + 'notable tv guest appearances': 'guests', + 'episodes cast': 'guests', + 'episodes number': 'number of episodes', + 'amazon review': 'amazon reviews', + 'merchandising': 'merchandising links', + 'merchandise': 'merchandising links', + 'sales': 'merchandising links', + 'faq': 'faqs', + 'parental guide': 'parents guide', + 'frequently asked questions': 'faqs'} + + keys_tomodify_list = ('plot', 'trivia', 'alternate versions', 'goofs', + 'quotes', 'dvd', 'laserdisc', 'news', 'soundtrack', + 'crazy credits', 'business', 'supplements', + 'video review', 'faqs') + + cmpFunct = cmpMovies + + def _init(self, **kwds): + """Initialize a Movie object. + + *movieID* -- the unique identifier for the movie. + *title* -- the title of the Movie, if not in the data dictionary. + *myTitle* -- your personal title for the movie. + *myID* -- your personal identifier for the movie. + *data* -- a dictionary used to initialize the object. + *currentRole* -- a Character instance representing the current role + or duty of a person in this movie, or a Person + object representing the actor/actress who played + a given character in a Movie. If a string is + passed, an object is automatically build. + *roleID* -- if available, the characterID/personID of the currentRole + object. 
+ *roleIsPerson* -- when False (default) the currentRole is assumed + to be a Character object, otherwise a Person. + *notes* -- notes for the person referred in the currentRole + attribute; e.g.: '(voice)'. + *accessSystem* -- a string representing the data access system used. + *titlesRefs* -- a dictionary with references to movies. + *namesRefs* -- a dictionary with references to persons. + *charactersRefs* -- a dictionary with references to characters. + *modFunct* -- function called returning text fields. + """ + title = kwds.get('title') + if title and not self.data.has_key('title'): + self.set_title(title) + self.movieID = kwds.get('movieID', None) + self.myTitle = kwds.get('myTitle', u'') + + def _reset(self): + """Reset the Movie object.""" + self.movieID = None + self.myTitle = u'' + + def set_title(self, title): + """Set the title of the movie.""" + # XXX: convert title to unicode, if it's a plain string? + d_title = analyze_title(title) + self.data.update(d_title) + + def _additional_keys(self): + """Valid keys to append to the data.keys() list.""" + addkeys = [] + if self.data.has_key('title'): + addkeys += ['canonical title', 'long imdb title', + 'long imdb canonical title', + 'smart canonical title', + 'smart long imdb canonical title'] + if self.data.has_key('episode of'): + addkeys += ['long imdb episode title', 'series title', + 'canonical series title', 'episode title', + 'canonical episode title', + 'smart canonical series title', + 'smart canonical episode title'] + if self.data.has_key('cover url'): + addkeys += ['full-size cover url'] + return addkeys + + def guessLanguage(self): + """Guess the language of the title of this movie; returns None + if there are no hints.""" + lang = self.get('languages') + if lang: + lang = lang[0] + else: + country = self.get('countries') + if country: + lang = articles.COUNTRY_LANG.get(country[0]) + return lang + + def smartCanonicalTitle(self, title=None, lang=None): + """Return the canonical title, guessing 
its language. + The title can be forces with the 'title' argument (internally + used) and the language can be forced with the 'lang' argument, + otherwise it's auto-detected.""" + if title is None: + title = self.data.get('title', u'') + if lang is None: + lang = self.guessLanguage() + return canonicalTitle(title, lang=lang) + + def _getitem(self, key): + """Handle special keys.""" + if self.data.has_key('episode of'): + if key == 'long imdb episode title': + return build_title(self.data) + elif key == 'series title': + return self.data['episode of']['title'] + elif key == 'canonical series title': + ser_title = self.data['episode of']['title'] + return canonicalTitle(ser_title) + elif key == 'smart canonical series title': + ser_title = self.data['episode of']['title'] + return self.smartCanonicalTitle(ser_title) + elif key == 'episode title': + return self.data.get('title', u'') + elif key == 'canonical episode title': + return canonicalTitle(self.data.get('title', u'')) + elif key == 'smart canonical episode title': + return self.smartCanonicalTitle(self.data.get('title', u'')) + if self.data.has_key('title'): + if key == 'title': + return self.data['title'] + elif key == 'long imdb title': + return build_title(self.data) + elif key == 'canonical title': + return canonicalTitle(self.data['title']) + elif key == 'smart canonical title': + return self.smartCanonicalTitle(self.data['title']) + elif key == 'long imdb canonical title': + return build_title(self.data, canonical=1) + elif key == 'smart long imdb canonical title': + return build_title(self.data, canonical=1, + lang=self.guessLanguage()) + if key == 'full-size cover url' and self.data.has_key('cover url'): + return self._re_fullsizeURL.sub('', self.data.get('cover url', '')) + return None + + def getID(self): + """Return the movieID.""" + return self.movieID + + def __nonzero__(self): + """The Movie is "false" if the self.data does not contain a title.""" + # XXX: check the title and the movieID? 
+ if self.data.has_key('title'): return 1 + return 0 + + def isSameTitle(self, other): + """Return true if this and the compared object have the same + long imdb title and/or movieID. + """ + # XXX: obsolete? + if not isinstance(other, self.__class__): return 0 + if self.data.has_key('title') and \ + other.data.has_key('title') and \ + build_title(self.data, canonical=0) == \ + build_title(other.data, canonical=0): + return 1 + if self.accessSystem == other.accessSystem and \ + self.movieID is not None and self.movieID == other.movieID: + return 1 + return 0 + isSameMovie = isSameTitle # XXX: just for backward compatiblity. + + def __contains__(self, item): + """Return true if the given Person object is listed in this Movie, + or if the the given Character is represented in this Movie.""" + from Person import Person + from Character import Character + from Company import Company + if isinstance(item, Person): + for p in flatten(self.data, yieldDictKeys=1, scalar=Person, + toDescend=(list, dict, tuple, Movie)): + if item.isSame(p): + return 1 + elif isinstance(item, Character): + for p in flatten(self.data, yieldDictKeys=1, scalar=Person, + toDescend=(list, dict, tuple, Movie)): + if item.isSame(p.currentRole): + return 1 + elif isinstance(item, Company): + for c in flatten(self.data, yieldDictKeys=1, scalar=Company, + toDescend=(list, dict, tuple, Movie)): + if item.isSame(c): + return 1 + return 0 + + def __deepcopy__(self, memo): + """Return a deep copy of a Movie instance.""" + m = Movie(title=u'', movieID=self.movieID, myTitle=self.myTitle, + myID=self.myID, data=deepcopy(self.data, memo), + currentRole=deepcopy(self.currentRole, memo), + roleIsPerson=self._roleIsPerson, + notes=self.notes, accessSystem=self.accessSystem, + titlesRefs=deepcopy(self.titlesRefs, memo), + namesRefs=deepcopy(self.namesRefs, memo), + charactersRefs=deepcopy(self.charactersRefs, memo)) + m.current_info = list(self.current_info) + m.set_mod_funct(self.modFunct) + return m + + def 
__repr__(self): + """String representation of a Movie object.""" + # XXX: add also currentRole and notes, if present? + if self.has_key('long imdb episode title'): + title = self.get('long imdb episode title') + else: + title = self.get('long imdb title') + r = '' % (self.movieID, self.accessSystem, + title) + if isinstance(r, unicode): r = r.encode('utf_8', 'replace') + return r + + def __str__(self): + """Simply print the short title.""" + return self.get('title', u'').encode('utf_8', 'replace') + + def __unicode__(self): + """Simply print the short title.""" + return self.get('title', u'') + + def summary(self): + """Return a string with a pretty-printed summary for the movie.""" + if not self: return u'' + def _nameAndRole(personList, joiner=u', '): + """Build a pretty string with name and role.""" + nl = [] + for person in personList: + n = person.get('name', u'') + if person.currentRole: n += u' (%s)' % person.currentRole + nl.append(n) + return joiner.join(nl) + s = u'Movie\n=====\nTitle: %s\n' % \ + self.get('long imdb canonical title', u'') + genres = self.get('genres') + if genres: s += u'Genres: %s.\n' % u', '.join(genres) + director = self.get('director') + if director: + s += u'Director: %s.\n' % _nameAndRole(director) + writer = self.get('writer') + if writer: + s += u'Writer: %s.\n' % _nameAndRole(writer) + cast = self.get('cast') + if cast: + cast = cast[:5] + s += u'Cast: %s.\n' % _nameAndRole(cast) + runtime = self.get('runtimes') + if runtime: + s += u'Runtime: %s.\n' % u', '.join(runtime) + countries = self.get('countries') + if countries: + s += u'Country: %s.\n' % u', '.join(countries) + lang = self.get('languages') + if lang: + s += u'Language: %s.\n' % u', '.join(lang) + rating = self.get('rating') + if rating: + s += u'Rating: %s' % rating + nr_votes = self.get('votes') + if nr_votes: + s += u' (%s votes)' % nr_votes + s += u'.\n' + plot = self.get('plot') + if not plot: + plot = self.get('plot summary') + if plot: + plot = [plot] + if 
plot: + plot = plot[0] + i = plot.find('::') + if i != -1: + plot = plot[:i] + s += u'Plot: %s' % plot + return s + + diff --git a/libs/imdb/Person.py b/libs/imdb/Person.py new file mode 100644 index 0000000..6e3e462 --- /dev/null +++ b/libs/imdb/Person.py @@ -0,0 +1,275 @@ +""" +Person module (imdb package). + +This module provides the Person class, used to store information about +a given person. + +Copyright 2004-2010 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from copy import deepcopy + +from imdb.utils import analyze_name, build_name, normalizeName, \ + flatten, _Container, cmpPeople + + +class Person(_Container): + """A Person. + + Every information about a person can be accessed as: + personObject['information'] + to get a list of the kind of information stored in a + Person object, use the keys() method; some useful aliases + are defined (as "biography" for the "mini biography" key); + see the keys_alias dictionary. + """ + # The default sets of information retrieved. + default_info = ('main', 'filmography', 'biography') + + # Aliases for some not-so-intuitive keys. 
+ keys_alias = {'biography': 'mini biography', + 'bio': 'mini biography', + 'aka': 'akas', + 'also known as': 'akas', + 'nick name': 'nick names', + 'nicks': 'nick names', + 'nickname': 'nick names', + 'miscellaneouscrew': 'miscellaneous crew', + 'crewmembers': 'miscellaneous crew', + 'misc': 'miscellaneous crew', + 'guest': 'notable tv guest appearances', + 'guests': 'notable tv guest appearances', + 'tv guest': 'notable tv guest appearances', + 'guest appearances': 'notable tv guest appearances', + 'spouses': 'spouse', + 'salary': 'salary history', + 'salaries': 'salary history', + 'otherworks': 'other works', + "maltin's biography": + "biography from leonard maltin's movie encyclopedia", + "leonard maltin's biography": + "biography from leonard maltin's movie encyclopedia", + 'real name': 'birth name', + 'where are they now': 'where now', + 'personal quotes': 'quotes', + 'mini-biography author': 'imdb mini-biography by', + 'biography author': 'imdb mini-biography by', + 'genre': 'genres', + 'portrayed': 'portrayed in', + 'keys': 'keywords', + 'trademarks': 'trade mark', + 'trade mark': 'trade mark', + 'trade marks': 'trade mark', + 'trademark': 'trade mark', + 'pictorials': 'pictorial', + 'magazine covers': 'magazine cover photo', + 'magazine-covers': 'magazine cover photo', + 'tv series episodes': 'episodes', + 'tv-series episodes': 'episodes', + 'articles': 'article', + 'keyword': 'keywords'} + + # 'nick names'??? + keys_tomodify_list = ('mini biography', 'spouse', 'quotes', 'other works', + 'salary history', 'trivia', 'trade mark', 'news', + 'books', 'biographical movies', 'portrayed in', + 'where now', 'interviews', 'article', + "biography from leonard maltin's movie encyclopedia") + + cmpFunct = cmpPeople + + def _init(self, **kwds): + """Initialize a Person object. + + *personID* -- the unique identifier for the person. + *name* -- the name of the Person, if not in the data dictionary. + *myName* -- the nickname you use for this person. 
+ *myID* -- your personal id for this person. + *data* -- a dictionary used to initialize the object. + *currentRole* -- a Character instance representing the current role + or duty of a person in this movie, or a Person + object representing the actor/actress who played + a given character in a Movie. If a string is + passed, an object is automatically build. + *roleID* -- if available, the characterID/personID of the currentRole + object. + *roleIsPerson* -- when False (default) the currentRole is assumed + to be a Character object, otherwise a Person. + *notes* -- notes about the given person for a specific movie + or role (e.g.: the alias used in the movie credits). + *accessSystem* -- a string representing the data access system used. + *titlesRefs* -- a dictionary with references to movies. + *namesRefs* -- a dictionary with references to persons. + *modFunct* -- function called returning text fields. + *billingPos* -- position of this person in the credits list. + """ + name = kwds.get('name') + if name and not self.data.has_key('name'): + self.set_name(name) + self.personID = kwds.get('personID', None) + self.myName = kwds.get('myName', u'') + self.billingPos = kwds.get('billingPos', None) + + def _reset(self): + """Reset the Person object.""" + self.personID = None + self.myName = u'' + self.billingPos = None + + def _clear(self): + """Reset the dictionary.""" + self.billingPos = None + + def set_name(self, name): + """Set the name of the person.""" + # XXX: convert name to unicode, if it's a plain string? 
+ d = analyze_name(name, canonical=1) + self.data.update(d) + + def _additional_keys(self): + """Valid keys to append to the data.keys() list.""" + addkeys = [] + if self.data.has_key('name'): + addkeys += ['canonical name', 'long imdb name', + 'long imdb canonical name'] + if self.data.has_key('headshot'): + addkeys += ['full-size headshot'] + return addkeys + + def _getitem(self, key): + """Handle special keys.""" + if self.data.has_key('name'): + if key == 'name': + return normalizeName(self.data['name']) + elif key == 'canonical name': + return self.data['name'] + elif key == 'long imdb name': + return build_name(self.data, canonical=0) + elif key == 'long imdb canonical name': + return build_name(self.data) + if key == 'full-size headshot' and self.data.has_key('headshot'): + return self._re_fullsizeURL.sub('', self.data.get('headshot', '')) + return None + + def getID(self): + """Return the personID.""" + return self.personID + + def __nonzero__(self): + """The Person is "false" if the self.data does not contain a name.""" + # XXX: check the name and the personID? + if self.data.has_key('name'): return 1 + return 0 + + def __contains__(self, item): + """Return true if this Person has worked in the given Movie, + or if the fiven Character was played by this Person.""" + from Movie import Movie + from Character import Character + if isinstance(item, Movie): + for m in flatten(self.data, yieldDictKeys=1, scalar=Movie): + if item.isSame(m): + return 1 + elif isinstance(item, Character): + for m in flatten(self.data, yieldDictKeys=1, scalar=Movie): + if item.isSame(m.currentRole): + return 1 + return 0 + + def isSameName(self, other): + """Return true if two persons have the same name and imdbIndex + and/or personID. 
+ """ + if not isinstance(other, self.__class__): + return 0 + if self.data.has_key('name') and \ + other.data.has_key('name') and \ + build_name(self.data, canonical=1) == \ + build_name(other.data, canonical=1): + return 1 + if self.accessSystem == other.accessSystem and \ + self.personID and self.personID == other.personID: + return 1 + return 0 + isSamePerson = isSameName # XXX: just for backward compatiblity. + + def __deepcopy__(self, memo): + """Return a deep copy of a Person instance.""" + p = Person(name=u'', personID=self.personID, myName=self.myName, + myID=self.myID, data=deepcopy(self.data, memo), + currentRole=deepcopy(self.currentRole, memo), + roleIsPerson=self._roleIsPerson, + notes=self.notes, accessSystem=self.accessSystem, + titlesRefs=deepcopy(self.titlesRefs, memo), + namesRefs=deepcopy(self.namesRefs, memo), + charactersRefs=deepcopy(self.charactersRefs, memo)) + p.current_info = list(self.current_info) + p.set_mod_funct(self.modFunct) + p.billingPos = self.billingPos + return p + + def __repr__(self): + """String representation of a Person object.""" + # XXX: add also currentRole and notes, if present? 
+ r = '' % (self.personID, self.accessSystem, + self.get('long imdb canonical name')) + if isinstance(r, unicode): r = r.encode('utf_8', 'replace') + return r + + def __str__(self): + """Simply print the short name.""" + return self.get('name', u'').encode('utf_8', 'replace') + + def __unicode__(self): + """Simply print the short title.""" + return self.get('name', u'') + + def summary(self): + """Return a string with a pretty-printed summary for the person.""" + if not self: return u'' + s = u'Person\n=====\nName: %s\n' % \ + self.get('long imdb canonical name', u'') + bdate = self.get('birth date') + if bdate: + s += u'Birth date: %s' % bdate + bnotes = self.get('birth notes') + if bnotes: + s += u' (%s)' % bnotes + s += u'.\n' + ddate = self.get('death date') + if ddate: + s += u'Death date: %s' % ddate + dnotes = self.get('death notes') + if dnotes: + s += u' (%s)' % dnotes + s += u'.\n' + bio = self.get('mini biography') + if bio: + s += u'Biography: %s\n' % bio[0] + director = self.get('director') + if director: + d_list = [x.get('long imdb canonical title', u'') + for x in director[:3]] + s += u'Last movies directed: %s.\n' % u'; '.join(d_list) + act = self.get('actor') or self.get('actress') + if act: + a_list = [x.get('long imdb canonical title', u'') + for x in act[:5]] + s += u'Last movies acted: %s.\n' % u'; '.join(a_list) + return s + + diff --git a/libs/imdb/__init__.py b/libs/imdb/__init__.py new file mode 100644 index 0000000..4e6ac77 --- /dev/null +++ b/libs/imdb/__init__.py @@ -0,0 +1,902 @@ +""" +imdb package. + +This package can be used to retrieve information about a movie or +a person from the IMDb database. +It can fetch data through different media (e.g.: the IMDb web pages, +a SQL database, etc.) 

Copyright 2004-2010 Davide Alberani

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

__all__ = ['IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
           'available_access_systems']
__version__ = VERSION = '4.6'

# Import compatibility module (importing it is enough).
import _compat

import sys, os, ConfigParser, logging
from types import MethodType

from imdb import Movie, Person, Character, Company
import imdb._logging
from imdb._exceptions import IMDbError, IMDbDataAccessError
from imdb.utils import build_title, build_name, build_company_name

# Auxiliary logger, for messages not tied to a specific access system.
_aux_logger = logging.getLogger('imdbpy.aux')


# URLs of the main pages for movies, persons, characters and queries.
imdbURL_base = 'http://akas.imdb.com/'
# http://akas.imdb.com/title/
imdbURL_movie_base = '%stitle/' % imdbURL_base
# http://akas.imdb.com/title/tt%s/
imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
# http://akas.imdb.com/name/
imdbURL_person_base = '%sname/' % imdbURL_base
# http://akas.imdb.com/name/nm%s/
imdbURL_person_main = imdbURL_person_base + 'nm%s/'
# http://akas.imdb.com/character/
imdbURL_character_base = '%scharacter/' % imdbURL_base
# http://akas.imdb.com/character/ch%s/
imdbURL_character_main = imdbURL_character_base + 'ch%s/'
# http://akas.imdb.com/company/
imdbURL_company_base = '%scompany/' % imdbURL_base
# http://akas.imdb.com/company/co%s/
imdbURL_company_main = imdbURL_company_base + 'co%s/'
# http://akas.imdb.com/keyword/%s/
imdbURL_keyword_main = imdbURL_base + 'keyword/%s/'
# http://akas.imdb.com/chart/top
imdbURL_top250 = imdbURL_base + 'chart/top'
# http://akas.imdb.com/chart/bottom
imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
# http://akas.imdb.com/find?%s
imdbURL_find = imdbURL_base + 'find?%s'

# Name of the configuration file.
confFileName = 'imdbpy.cfg'

class ConfigParserWithCase(ConfigParser.ConfigParser):
    """A case-sensitive parser for configuration files."""
    def __init__(self, defaults=None, confFile=None, *args, **kwds):
        """Initialize the parser.

        *defaults* -- defaults values.
        *confFile* -- the file (or list of files) to parse."""
        ConfigParser.ConfigParser.__init__(self, defaults=defaults)
        if confFile is None:
            # No file given: probe the standard locations, most
            # specific (cwd) first.
            dotFileName = '.' + confFileName
            # Current and home directory.
            confFile = [os.path.join(os.getcwd(), confFileName),
                        os.path.join(os.getcwd(), dotFileName),
                        os.path.join(os.path.expanduser('~'), confFileName),
                        os.path.join(os.path.expanduser('~'), dotFileName)]
            if os.name == 'posix':
                sep = getattr(os.path, 'sep', '/')
                # /etc/ and /etc/conf.d/
                confFile.append(os.path.join(sep, 'etc', confFileName))
                confFile.append(os.path.join(sep, 'etc', 'conf.d',
                                             confFileName))
            else:
                # etc subdirectory of sys.prefix, for non-unix systems.
                confFile.append(os.path.join(sys.prefix, 'etc', confFileName))
        for fname in confFile:
            try:
                self.read(fname)
            except (ConfigParser.MissingSectionHeaderError,
                    ConfigParser.ParsingError), e:
                _aux_logger.warn('Troubles reading config file: %s' % e)
            # Stop at the first valid file.
            if self.has_section('imdbpy'):
                break

    def optionxform(self, optionstr):
        """Option names are case sensitive."""
        return optionstr

    def _manageValue(self, value):
        """Custom substitutions for values."""
        if not isinstance(value, (str, unicode)):
            return value
        vlower = value.lower()
        # Map 'true'/'false'/'yes'/'no'/'on'/'off' strings to booleans.
        if vlower in self._boolean_states:
            return self._boolean_states[vlower]
        elif vlower == 'none':
            return None
        return value

    def get(self, section, option, *args, **kwds):
        """Return the value of an option from a given section."""
        value = ConfigParser.ConfigParser.get(self, section, option,
                                              *args, **kwds)
        return self._manageValue(value)

    def items(self, section, *args, **kwds):
        """Return a list of (key, value) tuples of items of the
        given section."""
        if section != 'DEFAULT' and not self.has_section(section):
            return []
        keys = ConfigParser.ConfigParser.options(self, section)
        return [(k, self.get(section, k, *args, **kwds)) for k in keys]

    def getDict(self, section):
        """Return a dictionary of items of the specified section."""
        return dict(self.items(section))


def IMDb(accessSystem=None, *arguments, **keywords):
    """Return an instance of the appropriate class.
    The accessSystem parameter is used to specify the kind of
    the preferred access system."""
    if accessSystem is None or accessSystem in ('auto', 'config'):
        # No explicit access system: read it from the configuration file.
        try:
            cfg_file = ConfigParserWithCase(*arguments, **keywords)
            # Parameters set by the code take precedence.
            kwds = cfg_file.getDict('imdbpy')
            if 'accessSystem' in kwds:
                accessSystem = kwds['accessSystem']
                del kwds['accessSystem']
            else:
                accessSystem = 'http'
            kwds.update(keywords)
            keywords = kwds
        except Exception, e:
            logging.getLogger('imdbpy').warn('Unable to read configuration' \
                                             ' file; complete error: %s' % e)
            # It just LOOKS LIKE a bad habit: we tried to read config
            # options from some files, but something is gone horribly
            # wrong: ignore everything and pretend we were called with
            # the 'http' accessSystem.
            accessSystem = 'http'
    if 'loggingLevel' in keywords:
        imdb._logging.setLevel(keywords['loggingLevel'])
        del keywords['loggingLevel']
    if 'loggingConfig' in keywords:
        logCfg = keywords['loggingConfig']
        del keywords['loggingConfig']
        try:
            import logging.config
            logging.config.fileConfig(os.path.expanduser(logCfg))
        except Exception, e:
            logging.getLogger('imdbpy').warn('unable to read logger ' \
                                             'config: %s' % e)
    # Dispatch on the (possibly config-derived) access system name.
    if accessSystem in ('http', 'web', 'html'):
        from parser.http import IMDbHTTPAccessSystem
        return IMDbHTTPAccessSystem(*arguments, **keywords)
    elif accessSystem in ('httpThin', 'webThin', 'htmlThin'):
        from parser.http import IMDbHTTPAccessSystem
        return IMDbHTTPAccessSystem(isThin=1, *arguments, **keywords)
    elif accessSystem in ('mobile',):
        from parser.mobile import IMDbMobileAccessSystem
        return IMDbMobileAccessSystem(*arguments, **keywords)
    elif accessSystem in ('local', 'files'):
        # The local access system was removed since IMDbPY 4.2.
        raise IMDbError, 'the local access system was removed since IMDbPY 4.2'
    elif accessSystem in ('sql', 'db', 'database'):
        try:
            from parser.sql import IMDbSqlAccessSystem
        except ImportError:
            raise IMDbError, 'the sql access system is not installed'
        return IMDbSqlAccessSystem(*arguments, **keywords)
    else:
        raise IMDbError, 'unknown kind of data access system: "%s"' \
                % accessSystem


def available_access_systems():
    """Return the list of available data access systems."""
    asList = []
    # XXX: trying to import modules is a good thing?
    try:
        from parser.http import IMDbHTTPAccessSystem
        asList += ['http', 'httpThin']
    except ImportError:
        pass
    try:
        from parser.mobile import IMDbMobileAccessSystem
        asList.append('mobile')
    except ImportError:
        pass
    try:
        from parser.sql import IMDbSqlAccessSystem
        asList.append('sql')
    except ImportError:
        pass
    return asList


# XXX: I'm not sure this is a good guess.
#      I suppose that an argument of the IMDb function can be used to
#      set a default encoding for the output, and then Movie, Person and
#      Character objects can use this default encoding, returning strings.
#      Anyway, passing unicode strings to search_movie(), search_person()
#      and search_character() methods is always safer.
encoding = getattr(sys.stdin, 'encoding', '') or sys.getdefaultencoding()

class IMDbBase:
    """The base class used to search for a movie/person/character and
    to get a Movie/Person/Character object.

    This class cannot directly fetch data of any kind and so you
    have to search the "real" code into a subclass."""

    # The name of the preferred access system (MUST be overridden
    # in the subclasses).
    accessSystem = 'UNKNOWN'

    # Top-level logger for IMDbPY.
    _imdb_logger = logging.getLogger('imdbpy')

    def __init__(self, defaultModFunct=None, results=20, keywordsResults=100,
                 *arguments, **keywords):
        """Initialize the access system.
        If specified, defaultModFunct is the function used by
        default by the Person, Movie and Character objects, when
        accessing their text fields.
        """
        # The function used to output the strings that need modification (the
        # ones containing references to movie titles and person names).
        self._defModFunct = defaultModFunct
        # Number of results to get.
        try:
            results = int(results)
        except (TypeError, ValueError):
            results = 20
        if results < 1:
            results = 20
        self._results = results
        # Number of keyword-search results to get.
        try:
            keywordsResults = int(keywordsResults)
        except (TypeError, ValueError):
            keywordsResults = 100
        if keywordsResults < 1:
            keywordsResults = 100
        self._keywordsResults = keywordsResults

    def _normalize_movieID(self, movieID):
        """Normalize the given movieID."""
        # By default, do nothing.
        return movieID

    def _normalize_personID(self, personID):
        """Normalize the given personID."""
        # By default, do nothing.
        return personID

    def _normalize_characterID(self, characterID):
        """Normalize the given characterID."""
        # By default, do nothing.
        return characterID

    def _normalize_companyID(self, companyID):
        """Normalize the given companyID."""
        # By default, do nothing.
        return companyID

    def _get_real_movieID(self, movieID):
        """Handle title aliases."""
        # By default, do nothing.
        return movieID

    def _get_real_personID(self, personID):
        """Handle name aliases."""
        # By default, do nothing.
        return personID

    def _get_real_characterID(self, characterID):
        """Handle character name aliases."""
        # By default, do nothing.
        return characterID

    def _get_real_companyID(self, companyID):
        """Handle company name aliases."""
        # By default, do nothing.
        return companyID

    def _get_infoset(self, prefname):
        """Return methods with the name starting with prefname."""
        infoset = []
        # Skip the 'get_xxx_infoset' accessor itself.
        excludes = ('%sinfoset' % prefname,)
        preflen = len(prefname)
        for name in dir(self.__class__):
            if name.startswith(prefname) and name not in excludes:
                member = getattr(self.__class__, name)
                if isinstance(member, MethodType):
                    # E.g. 'get_movie_plot' -> 'plot'.
                    infoset.append(name[preflen:].replace('_', ' '))
        return infoset

    def get_movie_infoset(self):
        """Return the list of info set available for movies."""
        return self._get_infoset('get_movie_')

    def get_person_infoset(self):
        """Return the list of info set available for persons."""
        return self._get_infoset('get_person_')

    def get_character_infoset(self):
        """Return the list of info set available for characters."""
        return self._get_infoset('get_character_')

    def get_company_infoset(self):
        """Return the list of info set available for companies."""
        return self._get_infoset('get_company_')

    def get_movie(self, movieID, info=Movie.Movie.default_info, modFunct=None):
        """Return a Movie object for the given movieID.

        The movieID is something used to univocally identify a movie;
        it can be the imdbID used by the IMDb web server, a file
        pointer, a line number in a file, an ID in a database, etc.

        info is the list of sets of information to retrieve.

        If specified, modFunct will be the function used by the Movie
        object when accessing its text fields (like 'plot')."""
        movieID = self._normalize_movieID(movieID)
        movieID = self._get_real_movieID(movieID)
        movie = Movie.Movie(movieID=movieID, accessSystem=self.accessSystem)
        modFunct = modFunct or self._defModFunct
        if modFunct is not None:
            movie.set_mod_funct(modFunct)
        self.update(movie, info)
        return movie

    get_episode = get_movie

    def _search_movie(self, title, results):
        """Return a list of tuples (movieID, {movieData})"""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def search_movie(self, title, results=None, _episodes=False):
        """Return a list of Movie objects for a query for the given title.
        The results argument is the maximum number of results to return."""
        if results is None:
            results = self._results
        try:
            results = int(results)
        except (ValueError, OverflowError):
            # Fall back to the hard-coded default on a bogus value.
            results = 20
        # XXX: I suppose it will be much safer if the user provides
        #      an unicode string... this is just a guess.
        if not isinstance(title, unicode):
            title = unicode(title, encoding, 'replace')
        if not _episodes:
            res = self._search_movie(title, results)
        else:
            res = self._search_episode(title, results)
        return [Movie.Movie(movieID=self._get_real_movieID(mi),
                data=md, modFunct=self._defModFunct,
                accessSystem=self.accessSystem) for mi, md in res][:results]

    def _search_episode(self, title, results):
        """Return a list of tuples (movieID, {movieData})"""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def search_episode(self, title, results=None):
        """Return a list of Movie objects for a query for the given title.
        The results argument is the maximum number of results to return;
        this method searches only for titles of tv (mini) series' episodes."""
        return self.search_movie(title, results=results, _episodes=True)

    def get_person(self, personID, info=Person.Person.default_info,
                   modFunct=None):
        """Return a Person object for the given personID.

        The personID is something used to univocally identify a person;
        it can be the imdbID used by the IMDb web server, a file
        pointer, a line number in a file, an ID in a database, etc.

        info is the list of sets of information to retrieve.

        If specified, modFunct will be the function used by the Person
        object when accessing its text fields (like 'mini biography')."""
        personID = self._normalize_personID(personID)
        personID = self._get_real_personID(personID)
        person = Person.Person(personID=personID,
                               accessSystem=self.accessSystem)
        modFunct = modFunct or self._defModFunct
        if modFunct is not None:
            person.set_mod_funct(modFunct)
        self.update(person, info)
        return person

    def _search_person(self, name, results):
        """Return a list of tuples (personID, {personData})"""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def search_person(self, name, results=None):
        """Return a list of Person objects for a query for the given name.

        The results argument is the maximum number of results to return."""
        if results is None:
            results = self._results
        try:
            results = int(results)
        except (ValueError, OverflowError):
            results = 20
        if not isinstance(name, unicode):
            name = unicode(name, encoding, 'replace')
        res = self._search_person(name, results)
        return [Person.Person(personID=self._get_real_personID(pi),
                data=pd, modFunct=self._defModFunct,
                accessSystem=self.accessSystem) for pi, pd in res][:results]

    def get_character(self, characterID, info=Character.Character.default_info,
                      modFunct=None):
        """Return a Character object for the given characterID.

        The characterID is something used to univocally identify a character;
        it can be the imdbID used by the IMDb web server, a file
        pointer, a line number in a file, an ID in a database, etc.

        info is the list of sets of information to retrieve.

        If specified, modFunct will be the function used by the Character
        object when accessing its text fields (like 'biography')."""
        characterID = self._normalize_characterID(characterID)
        characterID = self._get_real_characterID(characterID)
        character = Character.Character(characterID=characterID,
                                        accessSystem=self.accessSystem)
        modFunct = modFunct or self._defModFunct
        if modFunct is not None:
            character.set_mod_funct(modFunct)
        self.update(character, info)
        return character

    def _search_character(self, name, results):
        """Return a list of tuples (characterID, {characterData})"""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def search_character(self, name, results=None):
        """Return a list of Character objects for a query for the given name.

        The results argument is the maximum number of results to return."""
        if results is None:
            results = self._results
        try:
            results = int(results)
        except (ValueError, OverflowError):
            results = 20
        if not isinstance(name, unicode):
            name = unicode(name, encoding, 'replace')
        res = self._search_character(name, results)
        return [Character.Character(characterID=self._get_real_characterID(pi),
                data=pd, modFunct=self._defModFunct,
                accessSystem=self.accessSystem) for pi, pd in res][:results]

    def get_company(self, companyID, info=Company.Company.default_info,
                    modFunct=None):
        """Return a Company object for the given companyID.

        The companyID is something used to univocally identify a company;
        it can be the imdbID used by the IMDb web server, a file
        pointer, a line number in a file, an ID in a database, etc.

        info is the list of sets of information to retrieve.

        If specified, modFunct will be the function used by the Company
        object when accessing its text fields (none, so far)."""
        companyID = self._normalize_companyID(companyID)
        companyID = self._get_real_companyID(companyID)
        company = Company.Company(companyID=companyID,
                                  accessSystem=self.accessSystem)
        modFunct = modFunct or self._defModFunct
        if modFunct is not None:
            company.set_mod_funct(modFunct)
        self.update(company, info)
        return company

    def _search_company(self, name, results):
        """Return a list of tuples (companyID, {companyData})"""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def search_company(self, name, results=None):
        """Return a list of Company objects for a query for the given name.

        The results argument is the maximum number of results to return."""
        if results is None:
            results = self._results
        try:
            results = int(results)
        except (ValueError, OverflowError):
            results = 20
        if not isinstance(name, unicode):
            name = unicode(name, encoding, 'replace')
        res = self._search_company(name, results)
        return [Company.Company(companyID=self._get_real_companyID(pi),
                data=pd, modFunct=self._defModFunct,
                accessSystem=self.accessSystem) for pi, pd in res][:results]

    def _search_keyword(self, keyword, results):
        """Return a list of 'keyword' strings."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def search_keyword(self, keyword, results=None):
        """Search for existing keywords, similar to the given one."""
        if results is None:
            results = self._keywordsResults
        try:
            results = int(results)
        except (ValueError, OverflowError):
            results = 100
        if not isinstance(keyword, unicode):
            keyword = unicode(keyword, encoding, 'replace')
        return self._search_keyword(keyword, results)

    def _get_keyword(self, keyword, results):
        """Return a list of tuples (movieID, {movieData})"""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def get_keyword(self, keyword, results=None):
        """Return a list of movies for the given keyword."""
        if results is None:
            results = self._keywordsResults
        try:
            results = int(results)
        except (ValueError, OverflowError):
            results = 100
        # XXX: I suppose it will be much safer if the user provides
        #      an unicode string... this is just a guess.
        if not isinstance(keyword, unicode):
            keyword = unicode(keyword, encoding, 'replace')
        res = self._get_keyword(keyword, results)
        return [Movie.Movie(movieID=self._get_real_movieID(mi),
                data=md, modFunct=self._defModFunct,
                accessSystem=self.accessSystem) for mi, md in res][:results]

    def _get_top_bottom_movies(self, kind):
        """Return the list of the top 250 or bottom 100 movies."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        #      This method must return a list of (movieID, {movieDict})
        #      tuples. The kind parameter can be 'top' or 'bottom'.
        raise NotImplementedError, 'override this method'

    def get_top250_movies(self):
        """Return the list of the top 250 movies."""
        res = self._get_top_bottom_movies('top')
        return [Movie.Movie(movieID=self._get_real_movieID(mi),
                data=md, modFunct=self._defModFunct,
                accessSystem=self.accessSystem) for mi, md in res]

    def get_bottom100_movies(self):
        """Return the list of the bottom 100 movies."""
        res = self._get_top_bottom_movies('bottom')
        return [Movie.Movie(movieID=self._get_real_movieID(mi),
                data=md, modFunct=self._defModFunct,
                accessSystem=self.accessSystem) for mi, md in res]

    def new_movie(self, *arguments, **keywords):
        """Return a Movie object."""
        # XXX: not really useful...
        if 'title' in keywords:
            if not isinstance(keywords['title'], unicode):
                keywords['title'] = unicode(keywords['title'],
                                            encoding, 'replace')
        elif len(arguments) > 1:
            # NOTE(review): 'arguments' is a tuple, so this item assignment
            # would raise TypeError if ever reached — confirm intent.
            if not isinstance(arguments[1], unicode):
                arguments[1] = unicode(arguments[1], encoding, 'replace')
        return Movie.Movie(accessSystem=self.accessSystem,
                           *arguments, **keywords)

    def new_person(self, *arguments, **keywords):
        """Return a Person object."""
        # XXX: not really useful...
        if 'name' in keywords:
            if not isinstance(keywords['name'], unicode):
                keywords['name'] = unicode(keywords['name'],
                                           encoding, 'replace')
        elif len(arguments) > 1:
            if not isinstance(arguments[1], unicode):
                arguments[1] = unicode(arguments[1], encoding, 'replace')
        return Person.Person(accessSystem=self.accessSystem,
                             *arguments, **keywords)

    def new_character(self, *arguments, **keywords):
        """Return a Character object."""
        # XXX: not really useful...
        if 'name' in keywords:
            if not isinstance(keywords['name'], unicode):
                keywords['name'] = unicode(keywords['name'],
                                           encoding, 'replace')
        elif len(arguments) > 1:
            if not isinstance(arguments[1], unicode):
                arguments[1] = unicode(arguments[1], encoding, 'replace')
        return Character.Character(accessSystem=self.accessSystem,
                                   *arguments, **keywords)

    def new_company(self, *arguments, **keywords):
        """Return a Company object."""
        # XXX: not really useful...
        if 'name' in keywords:
            if not isinstance(keywords['name'], unicode):
                keywords['name'] = unicode(keywords['name'],
                                           encoding, 'replace')
        elif len(arguments) > 1:
            if not isinstance(arguments[1], unicode):
                arguments[1] = unicode(arguments[1], encoding, 'replace')
        return Company.Company(accessSystem=self.accessSystem,
                               *arguments, **keywords)

    def update(self, mop, info=None, override=0):
        """Given a Movie, Person, Character or Company object with only
        partial information, retrieve the required set of information.

        info is the list of sets of information to retrieve.

        If override is set, the information are retrieved and updated
        even if they're already in the object."""
        # XXX: should this be a method of the Movie/Person/Character/Company
        #      classes? NO! What for instances created by external functions?
        mopID = None
        prefix = ''
        # Work out the object's ID and the method-name prefix for it.
        if isinstance(mop, Movie.Movie):
            mopID = mop.movieID
            prefix = 'movie'
        elif isinstance(mop, Person.Person):
            mopID = mop.personID
            prefix = 'person'
        elif isinstance(mop, Character.Character):
            mopID = mop.characterID
            prefix = 'character'
        elif isinstance(mop, Company.Company):
            mopID = mop.companyID
            prefix = 'company'
        else:
            raise IMDbError, 'object ' + repr(mop) + \
                    ' is not a Movie, Person, Character or Company instance'
        if mopID is None:
            # XXX: enough? It's obvious that there are Characters
            #      objects without characterID, so I think they should
            #      just do nothing, when an i.update(character) is tried.
            if prefix == 'character':
                return
            raise IMDbDataAccessError, \
                'the supplied object has null movieID, personID or companyID'
        if mop.accessSystem == self.accessSystem:
            aSystem = self
        else:
            # The object was fetched through another access system:
            # delegate the retrieval to an instance of that system.
            aSystem = IMDb(mop.accessSystem)
        if info is None:
            info = mop.default_info
        elif info == 'all':
            if isinstance(mop, Movie.Movie):
                info = self.get_movie_infoset()
            elif isinstance(mop, Person.Person):
                info = self.get_person_infoset()
            elif isinstance(mop, Character.Character):
                info = self.get_character_infoset()
            else:
                info = self.get_company_infoset()
        if not isinstance(info, (tuple, list)):
            info = (info,)
        # Merge the data retrieved for every requested info set.
        res = {}
        for i in info:
            if i in mop.current_info and not override:
                continue
            if not i:
                continue
            self._imdb_logger.debug('retrieving "%s" info set', i)
            try:
                method = getattr(aSystem, 'get_%s_%s' %
                                 (prefix, i.replace(' ', '_')))
            except AttributeError:
                self._imdb_logger.error('unknown information set "%s"', i)
                # Keeps going.
                method = lambda *x: {}
            try:
                ret = method(mopID)
            except Exception, e:
                self._imdb_logger.critical('caught an exception retrieving ' \
                                    'or parsing "%s" info set for mopID ' \
                                    '"%s" (accessSystem: %s)',
                                    i, mopID, mop.accessSystem, exc_info=True)
                ret = {}
            keys = None
            if 'data' in ret:
                res.update(ret['data'])
                if isinstance(ret['data'], dict):
                    keys = ret['data'].keys()
            if 'info sets' in ret:
                for ri in ret['info sets']:
                    mop.add_to_current_info(ri, keys, mainInfoset=i)
            else:
                mop.add_to_current_info(i, keys)
            if 'titlesRefs' in ret:
                mop.update_titlesRefs(ret['titlesRefs'])
            if 'namesRefs' in ret:
                mop.update_namesRefs(ret['namesRefs'])
            if 'charactersRefs' in ret:
                mop.update_charactersRefs(ret['charactersRefs'])
        mop.set_data(res, override=0)

    def get_imdbMovieID(self, movieID):
        """Translate a movieID in an imdbID (the ID used by the IMDb
        web server); must be overridden by the subclass."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def get_imdbPersonID(self, personID):
        """Translate a personID in a imdbID (the ID used by the IMDb
        web server); must be overridden by the subclass."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'

    def get_imdbCharacterID(self, characterID):
        """Translate a characterID in a imdbID (the ID used by the IMDb
        web server); must be overridden by the subclass."""
        # XXX: for the real implementation, see the method of the
        #      subclass, somewhere under the imdb.parser package.
+ raise NotImplementedError, 'override this method' + + def get_imdbCompanyID(self, companyID): + """Translate a companyID in a imdbID (the ID used by the IMDb + web server); must be overridden by the subclass.""" + # XXX: for the real implementation, see the method of the + # subclass, somewhere under the imdb.parser package. + raise NotImplementedError, 'override this method' + + def _searchIMDb(self, kind, ton): + """Search the IMDb akas server for the given title or name.""" + # The Exact Primary search system has gone AWOL, so we resort + # to the mobile search. :-/ + if not ton: + return None + aSystem = IMDb('mobile') + if kind == 'tt': + searchFunct = aSystem.search_movie + check = 'long imdb canonical title' + elif kind == 'nm': + searchFunct = aSystem.search_person + check = 'long imdb canonical name' + elif kind == 'char': + searchFunct = aSystem.search_character + check = 'long imdb canonical name' + elif kind == 'co': + # XXX: are [COUNTRY] codes included in the results? + searchFunct = aSystem.search_company + check = 'long imdb name' + try: + searchRes = searchFunct(ton) + except IMDbError: + return None + # When only one result is returned, assume it was from an + # exact match. + if len(searchRes) == 1: + return searchRes[0].getID() + for item in searchRes: + # Return the first perfect match. + if item[check] == ton: + return item.getID() + return None + + def title2imdbID(self, title): + """Translate a movie title (in the plain text data files format) + to an imdbID. + Try an Exact Primary Title search on IMDb; + return None if it's unable to get the imdbID.""" + return self._searchIMDb('tt', title) + + def name2imdbID(self, name): + """Translate a person name in an imdbID. + Try an Exact Primary Name search on IMDb; + return None if it's unable to get the imdbID.""" + return self._searchIMDb('tt', name) + + def character2imdbID(self, name): + """Translate a character name in an imdbID. 
        Try an Exact Primary Name search on IMDb;
        return None if it's unable to get the imdbID."""
        return self._searchIMDb('char', name)

    def company2imdbID(self, name):
        """Translate a company name in an imdbID.
        Try an Exact Primary Name search on IMDb;
        return None if it's unable to get the imdbID."""
        return self._searchIMDb('co', name)

    def get_imdbID(self, mop):
        """Return the imdbID for the given Movie, Person, Character or Company
        object."""
        imdbID = None
        if mop.accessSystem == self.accessSystem:
            aSystem = self
        else:
            aSystem = IMDb(mop.accessSystem)
        # Prefer a direct ID translation; fall back to a name/title search.
        if isinstance(mop, Movie.Movie):
            if mop.movieID is not None:
                imdbID = aSystem.get_imdbMovieID(mop.movieID)
            else:
                imdbID = aSystem.title2imdbID(build_title(mop, canonical=0,
                                                          ptdf=1))
        elif isinstance(mop, Person.Person):
            if mop.personID is not None:
                imdbID = aSystem.get_imdbPersonID(mop.personID)
            else:
                imdbID = aSystem.name2imdbID(build_name(mop, canonical=1))
        elif isinstance(mop, Character.Character):
            if mop.characterID is not None:
                imdbID = aSystem.get_imdbCharacterID(mop.characterID)
            else:
                # canonical=0 ?
                imdbID = aSystem.character2imdbID(build_name(mop, canonical=1))
        elif isinstance(mop, Company.Company):
            if mop.companyID is not None:
                imdbID = aSystem.get_imdbCompanyID(mop.companyID)
            else:
                imdbID = aSystem.company2imdbID(build_company_name(mop))
        else:
            raise IMDbError, 'object ' + repr(mop) + \
                        ' is not a Movie, Person or Character instance'
        return imdbID

    def get_imdbURL(self, mop):
        """Return the main IMDb URL for the given Movie, Person,
        Character or Company object, or None if unable to get it."""
        imdbID = self.get_imdbID(mop)
        if imdbID is None:
            return None
        if isinstance(mop, Movie.Movie):
            url_firstPart = imdbURL_movie_main
        elif isinstance(mop, Person.Person):
            url_firstPart = imdbURL_person_main
        elif isinstance(mop, Character.Character):
            url_firstPart = imdbURL_character_main
        elif isinstance(mop, Company.Company):
            url_firstPart = imdbURL_company_main
        else:
            raise IMDbError, 'object ' + repr(mop) + \
                    ' is not a Movie, Person, Character or Company instance'
        return url_firstPart % imdbID

    def get_special_methods(self):
        """Return the special methods defined by the subclass."""
        sm_dict = {}
        # Methods defined directly on IMDbBase are not "special".
        base_methods = []
        for name in dir(IMDbBase):
            member = getattr(IMDbBase, name)
            if isinstance(member, MethodType):
                base_methods.append(name)
        for name in dir(self.__class__):
            if name.startswith('_') or name in base_methods or \
                    name.startswith('get_movie_') or \
                    name.startswith('get_person_') or \
                    name.startswith('get_company_') or \
                    name.startswith('get_character_'):
                continue
            member = getattr(self.__class__, name)
            if isinstance(member, MethodType):
                sm_dict.update({name: member.__doc__})
        return sm_dict

diff --git a/libs/imdb/_compat.py b/libs/imdb/_compat.py
new file mode 100644
index 0000000..73a4dd1
--- /dev/null
+++ b/libs/imdb/_compat.py
@@ -0,0 +1,72 @@
"""
_compat module (imdb package).

This module provides compatibility functions used by the imdb package
to deal with unusual environments.

Copyright 2008-2010 Davide Alberani

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""

# TODO: now we're heavily using the 'logging' module, which was not
#       present in Python 2.2. To work in a Symbian environment, we
#       need to create a fake 'logging' module (its functions may call
#       the 'warnings' module, or do nothing at all).


import os
# If true, we're working on a Symbian device.
if os.name == 'e32':
    # Replace os.path.expandvars and os.path.expanduser, if needed.
    def _noact(x):
        """Ad-hoc replacement for IMDbPY."""
        return x
    try:
        os.path.expandvars
    except AttributeError:
        os.path.expandvars = _noact
    try:
        os.path.expanduser
    except AttributeError:
        os.path.expanduser = _noact

    # time.strptime is missing, on Symbian devices.
+ import time + try: + time.strptime + except AttributeError: + import re + _re_web_time = re.compile(r'Episode dated (\d+) (\w+) (\d+)') + _re_ptdf_time = re.compile(r'\((\d+)-(\d+)-(\d+)\)') + _month2digit = {'January': '1', 'February': '2', 'March': '3', + 'April': '4', 'May': '5', 'June': '6', 'July': '7', + 'August': '8', 'September': '9', 'October': '10', + 'November': '11', 'December': '12'} + def strptime(s, format): + """Ad-hoc strptime replacement for IMDbPY.""" + try: + if format.startswith('Episode'): + res = _re_web_time.findall(s)[0] + return (int(res[2]), int(_month2digit[res[1]]), int(res[0]), + 0, 0, 0, 0, 1, 0) + else: + res = _re_ptdf_time.findall(s)[0] + return (int(res[0]), int(res[1]), int(res[2]), + 0, 0, 0, 0, 1, 0) + except: + raise ValueError, u'error in IMDbPY\'s ad-hoc strptime!' + time.strptime = strptime + diff --git a/libs/imdb/_exceptions.py b/libs/imdb/_exceptions.py new file mode 100644 index 0000000..bfb9688 --- /dev/null +++ b/libs/imdb/_exceptions.py @@ -0,0 +1,46 @@ +""" +_exceptions module (imdb package). + +This module provides the exception hierarchy used by the imdb package. + +Copyright 2004-2009 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import logging + + +class IMDbError(Exception): + """Base class for every exception raised by the imdb package.""" + _logger = logging.getLogger('imdbpy') + + def __init__(self, *args, **kwargs): + """Initialize the exception and pass the message to the log system.""" + # Every raised exception also dispatch a critical log. + self._logger.critical('%s exception raised; args: %s; kwds: %s', + self.__class__.__name__, args, kwargs, + exc_info=True) + super(IMDbError, self).__init__(*args, **kwargs) + +class IMDbDataAccessError(IMDbError): + """Exception raised when is not possible to access needed data.""" + pass + +class IMDbParserError(IMDbError): + """Exception raised when an error occurred parsing the data.""" + pass + + diff --git a/libs/imdb/_logging.py b/libs/imdb/_logging.py new file mode 100644 index 0000000..2b8a286 --- /dev/null +++ b/libs/imdb/_logging.py @@ -0,0 +1,63 @@ +""" +_logging module (imdb package). + +This module provides the logging facilities used by the imdb package. + +Copyright 2009-2010 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import logging + +LEVELS = {'debug': logging.DEBUG, + 'info': logging.INFO, + 'warn': logging.WARNING, + 'warning': logging.WARNING, + 'error': logging.ERROR, + 'critical': logging.CRITICAL} + + +imdbpyLogger = logging.getLogger('imdbpy') +imdbpyStreamHandler = logging.StreamHandler() +imdbpyFormatter = logging.Formatter('%(asctime)s %(levelname)s [%(name)s]' \ + ' %(pathname)s:%(lineno)d: %(message)s') +imdbpyStreamHandler.setFormatter(imdbpyFormatter) +imdbpyLogger.addHandler(imdbpyStreamHandler) + +def setLevel(level): + """Set logging level for the main logger.""" + level = level.lower().strip() + imdbpyLogger.setLevel(LEVELS.get(level, logging.NOTSET)) + imdbpyLogger.log(imdbpyLogger.level, 'set logging threshold to "%s"', + logging.getLevelName(imdbpyLogger.level)) + + +#imdbpyLogger.setLevel(logging.DEBUG) + + +# It can be an idea to have a single function to log and warn: +#import warnings +#def log_and_warn(msg, args=None, logger=None, level=None): +# """Log the message and issue a warning.""" +# if logger is None: +# logger = imdbpyLogger +# if level is None: +# level = logging.WARNING +# if args is None: +# args = () +# #warnings.warn(msg % args, stacklevel=0) +# logger.log(level, msg % args) + diff --git a/libs/imdb/articles.py b/libs/imdb/articles.py new file mode 100644 index 0000000..73ac690 --- /dev/null +++ b/libs/imdb/articles.py @@ -0,0 +1,142 @@ +""" +articles module (imdb package). + +This module provides functions and data to handle in a smart way +articles (in various languages) at the beginning of movie titles. + +Copyright 2009 Davide Alberani + 2009 H. 
Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +# List of generic articles used when the language of the title is unknown (or +# we don't have information about articles in that language). +# XXX: Managing titles in a lot of different languages, a function to recognize +# an initial article can't be perfect; sometimes we'll stumble upon a short +# word that is an article in some language, but it's not in another; in these +# situations we have to choose if we want to interpret this little word +# as an article or not (remember that we don't know what the original language +# of the title was). +# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it +# seems also to be a preposition in other languages (French?). +# Running a script over the whole list of titles (and aliases), I've found +# that 'en' is used as an article only 376 times, and as another thing 594 +# times, so I've decided to _always_ consider 'en' as a non article. 
+# +# Here is a list of words that are _never_ considered as articles, complete +# with the cound of times they are used in a way or another: +# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99), +# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70), +# 'da' (23 vs 298), "'n" (8 vs 12) +# +# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56) +# I'm not sure what '-al' is, and so I've left it out... +# +# Generic list of articles in utf-8 encoding: +GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el', + "l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los', + 'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os', + 'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-', + 'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines', + '\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9') + + +# Lists of articles separated by language. If possible, the list should +# be sorted by frequency (not very important, but...) +# If you want to add a list of articles for another language, mail it +# it at imdbpy-devel@lists.sourceforge.net; non-ascii articles must be utf-8 +# encoded. +LANG_ARTICLES = { + 'English': ('the', 'a', 'an'), + 'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'", + 'uno'), + 'Spanish': ('la', 'le', 'el', 'les', 'un', 'los', 'una', 'uno', 'unos', + 'unas'), + 'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'), + 'Turkish': (), # Some languages doesn't have articles. +} +LANG_ARTICLESget = LANG_ARTICLES.get + + +# Maps a language to countries where it is the main language. +# If you want to add an entry for another language or country, mail it at +# imdbpy-devel@lists.sourceforge.net . 
+_LANG_COUNTRIES = { + 'English': ('USA', 'UK', 'Canada', 'Ireland', 'Australia'), + 'Italian': ('Italy',), + 'Spanish': ('Spain', 'Mexico'), + 'Portuguese': ('Portugal', 'Brazil'), + 'Turkish': ('Turkey',), + #'German': ('Germany', 'East Germany', 'West Germany'), + #'French': ('France'), +} + +# Maps countries to their main language. +COUNTRY_LANG = {} +for lang in _LANG_COUNTRIES: + for country in _LANG_COUNTRIES[lang]: + COUNTRY_LANG[country] = lang + + +def toUnicode(articles): + """Convert a list of articles utf-8 encoded to unicode strings.""" + return tuple([art.decode('utf_8') for art in articles]) + + +def toDicts(articles): + """Given a list of utf-8 encoded articles, build two dictionary (one + utf-8 encoded and another one with unicode keys) for faster matches.""" + uArticles = toUnicode(articles) + return dict([(x, x) for x in articles]), dict([(x, x) for x in uArticles]) + + +def addTrailingSpace(articles): + """From the given list of utf-8 encoded articles, return two + lists (one utf-8 encoded and another one in unicode) where a space + is added at the end - if the last char is not ' or -.""" + _spArticles = [] + _spUnicodeArticles = [] + for article in articles: + if article[-1] not in ("'", '-'): + article += ' ' + _spArticles.append(article) + _spUnicodeArticles.append(article.decode('utf_8')) + return _spArticles, _spUnicodeArticles + + +# Caches. 
+_ART_CACHE = {} +_SP_ART_CACHE = {} + +def articlesDictsForLang(lang): + """Return dictionaries of articles specific for the given language, or the + default one if the language is not known.""" + if lang in _ART_CACHE: + return _ART_CACHE[lang] + artDicts = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES)) + _ART_CACHE[lang] = artDicts + return artDicts + + +def spArticlesForLang(lang): + """Return lists of articles (plus optional spaces) specific for the + given language, or the default one if the language is not known.""" + if lang in _SP_ART_CACHE: + return _SP_ART_CACHE[lang] + spArticles = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES)) + _SP_ART_CACHE[lang] = spArticles + return spArticles + diff --git a/libs/imdb/helpers.py b/libs/imdb/helpers.py new file mode 100644 index 0000000..2ca5306 --- /dev/null +++ b/libs/imdb/helpers.py @@ -0,0 +1,548 @@ +""" +helpers module (imdb package). + +This module provides functions not used directly by the imdb package, +but useful for IMDbPY-based programs. + +Copyright 2006-2010 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +# XXX: find better names for the functions in this modules. 
+ +import re +from cgi import escape +import gettext +from gettext import gettext as _ +gettext.textdomain('imdbpy') + +# The modClearRefs can be used to strip names and titles references from +# the strings in Movie and Person objects. +from imdb.utils import modClearRefs, re_titleRef, re_nameRef, \ + re_characterRef, _tagAttr, _Container, TAGS_TO_MODIFY +from imdb import IMDb, imdbURL_movie_base, imdbURL_person_base, \ + imdbURL_character_base +import imdb.locale +from imdb.Movie import Movie +from imdb.Person import Person +from imdb.Character import Character +from imdb.Company import Company +from imdb.parser.http.utils import re_entcharrefssub, entcharrefs, \ + subXMLRefs, subSGMLRefs +from imdb.parser.http.bsouplxml.etree import BeautifulSoup + + +# An URL, more or less. +_re_href = re.compile(r'(http://.+?)(?=\s|$)', re.I) +_re_hrefsub = _re_href.sub + + +def makeCgiPrintEncoding(encoding): + """Make a function to pretty-print strings for the web.""" + def cgiPrint(s): + """Encode the given string using the %s encoding, and replace + chars outside the given charset with XML char references.""" % encoding + s = escape(s, quote=1) + if isinstance(s, unicode): + s = s.encode(encoding, 'xmlcharrefreplace') + return s + return cgiPrint + +# cgiPrint uses the latin_1 encoding. +cgiPrint = makeCgiPrintEncoding('latin_1') + +# Regular expression for %(varname)s substitutions. +re_subst = re.compile(r'%\((.+?)\)s') +# Regular expression for .... clauses. +re_conditional = re.compile(r'(.+?)') + + +def makeTextNotes(replaceTxtNotes): + """Create a function useful to handle text[::optional_note] values. + replaceTxtNotes is a format string, which can include the following + values: %(text)s and %(notes)s. + Portions of the text can be conditionally excluded, if one of the + values is absent. E.g.: [%(notes)s] will be replaced + with '[notes]' if notes exists, or by an empty string otherwise. 
+ The returned function is suitable be passed as applyToValues argument + of the makeObject2Txt function.""" + def _replacer(s): + outS = replaceTxtNotes + if not isinstance(s, (unicode, str)): + return s + ssplit = s.split('::', 1) + text = ssplit[0] + # Used to keep track of text and note existence. + keysDict = {} + if text: + keysDict['text'] = True + outS = outS.replace('%(text)s', text) + if len(ssplit) == 2: + keysDict['notes'] = True + outS = outS.replace('%(notes)s', ssplit[1]) + else: + outS = outS.replace('%(notes)s', u'') + def _excludeFalseConditionals(matchobj): + # Return an empty string if the conditional is false/empty. + if matchobj.group(1) in keysDict: + return matchobj.group(2) + return u'' + while re_conditional.search(outS): + outS = re_conditional.sub(_excludeFalseConditionals, outS) + return outS + return _replacer + + +def makeObject2Txt(movieTxt=None, personTxt=None, characterTxt=None, + companyTxt=None, joiner=' / ', + applyToValues=lambda x: x, _recurse=True): + """"Return a function useful to pretty-print Movie, Person, + Character and Company instances. + + *movieTxt* -- how to format a Movie object. + *personTxt* -- how to format a Person object. + *characterTxt* -- how to format a Character object. + *companyTxt* -- how to format a Company object. + *joiner* -- string used to join a list of objects. + *applyToValues* -- function to apply to values. + *_recurse* -- if True (default) manage only the given object. + """ + # Some useful defaults. + if movieTxt is None: + movieTxt = '%(long imdb title)s' + if personTxt is None: + personTxt = '%(long imdb name)s' + if characterTxt is None: + characterTxt = '%(long imdb name)s' + if companyTxt is None: + companyTxt = '%(long imdb name)s' + def object2txt(obj, _limitRecursion=None): + """Pretty-print objects.""" + # Prevent unlimited recursion. 
+ if _limitRecursion is None: + _limitRecursion = 0 + elif _limitRecursion > 5: + return u'' + _limitRecursion += 1 + if isinstance(obj, (list, tuple)): + return joiner.join([object2txt(o, _limitRecursion=_limitRecursion) + for o in obj]) + elif isinstance(obj, dict): + # XXX: not exactly nice, neither useful, I fear. + return joiner.join([u'%s::%s' % + (object2txt(k, _limitRecursion=_limitRecursion), + object2txt(v, _limitRecursion=_limitRecursion)) + for k, v in obj.items()]) + objData = {} + if isinstance(obj, Movie): + objData['movieID'] = obj.movieID + outs = movieTxt + elif isinstance(obj, Person): + objData['personID'] = obj.personID + outs = personTxt + elif isinstance(obj, Character): + objData['characterID'] = obj.characterID + outs = characterTxt + elif isinstance(obj, Company): + objData['companyID'] = obj.companyID + outs = companyTxt + else: + return obj + def _excludeFalseConditionals(matchobj): + # Return an empty string if the conditional is false/empty. + condition = matchobj.group(1) + proceed = obj.get(condition) or getattr(obj, condition, None) + if proceed: + return matchobj.group(2) + else: + return u'' + return matchobj.group(2) + while re_conditional.search(outs): + outs = re_conditional.sub(_excludeFalseConditionals, outs) + for key in re_subst.findall(outs): + value = obj.get(key) or getattr(obj, key, None) + if not isinstance(value, (unicode, str)): + if not _recurse: + if value: + value = unicode(value) + if value: + value = object2txt(value, _limitRecursion=_limitRecursion) + elif value: + value = applyToValues(unicode(value)) + if not value: + value = u'' + elif not isinstance(value, (unicode, str)): + value = unicode(value) + outs = outs.replace(u'%(' + key + u')s', value) + return outs + return object2txt + + +def makeModCGILinks(movieTxt, personTxt, characterTxt=None, + encoding='latin_1'): + """Make a function used to pretty-print movies and persons refereces; + movieTxt and personTxt are the strings used for the substitutions. 
+ movieTxt must contains %(movieID)s and %(title)s, while personTxt + must contains %(personID)s and %(name)s and characterTxt %(characterID)s + and %(name)s; characterTxt is optional, for backward compatibility.""" + _cgiPrint = makeCgiPrintEncoding(encoding) + def modCGILinks(s, titlesRefs, namesRefs, characterRefs=None): + """Substitute movies and persons references.""" + if characterRefs is None: characterRefs = {} + # XXX: look ma'... more nested scopes! + def _replaceMovie(match): + to_replace = match.group(1) + item = titlesRefs.get(to_replace) + if item: + movieID = item.movieID + to_replace = movieTxt % {'movieID': movieID, + 'title': unicode(_cgiPrint(to_replace), + encoding, + 'xmlcharrefreplace')} + return to_replace + def _replacePerson(match): + to_replace = match.group(1) + item = namesRefs.get(to_replace) + if item: + personID = item.personID + to_replace = personTxt % {'personID': personID, + 'name': unicode(_cgiPrint(to_replace), + encoding, + 'xmlcharrefreplace')} + return to_replace + def _replaceCharacter(match): + to_replace = match.group(1) + if characterTxt is None: + return to_replace + item = characterRefs.get(to_replace) + if item: + characterID = item.characterID + if characterID is None: + return to_replace + to_replace = characterTxt % {'characterID': characterID, + 'name': unicode(_cgiPrint(to_replace), + encoding, + 'xmlcharrefreplace')} + return to_replace + s = s.replace('<', '<').replace('>', '>') + s = _re_hrefsub(r'\1', s) + s = re_titleRef.sub(_replaceMovie, s) + s = re_nameRef.sub(_replacePerson, s) + s = re_characterRef.sub(_replaceCharacter, s) + return s + modCGILinks.movieTxt = movieTxt + modCGILinks.personTxt = personTxt + modCGILinks.characterTxt = characterTxt + return modCGILinks + +# links to the imdb.com web site. 
+_movieTxt = '%(title)s' +_personTxt = '%(name)s' +_characterTxt = '%(name)s' +modHtmlLinks = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt, + characterTxt=_characterTxt) +modHtmlLinksASCII = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt, + characterTxt=_characterTxt, + encoding='ascii') + + +everyentcharrefs = entcharrefs.copy() +for k, v in {'lt':u'<','gt':u'>','amp':u'&','quot':u'"','apos':u'\''}.items(): + everyentcharrefs[k] = v + everyentcharrefs['#%s' % ord(v)] = v +everyentcharrefsget = everyentcharrefs.get +re_everyentcharrefs = re.compile('&(%s|\#160|\#\d{1,5});' % + '|'.join(map(re.escape, everyentcharrefs))) +re_everyentcharrefssub = re_everyentcharrefs.sub + +def _replAllXMLRef(match): + """Replace the matched XML reference.""" + ref = match.group(1) + value = everyentcharrefsget(ref) + if value is None: + if ref[0] == '#': + return unichr(int(ref[1:])) + else: + return ref + return value + +def subXMLHTMLSGMLRefs(s): + """Return the given string with XML/HTML/SGML entity and char references + replaced.""" + return re_everyentcharrefssub(_replAllXMLRef, s) + + +def sortedSeasons(m): + """Return a sorted list of seasons of the given series.""" + seasons = m.get('episodes', {}).keys() + seasons.sort() + return seasons + + +def sortedEpisodes(m, season=None): + """Return a sorted list of episodes of the given series, + considering only the specified season(s) (every season, if None).""" + episodes = [] + seasons = season + if season is None: + seasons = sortedSeasons(m) + else: + if not isinstance(season, (tuple, list)): + seasons = [season] + for s in seasons: + eps_indx = m.get('episodes', {}).get(s, {}).keys() + eps_indx.sort() + for e in eps_indx: + episodes.append(m['episodes'][s][e]) + return episodes + + +# Idea and portions of the code courtesy of none none (dclist at gmail.com) +_re_imdbIDurl = re.compile(r'\b(nm|tt|ch|co)([0-9]{7})\b') +def get_byURL(url, info=None, args=None, kwds=None): + """Return a Movie, Person, 
Character or Company object for the given URL; + info is the info set to retrieve, args and kwds are respectively a list + and a dictionary or arguments to initialize the data access system. + Returns None if unable to correctly parse the url; can raise + exceptions if unable to retrieve the data.""" + if args is None: args = [] + if kwds is None: kwds = {} + ia = IMDb(*args, **kwds) + match = _re_imdbIDurl.search(url) + if not match: + return None + imdbtype = match.group(1) + imdbID = match.group(2) + if imdbtype == 'tt': + return ia.get_movie(imdbID, info=info) + elif imdbtype == 'nm': + return ia.get_person(imdbID, info=info) + elif imdbtype == 'ch': + return ia.get_character(imdbID, info=info) + elif imdbtype == 'co': + return ia.get_company(imdbID, info=info) + return None + + +# Idea and portions of code courtesy of Basil Shubin. +# Beware that these information are now available directly by +# the Movie/Person/Character instances. +def fullSizeCoverURL(obj): + """Given an URL string or a Movie, Person or Character instance, + returns an URL to the full-size version of the cover/headshot, + or None otherwise. This function is obsolete: the same information + are available as keys: 'full-size cover url' and 'full-size headshot', + respectively for movies and persons/characters.""" + if isinstance(obj, Movie): + coverUrl = obj.get('cover url') + elif isinstance(obj, (Person, Character)): + coverUrl = obj.get('headshot') + else: + coverUrl = obj + if not coverUrl: + return None + return _Container._re_fullsizeURL.sub('', coverUrl) + + +def keyToXML(key): + """Return a key (the ones used to access information in Movie and + other classes instances) converted to the style of the XML output.""" + return _tagAttr(key, '')[0] + + +def translateKey(key): + """Translate a given key.""" + return _(keyToXML(key)) + + +# Maps tags to classes. 
+_MAP_TOP_OBJ = { + 'person': Person, + 'movie': Movie, + 'character': Character, + 'company': Company +} + +# Tags to be converted to lists. +_TAGS_TO_LIST = dict([(x[0], None) for x in TAGS_TO_MODIFY.values()]) +_TAGS_TO_LIST.update(_MAP_TOP_OBJ) + +def tagToKey(tag): + """Return the name of the tag, taking it from the 'key' attribute, + if present.""" + keyAttr = tag.get('key') + if keyAttr: + if tag.get('keytype') == 'int': + keyAttr = int(keyAttr) + return keyAttr + return tag.name + + +def _valueWithType(tag, tagValue): + """Return tagValue, handling some type conversions.""" + tagType = tag.get('type') + if tagType == 'int': + tagValue = int(tagValue) + elif tagType == 'float': + tagValue = float(tagValue) + return tagValue + + +# Extra tags to get (if values were not already read from title/name). +_titleTags = ('imdbindex', 'kind', 'year') +_nameTags = ('imdbindex') +_companyTags = ('imdbindex', 'country') + +def parseTags(tag, _topLevel=True, _as=None, _infoset2keys=None, + _key2infoset=None): + """Recursively parse a tree of tags.""" + # The returned object (usually a _Container subclass, but it can + # be a string, an int, a float, a list or a dictionary). + item = None + if _infoset2keys is None: + _infoset2keys = {} + if _key2infoset is None: + _key2infoset = {} + name = tagToKey(tag) + firstChild = tag.find(recursive=False) + tagStr = (tag.string or u'').strip() + if not tagStr and name == 'item': + # Handles 'item' tags containing text and a 'notes' sub-tag. + tagContent = tag.contents[0] + if isinstance(tagContent, BeautifulSoup.NavigableString): + tagStr = (unicode(tagContent) or u'').strip() + tagType = tag.get('type') + infoset = tag.get('infoset') + if infoset: + _key2infoset[name] = infoset + _infoset2keys.setdefault(infoset, []).append(name) + # Here we use tag.name to avoid tags like + if tag.name in _MAP_TOP_OBJ: + # One of the subclasses of _Container. 
+ item = _MAP_TOP_OBJ[name]() + itemAs = tag.get('access-system') + if itemAs: + if not _as: + _as = itemAs + else: + itemAs = _as + item.accessSystem = itemAs + tagsToGet = [] + theID = tag.get('id') + if name == 'movie': + item.movieID = theID + tagsToGet = _titleTags + theTitle = tag.find('title', recursive=False) + if tag.title: + item.set_title(tag.title.string) + tag.title.extract() + else: + if name == 'person': + item.personID = theID + tagsToGet = _nameTags + theName = tag.find('long imdb canonical name', recursive=False) + if not theName: + theName = tag.find('name', recursive=False) + elif name == 'character': + item.characterID = theID + tagsToGet = _nameTags + theName = tag.find('name', recursive=False) + elif name == 'company': + item.companyID = theID + tagsToGet = _companyTags + theName = tag.find('name', recursive=False) + if theName: + item.set_name(theName.string) + if theName: + theName.extract() + for t in tagsToGet: + if t in item.data: + continue + dataTag = tag.find(t, recursive=False) + if dataTag: + item.data[tagToKey(dataTag)] = _valueWithType(dataTag, + dataTag.string) + if tag.notes: + item.notes = tag.notes.string + tag.notes.extract() + episodeOf = tag.find('episode-of', recursive=False) + if episodeOf: + item.data['episode of'] = parseTags(episodeOf, _topLevel=False, + _as=_as, _infoset2keys=_infoset2keys, + _key2infoset=_key2infoset) + episodeOf.extract() + cRole = tag.find('current-role', recursive=False) + if cRole: + cr = parseTags(cRole, _topLevel=False, _as=_as, + _infoset2keys=_infoset2keys, _key2infoset=_key2infoset) + item.currentRole = cr + cRole.extract() + # XXX: big assumption, here. What about Movie instances used + # as keys in dictionaries? What about other keys (season and + # episode number, for example?) 
+ if not _topLevel: + #tag.extract() + return item + _adder = lambda key, value: item.data.update({key: value}) + elif tagStr: + if tag.notes: + notes = (tag.notes.string or u'').strip() + if notes: + tagStr += u'::%s' % notes + else: + tagStr = _valueWithType(tag, tagStr) + return tagStr + elif firstChild: + firstChildName = tagToKey(firstChild) + if firstChildName in _TAGS_TO_LIST: + item = [] + _adder = lambda key, value: item.append(value) + else: + item = {} + _adder = lambda key, value: item.update({key: value}) + else: + item = {} + _adder = lambda key, value: item.update({name: value}) + for subTag in tag(recursive=False): + subTagKey = tagToKey(subTag) + # Exclude dinamically generated keys. + if tag.name in _MAP_TOP_OBJ and subTagKey in item._additional_keys(): + continue + subItem = parseTags(subTag, _topLevel=False, _as=_as, + _infoset2keys=_infoset2keys, _key2infoset=_key2infoset) + if subItem: + _adder(subTagKey, subItem) + if _topLevel and name in _MAP_TOP_OBJ: + # Add information about 'info sets', but only to the top-level object. + item.infoset2keys = _infoset2keys + item.key2infoset = _key2infoset + item.current_info = _infoset2keys.keys() + return item + + +def parseXML(xml): + """Parse a XML string, returning an appropriate object (usually an + instance of a subclass of _Container.""" + xmlObj = BeautifulSoup.BeautifulStoneSoup(xml, + convertEntities=BeautifulSoup.BeautifulStoneSoup.XHTML_ENTITIES) + if xmlObj: + mainTag = xmlObj.find() + if mainTag: + return parseTags(mainTag) + return None + + diff --git a/libs/imdb/locale/__init__.py b/libs/imdb/locale/__init__.py new file mode 100644 index 0000000..9bc2e46 --- /dev/null +++ b/libs/imdb/locale/__init__.py @@ -0,0 +1,29 @@ +""" +locale package (imdb package). + +This package provides scripts and files for internationalization +of IMDbPY. + +Copyright 2009 H. 
Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import gettext +import os + +LOCALE_DIR = os.path.dirname(__file__) + +gettext.bindtextdomain('imdbpy', LOCALE_DIR) diff --git a/libs/imdb/locale/generatepot.py b/libs/imdb/locale/generatepot.py new file mode 100644 index 0000000..282f7d4 --- /dev/null +++ b/libs/imdb/locale/generatepot.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +""" +generatepot.py script. + +This script generates the imdbpy.pot file, from the DTD. + +Copyright 2009 H. Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import re +import sys + +from datetime import datetime as dt + +DEFAULT_MESSAGES = { } + +ELEMENT_PATTERN = r"""\n" +"Language-Team: TEAM NAME \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"Language-Code: en\n" +"Language-Name: English\n" +"Preferred-Encodings: utf-8\n" +"Domain: imdbpy\n" +""" + +if len(sys.argv) != 2: + print "Usage: %s dtd_file" % sys.argv[0] + sys.exit() + +dtdfilename = sys.argv[1] +dtd = open(dtdfilename).read() +elements = re_element.findall(dtd) +uniq = set(elements) +elements = list(uniq) + +print POT_HEADER_TEMPLATE % { + 'now': dt.strftime(dt.now(), "%Y-%m-%d %H:%M+0000") +} +for element in sorted(elements): + if element in DEFAULT_MESSAGES: + print '# Default: %s' % DEFAULT_MESSAGES[element] + else: + print '# Default: %s' % element.replace('-', ' ').capitalize() + print 'msgid "%s"' % element + print 'msgstr ""' + # use this part instead of the line above to generate the po file for English + #if element in DEFAULT_MESSAGES: + # print 'msgstr "%s"' % DEFAULT_MESSAGES[element] + #else: + # print 'msgstr "%s"' % element.replace('-', ' ').capitalize() + print + diff --git a/libs/imdb/locale/msgfmt.py b/libs/imdb/locale/msgfmt.py new file mode 100644 index 0000000..9e0ab74 --- /dev/null +++ b/libs/imdb/locale/msgfmt.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python +# -*- coding: iso-8859-1 -*- +"""Generate binary message catalog from textual translation description. + +This program converts a textual Uniforum-style message catalog (.po file) into +a binary GNU catalog (.mo file). This is essentially the same function as the +GNU msgfmt program, however, it is a simpler implementation. 
+ +Usage: msgfmt.py [OPTIONS] filename.po + +Options: + -o file + --output-file=file + Specify the output file to write to. If omitted, output will go to a + file named filename.mo (based off the input file name). + + -h + --help + Print this message and exit. + + -V + --version + Display version information and exit. + +Written by Martin v. Löwis , +refactored / fixed by Thomas Waldmann . +""" + +import sys, os +import getopt, struct, array + +__version__ = "1.3" + +class SyntaxErrorException(Exception): + """raised when having trouble parsing the po file content""" + pass + +class MsgFmt(object): + """transform .po -> .mo format""" + def __init__(self): + self.messages = {} + + def make_filenames(self, filename, outfile=None): + """Compute .mo name from .po name or language""" + if filename.endswith('.po'): + infile = filename + else: + infile = filename + '.po' + if outfile is None: + outfile = os.path.splitext(infile)[0] + '.mo' + return infile, outfile + + def add(self, id, str, fuzzy): + """Add a non-fuzzy translation to the dictionary.""" + if not fuzzy and str: + self.messages[id] = str + + def read_po(self, lines): + ID = 1 + STR = 2 + section = None + fuzzy = False + line_no = 0 + msgid = msgstr = '' + # Parse the catalog + for line in lines: + line_no += 1 + # If we get a comment line after a msgstr, this is a new entry + if line.startswith('#') and section == STR: + self.add(msgid, msgstr, fuzzy) + section = None + fuzzy = False + # Record a fuzzy mark + if line.startswith('#,') and 'fuzzy' in line: + fuzzy = True + # Skip comments + if line.startswith('#'): + continue + # Now we are in a msgid section, output previous section + if line.startswith('msgid'): + if section == STR: + self.add(msgid, msgstr, fuzzy) + fuzzy = False + section = ID + line = line[5:] + msgid = msgstr = '' + # Now we are in a msgstr section + elif line.startswith('msgstr'): + section = STR + line = line[6:] + # Skip empty lines + line = line.strip() + if not line: + continue + # 
XXX: Does this always follow Python escape semantics? + line = eval(line) + if section == ID: + msgid += line + elif section == STR: + msgstr += line + else: + raise SyntaxErrorException('Syntax error on line %d, before:\n%s' % (line_no, line)) + # Add last entry + if section == STR: + self.add(msgid, msgstr, fuzzy) + + def generate_mo(self): + """Return the generated output.""" + keys = self.messages.keys() + # the keys are sorted in the .mo file + keys.sort() + offsets = [] + ids = '' + strs = '' + for id in keys: + # For each string, we need size and file offset. Each string is NUL + # terminated; the NUL does not count into the size. + offsets.append((len(ids), len(id), len(strs), len(self.messages[id]))) + ids += id + '\0' + strs += self.messages[id] + '\0' + output = [] + # The header is 7 32-bit unsigned integers. We don't use hash tables, so + # the keys start right after the index tables. + # translated string. + keystart = 7*4 + 16*len(keys) + # and the values start after the keys + valuestart = keystart + len(ids) + koffsets = [] + voffsets = [] + # The string table first has the list of keys, then the list of values. + # Each entry has first the size of the string, then the file offset. 
+ for o1, l1, o2, l2 in offsets: + koffsets += [l1, o1 + keystart] + voffsets += [l2, o2 + valuestart] + offsets = koffsets + voffsets + output.append(struct.pack("Iiiiiii", + 0x950412deL, # Magic + 0, # Version + len(keys), # # of entries + 7*4, # start of key index + 7*4 + len(keys)*8, # start of value index + 0, 0)) # size and offset of hash table + output.append(array.array("i", offsets).tostring()) + output.append(ids) + output.append(strs) + return ''.join(output) + + +def make(filename, outfile): + mf = MsgFmt() + infile, outfile = mf.make_filenames(filename, outfile) + try: + lines = file(infile).readlines() + except IOError, msg: + print >> sys.stderr, msg + sys.exit(1) + try: + mf.read_po(lines) + output = mf.generate_mo() + except SyntaxErrorException, msg: + print >> sys.stderr, msg + + try: + open(outfile, "wb").write(output) + except IOError, msg: + print >> sys.stderr, msg + + +def usage(code, msg=''): + print >> sys.stderr, __doc__ + if msg: + print >> sys.stderr, msg + sys.exit(code) + + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'hVo:', ['help', 'version', 'output-file=']) + except getopt.error, msg: + usage(1, msg) + + outfile = None + # parse options + for opt, arg in opts: + if opt in ('-h', '--help'): + usage(0) + elif opt in ('-V', '--version'): + print >> sys.stderr, "msgfmt.py", __version__ + sys.exit(0) + elif opt in ('-o', '--output-file'): + outfile = arg + # do it + if not args: + print >> sys.stderr, 'No input file given' + print >> sys.stderr, "Try `msgfmt --help' for more information." + return + + for filename in args: + make(filename, outfile) + + +if __name__ == '__main__': + main() + diff --git a/libs/imdb/locale/rebuildmo.py b/libs/imdb/locale/rebuildmo.py new file mode 100644 index 0000000..b72a74c --- /dev/null +++ b/libs/imdb/locale/rebuildmo.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +""" +rebuildmo.py script. + +This script builds the .mo files, from the .po files. + +Copyright 2009 H. 
Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import glob +import msgfmt +import os + +#LOCALE_DIR = os.path.dirname(__file__) + +def rebuildmo(): + lang_glob = 'imdbpy-*.po' + created = [] + for input_file in glob.glob(lang_glob): + lang = input_file[7:-3] + if not os.path.exists(lang): + os.mkdir(lang) + mo_dir = os.path.join(lang, 'LC_MESSAGES') + if not os.path.exists(mo_dir): + os.mkdir(mo_dir) + output_file = os.path.join(mo_dir, 'imdbpy.mo') + msgfmt.make(input_file, output_file) + created.append(lang) + return created + + +if __name__ == '__main__': + languages = rebuildmo() + print 'Created locale for: %s.' % ' '.join(languages) + diff --git a/libs/imdb/parser/__init__.py b/libs/imdb/parser/__init__.py new file mode 100644 index 0000000..4c3c90a --- /dev/null +++ b/libs/imdb/parser/__init__.py @@ -0,0 +1,28 @@ +""" +parser package (imdb package). + +This package provides various parsers to access IMDb data (e.g.: a +parser for the web/http interface, a parser for the SQL database +interface, etc.). +So far, the http/httpThin, mobile and sql parsers are implemented. 
+ +Copyright 2004-2009 Davide Alberani + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +__all__ = ['http', 'mobile', 'sql'] + + diff --git a/libs/imdb/parser/http/__init__.py b/libs/imdb/parser/http/__init__.py new file mode 100644 index 0000000..ade424f --- /dev/null +++ b/libs/imdb/parser/http/__init__.py @@ -0,0 +1,771 @@ +""" +parser.http package (imdb package). + +This package provides the IMDbHTTPAccessSystem class used to access +IMDb's data through the web interface. +the imdb.IMDb function will return an instance of this class when +called with the 'accessSystem' argument set to "http" or "web" +or "html" (this is the default). + +Copyright 2004-2010 Davide Alberani + 2008 H. Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import sys +import logging +from urllib import FancyURLopener, quote_plus +from codecs import lookup + +from imdb import IMDbBase, imdbURL_movie_main, imdbURL_person_main, \ + imdbURL_character_main, imdbURL_company_main, \ + imdbURL_keyword_main, imdbURL_find, imdbURL_top250, \ + imdbURL_bottom100 +from imdb.utils import analyze_title +from imdb._exceptions import IMDbDataAccessError, IMDbParserError + +import searchMovieParser +import searchPersonParser +import searchCharacterParser +import searchCompanyParser +import searchKeywordParser +import movieParser +import personParser +import characterParser +import companyParser +import topBottomParser + +# Logger for miscellaneous functions. +_aux_logger = logging.getLogger('imdbpy.parser.http.aux') + +IN_GAE = False +try: + import google.appengine + IN_GAE = True + _aux_logger.info('IMDbPY is running in the Google App Engine environment') +except ImportError: + pass + + +class _ModuleProxy: + """A proxy to instantiate and access parsers.""" + def __init__(self, module, defaultKeys=None, oldParsers=False, + useModule=None, fallBackToNew=False): + """Initialize a proxy for the given module; defaultKeys, if set, + muste be a dictionary of values to set for instanced objects.""" + if oldParsers or fallBackToNew: + _aux_logger.warn('The old set of parsers was removed; falling ' \ + 'back to the new parsers.') + self.useModule = useModule + if defaultKeys is None: + defaultKeys = {} + self._defaultKeys = defaultKeys + self._module = module + + def __getattr__(self, name): + """Called only when no look-up is found.""" + _sm = self._module + # Read the _OBJECTS dictionary to build the asked parser. + if name in _sm._OBJECTS: + _entry = _sm._OBJECTS[name] + # Initialize the parser. 
+ kwds = {} + if self.useModule: + kwds = {'useModule': self.useModule} + parserClass = _entry[0][0] + obj = parserClass(**kwds) + attrsToSet = self._defaultKeys.copy() + attrsToSet.update(_entry[1] or {}) + # Set attribute to the object. + for key in attrsToSet: + setattr(obj, key, attrsToSet[key]) + setattr(self, name, obj) + return obj + return getattr(_sm, name) + + +PY_VERSION = sys.version_info[:2] + + +# The cookies for the "adult" search. +# Please don't mess with these account. +# Old 'IMDbPY' account. +_old_cookie_id = 'boM2bYxz9MCsOnH9gZ0S9QHs12NWrNdApxsls1Vb5/NGrNdjcHx3dUas10UASoAjVEvhAbGagERgOpNkAPvxdbfKwaV2ikEj9SzXY1WPxABmDKQwdqzwRbM+12NSeJFGUEx3F8as10WwidLzVshDtxaPIbP13NdjVS9UZTYqgTVGrNcT9vyXU1' +_old_cookie_uu = '3M3AXsquTU5Gur/Svik+ewflPm5Rk2ieY3BIPlLjyK3C0Dp9F8UoPgbTyKiGtZp4x1X+uAUGKD7BM2g+dVd8eqEzDErCoYvdcvGLvVLAen1y08hNQtALjVKAe+1hM8g9QbNonlG1/t4S82ieUsBbrSIQbq1yhV6tZ6ArvSbA7rgHc8n5AdReyAmDaJ5Wm/ee3VDoCnGj/LlBs2ieUZNorhHDKK5Q==' +# New 'IMDbPYweb' account. +_cookie_id = 'rH1jNAkjTlNXvHolvBVBsgaPICNZbNdjVjzFwzas9JRmusdjVoqBs/Hs12NR+1WFxEoR9bGKEDUg6sNlADqXwkas12N131Rwdb+UQNGKN8PWrNdjcdqBQVLq8mbGDHP3hqzxhbD692NQi9D0JjpBtRaPIbP1zNdjUOqENQYv1ADWrNcT9vyXU1' +_cookie_uu = 'su4/m8cho4c6HP+W1qgq6wchOmhnF0w+lIWvHjRUPJ6nRA9sccEafjGADJ6hQGrMd4GKqLcz2X4z5+w+M4OIKnRn7FpENH7dxDQu3bQEHyx0ZEyeRFTPHfQEX03XF+yeN1dsPpcXaqjUZAw+lGRfXRQEfz3RIX9IgVEffdBAHw2wQXyf9xdMPrQELw0QNB8dsffsqcdQemjPB0w+moLcPh0JrKrHJ9hjBzdMPpcXTH7XRwwOk=' + + +class _FakeURLOpener(object): + """Fake URLOpener object, used to return empty strings instead of + errors. 
+ """ + def __init__(self, url, headers): + self.url = url + self.headers = headers + def read(self, *args, **kwds): return '' + def close(self, *args, **kwds): pass + def info(self, *args, **kwds): return self.headers + + +class IMDbURLopener(FancyURLopener): + """Fetch web pages and handle errors.""" + _logger = logging.getLogger('imdbpy.parser.http.urlopener') + + def __init__(self, *args, **kwargs): + self._last_url = u'' + FancyURLopener.__init__(self, *args, **kwargs) + # Headers to add to every request. + # XXX: IMDb's web server doesn't like urllib-based programs, + # so lets fake to be Mozilla. + # Wow! I'm shocked by my total lack of ethic! + for header in ('User-Agent', 'User-agent', 'user-agent'): + self.del_header(header) + self.set_header('User-Agent', 'Mozilla/5.0') + # XXX: This class is used also to perform "Exact Primary + # [Title|Name]" searches, and so by default the cookie is set. + c_header = 'id=%s; uu=%s' % (_cookie_id, _cookie_uu) + self.set_header('Cookie', c_header) + + def get_proxy(self): + """Return the used proxy, or an empty string.""" + return self.proxies.get('http', '') + + def set_proxy(self, proxy): + """Set the proxy.""" + if not proxy: + if self.proxies.has_key('http'): + del self.proxies['http'] + else: + if not proxy.lower().startswith('http://'): + proxy = 'http://%s' % proxy + self.proxies['http'] = proxy + + def set_header(self, header, value, _overwrite=True): + """Set a default header.""" + if _overwrite: + self.del_header(header) + self.addheaders.append((header, value)) + + def del_header(self, header): + """Remove a default header.""" + for index in xrange(len(self.addheaders)): + if self.addheaders[index][0] == header: + del self.addheaders[index] + break + + def retrieve_unicode(self, url, size=-1): + """Retrieves the given URL, and returns a unicode string, + trying to guess the encoding of the data (assuming latin_1 + by default)""" + encode = None + try: + if size != -1: + self.set_header('Range', 'bytes=0-%d' 
% size) + uopener = self.open(url) + kwds = {} + if PY_VERSION > (2, 3) and not IN_GAE: + kwds['size'] = size + content = uopener.read(**kwds) + self._last_url = uopener.url + # Maybe the server is so nice to tell us the charset... + server_encode = uopener.info().getparam('charset') + # Otherwise, look at the content-type HTML meta tag. + if server_encode is None and content: + first_bytes = content[:512] + begin_h = first_bytes.find('text/html; charset=') + if begin_h != -1: + end_h = first_bytes[19+begin_h:].find('"') + if end_h != -1: + server_encode = first_bytes[19+begin_h:19+begin_h+end_h] + if server_encode: + try: + if lookup(server_encode): + encode = server_encode + except (LookupError, ValueError, TypeError): + pass + uopener.close() + if size != -1: + self.del_header('Range') + self.close() + except IOError, e: + if size != -1: + # Ensure that the Range header is removed. + self.del_header('Range') + raise IMDbDataAccessError, {'errcode': e.errno, + 'errmsg': str(e.strerror), + 'url': url, + 'proxy': self.get_proxy(), + 'exception type': 'IOError', + 'original exception': e} + if encode is None: + encode = 'latin_1' + # The detection of the encoding is error prone... 
+ self._logger.warn('Unable to detect the encoding of the retrieved ' + 'page [%s]; falling back to default latin1.', encode) + ##print unicode(content, encode, 'replace').encode('utf8') + return unicode(content, encode, 'replace') + + def http_error_default(self, url, fp, errcode, errmsg, headers): + if errcode == 404: + self._logger.warn('404 code returned for %s: %s (headers: %s)', + url, errmsg, headers) + return _FakeURLOpener(url, headers) + raise IMDbDataAccessError, {'url': 'http:%s' % url, + 'errcode': errcode, + 'errmsg': errmsg, + 'headers': headers, + 'error type': 'http_error_default', + 'proxy': self.get_proxy()} + + def open_unknown(self, fullurl, data=None): + raise IMDbDataAccessError, {'fullurl': fullurl, + 'data': str(data), + 'error type': 'open_unknown', + 'proxy': self.get_proxy()} + + def open_unknown_proxy(self, proxy, fullurl, data=None): + raise IMDbDataAccessError, {'proxy': str(proxy), + 'fullurl': fullurl, + 'error type': 'open_unknown_proxy', + 'data': str(data)} + + +class IMDbHTTPAccessSystem(IMDbBase): + """The class used to access IMDb's data through the web.""" + + accessSystem = 'http' + _http_logger = logging.getLogger('imdbpy.parser.http') + + def __init__(self, isThin=0, adultSearch=1, proxy=-1, oldParsers=False, + fallBackToNew=False, useModule=None, cookie_id=-1, + cookie_uu=None, *arguments, **keywords): + """Initialize the access system.""" + IMDbBase.__init__(self, *arguments, **keywords) + self.urlOpener = IMDbURLopener() + # When isThin is set, we're parsing the "maindetails" page + # of a movie (instead of the "combined" page) and movie/person + # references are not collected if no defaultModFunct is provided. 
+ self.isThin = isThin + self._getRefs = True + self._mdparse = False + if isThin: + if self.accessSystem == 'http': + self.accessSystem = 'httpThin' + self._mdparse = True + if self._defModFunct is None: + self._getRefs = False + from imdb.utils import modNull + self._defModFunct = modNull + self.do_adult_search(adultSearch) + if cookie_id != -1: + if cookie_id is None: + self.del_cookies() + elif cookie_uu is not None: + self.set_cookies(cookie_id, cookie_uu) + if proxy != -1: + self.set_proxy(proxy) + if useModule is not None: + if not isinstance(useModule, (list, tuple)) and ',' in useModule: + useModule = useModule.split(',') + _def = {'_modFunct': self._defModFunct, '_as': self.accessSystem} + # Proxy objects. + self.smProxy = _ModuleProxy(searchMovieParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.spProxy = _ModuleProxy(searchPersonParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.scProxy = _ModuleProxy(searchCharacterParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.scompProxy = _ModuleProxy(searchCompanyParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.skProxy = _ModuleProxy(searchKeywordParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.mProxy = _ModuleProxy(movieParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.pProxy = _ModuleProxy(personParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.cProxy = _ModuleProxy(characterParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + self.compProxy = _ModuleProxy(companyParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + 
fallBackToNew=fallBackToNew) + self.topBottomProxy = _ModuleProxy(topBottomParser, defaultKeys=_def, + oldParsers=oldParsers, useModule=useModule, + fallBackToNew=fallBackToNew) + + def _normalize_movieID(self, movieID): + """Normalize the given movieID.""" + try: + return '%07d' % int(movieID) + except ValueError, e: + raise IMDbParserError, 'invalid movieID "%s": %s' % (movieID, e) + + def _normalize_personID(self, personID): + """Normalize the given personID.""" + try: + return '%07d' % int(personID) + except ValueError, e: + raise IMDbParserError, 'invalid personID "%s": %s' % (personID, e) + + def _normalize_characterID(self, characterID): + """Normalize the given characterID.""" + try: + return '%07d' % int(characterID) + except ValueError, e: + raise IMDbParserError, 'invalid characterID "%s": %s' % \ + (characterID, e) + + def _normalize_companyID(self, companyID): + """Normalize the given companyID.""" + try: + return '%07d' % int(companyID) + except ValueError, e: + raise IMDbParserError, 'invalid companyID "%s": %s' % \ + (companyID, e) + + def get_imdbMovieID(self, movieID): + """Translate a movieID in an imdbID; in this implementation + the movieID _is_ the imdbID. + """ + return movieID + + def get_imdbPersonID(self, personID): + """Translate a personID in an imdbID; in this implementation + the personID _is_ the imdbID. + """ + return personID + + def get_imdbCharacterID(self, characterID): + """Translate a characterID in an imdbID; in this implementation + the characterID _is_ the imdbID. + """ + return characterID + + def get_imdbCompanyID(self, companyID): + """Translate a companyID in an imdbID; in this implementation + the companyID _is_ the imdbID. + """ + return companyID + + def get_proxy(self): + """Return the used proxy or an empty string.""" + return self.urlOpener.get_proxy() + + def set_proxy(self, proxy): + """Set the web proxy to use. 
+ + It should be a string like 'http://localhost:8080/'; if the + string is empty, no proxy will be used. + If set, the value of the environment variable HTTP_PROXY is + automatically used. + """ + self.urlOpener.set_proxy(proxy) + + def set_cookies(self, cookie_id, cookie_uu): + """Set a cookie to access an IMDb's account.""" + c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu) + self.urlOpener.set_header('Cookie', c_header) + + def del_cookies(self): + """Remove the used cookie.""" + self.urlOpener.del_header('Cookie') + + def do_adult_search(self, doAdult, + cookie_id=_cookie_id, cookie_uu=_cookie_uu): + """If doAdult is true, 'adult' movies are included in the + search results; cookie_id and cookie_uu are optional + parameters to select a specific account (see your cookie + or cookies.txt file.""" + if doAdult: + self.set_cookies(cookie_id, cookie_uu) + #c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu) + #self.urlOpener.set_header('Cookie', c_header) + else: + self.urlOpener.del_header('Cookie') + + def _retrieve(self, url, size=-1): + """Retrieve the given URL.""" + ##print url + self._http_logger.debug('fetching url %s (size: %d)', url, size) + return self.urlOpener.retrieve_unicode(url, size=size) + + def _get_search_content(self, kind, ton, results): + """Retrieve the web page for a given search. + kind can be 'tt' (for titles), 'nm' (for names), + 'char' (for characters) or 'co' (for companies). + ton is the title or the name to search. 
+ results is the maximum number of results to be retrieved.""" + if isinstance(ton, unicode): + ton = ton.encode('utf-8') + ##params = 'q=%s&%s=on&mx=%s' % (quote_plus(ton), kind, str(results)) + params = 's=%s;mx=%s;q=%s' % (kind, str(results), quote_plus(ton)) + if kind == 'ep': + params = params.replace('s=ep;', 's=tt;ttype=ep;', 1) + cont = self._retrieve(imdbURL_find % params) + #print 'URL:', imdbURL_find % params + if cont.find('Your search returned more than') == -1 or \ + cont.find("displayed the exact matches") == -1: + return cont + # The retrieved page contains no results, because too many + # titles or names contain the string we're looking for. + params = 's=%s;q=%s;lm=0' % (kind, quote_plus(ton)) + size = 22528 + results * 512 + return self._retrieve(imdbURL_find % params, size=size) + + def _search_movie(self, title, results): + # The URL of the query. + # XXX: To retrieve the complete results list: + # params = urllib.urlencode({'more': 'tt', 'q': title}) + ##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title}) + ##params = 'q=%s&tt=on&mx=%s' % (quote_plus(title), str(results)) + ##cont = self._retrieve(imdbURL_find % params) + cont = self._get_search_content('tt', title, results) + return self.smProxy.search_movie_parser.parse(cont, results=results)['data'] + + def _search_episode(self, title, results): + t_dict = analyze_title(title) + if t_dict['kind'] == 'episode': + title = t_dict['title'] + cont = self._get_search_content('ep', title, results) + return self.smProxy.search_movie_parser.parse(cont, results=results)['data'] + + def get_movie_main(self, movieID): + if not self.isThin: + cont = self._retrieve(imdbURL_movie_main % movieID + 'combined') + else: + cont = self._retrieve(imdbURL_movie_main % movieID + 'maindetails') + return self.mProxy.movie_parser.parse(cont, mdparse=self._mdparse) + + def get_movie_full_credits(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'fullcredits') + return 
self.mProxy.movie_parser.parse(cont) + + def get_movie_plot(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'plotsummary') + return self.mProxy.plot_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_awards(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'awards') + return self.mProxy.movie_awards_parser.parse(cont) + + def get_movie_taglines(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'taglines') + return self.mProxy.taglines_parser.parse(cont) + + def get_movie_keywords(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'keywords') + return self.mProxy.keywords_parser.parse(cont) + + def get_movie_alternate_versions(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'alternateversions') + return self.mProxy.alternateversions_parser.parse(cont, + getRefs=self._getRefs) + + def get_movie_crazy_credits(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'crazycredits') + return self.mProxy.crazycredits_parser.parse(cont, + getRefs=self._getRefs) + + def get_movie_goofs(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'goofs') + return self.mProxy.goofs_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_quotes(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'quotes') + return self.mProxy.quotes_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_release_dates(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'releaseinfo') + ret = self.mProxy.releasedates_parser.parse(cont) + ret['info sets'] = ('release dates', 'akas') + return ret + get_movie_akas = get_movie_release_dates + + def get_movie_vote_details(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'ratings') + return self.mProxy.ratings_parser.parse(cont) + + def get_movie_official_sites(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 
'officialsites') + return self.mProxy.officialsites_parser.parse(cont) + + def get_movie_trivia(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'trivia') + return self.mProxy.trivia_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_connections(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'movieconnections') + return self.mProxy.connections_parser.parse(cont) + + def get_movie_technical(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'technical') + return self.mProxy.tech_parser.parse(cont) + + def get_movie_business(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'business') + return self.mProxy.business_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_literature(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'literature') + return self.mProxy.literature_parser.parse(cont) + + def get_movie_locations(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'locations') + return self.mProxy.locations_parser.parse(cont) + + def get_movie_soundtrack(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'soundtrack') + return self.mProxy.soundtrack_parser.parse(cont) + + def get_movie_dvd(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'dvd') + return self.mProxy.dvd_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_recommendations(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'recommendations') + return self.mProxy.rec_parser.parse(cont) + + def get_movie_external_reviews(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'externalreviews') + return self.mProxy.externalrev_parser.parse(cont) + + def get_movie_newsgroup_reviews(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'newsgroupreviews') + return self.mProxy.newsgrouprev_parser.parse(cont) + + def get_movie_misc_sites(self, movieID): + cont = 
self._retrieve(imdbURL_movie_main % movieID + 'miscsites') + return self.mProxy.misclinks_parser.parse(cont) + + def get_movie_sound_clips(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'soundsites') + return self.mProxy.soundclips_parser.parse(cont) + + def get_movie_video_clips(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'videosites') + return self.mProxy.videoclips_parser.parse(cont) + + def get_movie_photo_sites(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'photosites') + return self.mProxy.photosites_parser.parse(cont) + + def get_movie_news(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'news') + return self.mProxy.news_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_amazon_reviews(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'amazon') + return self.mProxy.amazonrev_parser.parse(cont) + + def get_movie_guests(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'epcast') + return self.mProxy.episodes_cast_parser.parse(cont) + get_movie_episodes_cast = get_movie_guests + + def get_movie_merchandising_links(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'sales') + return self.mProxy.sales_parser.parse(cont) + + def get_movie_episodes(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'episodes') + data_d = self.mProxy.episodes_parser.parse(cont) + # set movie['episode of'].movieID for every episode of the series. + if data_d.get('data', {}).has_key('episodes'): + nr_eps = 0 + for season in data_d['data']['episodes'].values(): + for episode in season.values(): + episode['episode of'].movieID = movieID + nr_eps += 1 + # Number of episodes. 
+ if nr_eps: + data_d['data']['number of episodes'] = nr_eps + return data_d + + def get_movie_episodes_rating(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'epdate') + data_d = self.mProxy.eprating_parser.parse(cont) + # set movie['episode of'].movieID for every episode. + if data_d.get('data', {}).has_key('episodes rating'): + for item in data_d['data']['episodes rating']: + episode = item['episode'] + episode['episode of'].movieID = movieID + return data_d + + def get_movie_faqs(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'faq') + return self.mProxy.movie_faqs_parser.parse(cont, getRefs=self._getRefs) + + def get_movie_airing(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'tvschedule') + return self.mProxy.airing_parser.parse(cont) + + get_movie_tv_schedule = get_movie_airing + + def get_movie_synopsis(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'synopsis') + return self.mProxy.synopsis_parser.parse(cont) + + def get_movie_parents_guide(self, movieID): + cont = self._retrieve(imdbURL_movie_main % movieID + 'parentalguide') + return self.mProxy.parentsguide_parser.parse(cont) + + def _search_person(self, name, results): + # The URL of the query. 
+ # XXX: To retrieve the complete results list: + # params = urllib.urlencode({'more': 'nm', 'q': name}) + ##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name}) + #params = 'q=%s&nm=on&mx=%s' % (quote_plus(name), str(results)) + #cont = self._retrieve(imdbURL_find % params) + cont = self._get_search_content('nm', name, results) + return self.spProxy.search_person_parser.parse(cont, results=results)['data'] + + def get_person_main(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'maindetails') + ret = self.pProxy.maindetails_parser.parse(cont) + ret['info sets'] = ('main', 'filmography') + return ret + + def get_person_filmography(self, personID): + return self.get_person_main(personID) + + def get_person_biography(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'bio') + return self.pProxy.bio_parser.parse(cont, getRefs=self._getRefs) + + def get_person_awards(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'awards') + return self.pProxy.person_awards_parser.parse(cont) + + def get_person_other_works(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'otherworks') + return self.pProxy.otherworks_parser.parse(cont, getRefs=self._getRefs) + + #def get_person_agent(self, personID): + # cont = self._retrieve(imdbURL_person_main % personID + 'agent') + # return self.pProxy.agent_parser.parse(cont) + + def get_person_publicity(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'publicity') + return self.pProxy.publicity_parser.parse(cont) + + def get_person_official_sites(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'officialsites') + return self.pProxy.person_officialsites_parser.parse(cont) + + def get_person_news(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'news') + return self.pProxy.news_parser.parse(cont) + + def get_person_episodes(self, personID): + cont = 
self._retrieve(imdbURL_person_main % personID + 'filmoseries') + return self.pProxy.person_series_parser.parse(cont) + + def get_person_merchandising_links(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'forsale') + return self.pProxy.sales_parser.parse(cont) + + def get_person_genres_links(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'filmogenre') + return self.pProxy.person_genres_parser.parse(cont) + + def get_person_keywords_links(self, personID): + cont = self._retrieve(imdbURL_person_main % personID + 'filmokey') + return self.pProxy.person_keywords_parser.parse(cont) + + def _search_character(self, name, results): + cont = self._get_search_content('char', name, results) + return self.scProxy.search_character_parser.parse(cont, results=results)['data'] + + def get_character_main(self, characterID): + cont = self._retrieve(imdbURL_character_main % characterID) + ret = self.cProxy.character_main_parser.parse(cont) + ret['info sets'] = ('main', 'filmography') + return ret + + get_character_filmography = get_character_main + + def get_character_biography(self, characterID): + cont = self._retrieve(imdbURL_character_main % characterID + 'bio') + return self.cProxy.character_bio_parser.parse(cont, + getRefs=self._getRefs) + + def get_character_episodes(self, characterID): + cont = self._retrieve(imdbURL_character_main % characterID + + 'filmoseries') + return self.cProxy.character_series_parser.parse(cont) + + def get_character_quotes(self, characterID): + cont = self._retrieve(imdbURL_character_main % characterID + 'quotes') + return self.cProxy.character_quotes_parser.parse(cont, + getRefs=self._getRefs) + + def _search_company(self, name, results): + cont = self._get_search_content('co', name, results) + url = self.urlOpener._last_url + return self.scompProxy.search_company_parser.parse(cont, url=url, + results=results)['data'] + + def get_company_main(self, companyID): + cont = 
self._retrieve(imdbURL_company_main % companyID) + ret = self.compProxy.company_main_parser.parse(cont) + return ret + + def _search_keyword(self, keyword, results): + # XXX: the IMDb web server seems to have some serious problem with + # non-ascii keyword. + # E.g.: http://akas.imdb.com/keyword/fianc%E9/ + # will return a 500 Internal Server Error: Redirect Recursion. + keyword = keyword.encode('utf8', 'ignore') + try: + cont = self._get_search_content('kw', keyword, results) + except IMDbDataAccessError: + self._http_logger.warn('unable to search for keyword %s', keyword, + exc_info=True) + return [] + return self.skProxy.search_keyword_parser.parse(cont, results=results)['data'] + + def _get_keyword(self, keyword, results): + keyword = keyword.encode('utf8', 'ignore') + try: + cont = self._retrieve(imdbURL_keyword_main % keyword) + except IMDbDataAccessError: + self._http_logger.warn('unable to get keyword %s', keyword, + exc_info=True) + return [] + return self.skProxy.search_moviekeyword_parser.parse(cont, results=results)['data'] + + def _get_top_bottom_movies(self, kind): + if kind == 'top': + parser = self.topBottomProxy.top250_parser + url = imdbURL_top250 + elif kind == 'bottom': + parser = self.topBottomProxy.bottom100_parser + url = imdbURL_bottom100 + else: + return [] + cont = self._retrieve(url) + return parser.parse(cont)['data'] + + diff --git a/libs/imdb/parser/http/bsouplxml/__init__.py b/libs/imdb/parser/http/bsouplxml/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/libs/imdb/parser/http/bsouplxml/_bsoup.py b/libs/imdb/parser/http/bsouplxml/_bsoup.py new file mode 100644 index 0000000..afab5da --- /dev/null +++ b/libs/imdb/parser/http/bsouplxml/_bsoup.py @@ -0,0 +1,1970 @@ +""" +imdb.parser.http._bsoup module (imdb.parser.http package). +This is the BeautifulSoup.py module, not modified; it's included here +so that it's not an external dependency. 
+ +Beautiful Soup +Elixir and Tonic +"The Screen-Scraper's Friend" +http://www.crummy.com/software/BeautifulSoup/ + +Beautiful Soup parses a (possibly invalid) XML or HTML document into a +tree representation. It provides methods and Pythonic idioms that make +it easy to navigate, search, and modify the tree. + +A well-formed XML/HTML document yields a well-formed data +structure. An ill-formed XML/HTML document yields a correspondingly +ill-formed data structure. If your document is only locally +well-formed, you can use this library to find and process the +well-formed part of it. + +Beautiful Soup works with Python 2.2 and up. It has no external +dependencies, but you'll have more success at converting data to UTF-8 +if you also install these three packages: + +* chardet, for auto-detecting character encodings + http://chardet.feedparser.org/ +* cjkcodecs and iconv_codec, which add more encodings to the ones supported + by stock Python. + http://cjkpython.i18n.org/ + +Beautiful Soup defines classes for two main parsing strategies: + + * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific + language that kind of looks like XML. + + * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid + or invalid. This class has web browser-like heuristics for + obtaining a sensible parse tree in the face of common HTML errors. + +Beautiful Soup also defines a class (UnicodeDammit) for autodetecting +the encoding of an HTML or XML document, and converting it to +Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. + +For more than you ever wanted to know about Beautiful Soup, see the +documentation: +http://www.crummy.com/software/BeautifulSoup/documentation.html + +Here, have some legalese: + +Copyright (c) 2004-2008, Leonard Richardson + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the the Beautiful Soup Consortium and All + Night Kosher Bakery nor the names of its contributors may be + used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
+ +""" +from __future__ import generators + +__author__ = "Leonard Richardson (leonardr@segfault.org)" +__version__ = "3.0.7a" +__copyright__ = "Copyright (c) 2004-2008 Leonard Richardson" +__license__ = "New-style BSD" + +from sgmllib import SGMLParser, SGMLParseError +import codecs +import markupbase +import types +import re +import sgmllib +try: + from htmlentitydefs import name2codepoint +except ImportError: + name2codepoint = {} +try: + set +except NameError: + from sets import Set as set + +#These hacks make Beautiful Soup able to parse XML with namespaces +sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') +markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match + +DEFAULT_OUTPUT_ENCODING = "utf-8" + +# First, the classes that represent markup elements. + +class PageElement: + """Contains the navigational information for some part of the page + (either a tag or a piece of text)""" + + def setup(self, parent=None, previous=None): + """Sets up the initial relations between this element and + other elements.""" + self.parent = parent + self.previous = previous + self.next = None + self.previousSibling = None + self.nextSibling = None + if self.parent and self.parent.contents: + self.previousSibling = self.parent.contents[-1] + self.previousSibling.nextSibling = self + + def replaceWith(self, replaceWith): + oldParent = self.parent + myIndex = self.parent.contents.index(self) + if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: + # We're replacing this element with one of its siblings. + index = self.parent.contents.index(replaceWith) + if index and index < myIndex: + # Furthermore, it comes before this element. That + # means that when we extract it, the index of this + # element will change. 
+ myIndex = myIndex - 1 + self.extract() + oldParent.insert(myIndex, replaceWith) + + def extract(self): + """Destructively rips this element out of the tree.""" + if self.parent: + try: + self.parent.contents.remove(self) + except ValueError: + pass + + #Find the two elements that would be next to each other if + #this element (and any children) hadn't been parsed. Connect + #the two. + lastChild = self._lastRecursiveChild() + nextElement = lastChild.next + + if self.previous: + self.previous.next = nextElement + if nextElement: + nextElement.previous = self.previous + self.previous = None + lastChild.next = None + + self.parent = None + if self.previousSibling: + self.previousSibling.nextSibling = self.nextSibling + if self.nextSibling: + self.nextSibling.previousSibling = self.previousSibling + self.previousSibling = self.nextSibling = None + return self + + def _lastRecursiveChild(self): + "Finds the last element beneath this object to be parsed." + lastChild = self + while hasattr(lastChild, 'contents') and lastChild.contents: + lastChild = lastChild.contents[-1] + return lastChild + + def insert(self, position, newChild): + if (isinstance(newChild, basestring) + or isinstance(newChild, unicode)) \ + and not isinstance(newChild, NavigableString): + newChild = NavigableString(newChild) + + position = min(position, len(self.contents)) + if hasattr(newChild, 'parent') and newChild.parent != None: + # We're 'inserting' an element that's already one + # of this object's children. + if newChild.parent == self: + index = self.find(newChild) + if index and index < position: + # Furthermore we're moving it further down the + # list of this object's children. That means that + # when we extract this element, our target index + # will jump down one. 
+ position = position - 1 + newChild.extract() + + newChild.parent = self + previousChild = None + if position == 0: + newChild.previousSibling = None + newChild.previous = self + else: + previousChild = self.contents[position-1] + newChild.previousSibling = previousChild + newChild.previousSibling.nextSibling = newChild + newChild.previous = previousChild._lastRecursiveChild() + if newChild.previous: + newChild.previous.next = newChild + + newChildsLastElement = newChild._lastRecursiveChild() + + if position >= len(self.contents): + newChild.nextSibling = None + + parent = self + parentsNextSibling = None + while not parentsNextSibling: + parentsNextSibling = parent.nextSibling + parent = parent.parent + if not parent: # This is the last element in the document. + break + if parentsNextSibling: + newChildsLastElement.next = parentsNextSibling + else: + newChildsLastElement.next = None + else: + nextChild = self.contents[position] + newChild.nextSibling = nextChild + if newChild.nextSibling: + newChild.nextSibling.previousSibling = newChild + newChildsLastElement.next = nextChild + + if newChildsLastElement.next: + newChildsLastElement.next.previous = newChildsLastElement + self.contents.insert(position, newChild) + + def append(self, tag): + """Appends the given tag to the contents of this tag.""" + self.insert(len(self.contents), tag) + + def findNext(self, name=None, attrs={}, text=None, **kwargs): + """Returns the first item that matches the given criteria and + appears after this Tag in the document.""" + return self._findOne(self.findAllNext, name, attrs, text, **kwargs) + + def findAllNext(self, name=None, attrs={}, text=None, limit=None, + **kwargs): + """Returns all items that match the given criteria and appear + after this Tag in the document.""" + return self._findAll(name, attrs, text, limit, self.nextGenerator, + **kwargs) + + def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): + """Returns the closest sibling to this Tag that matches 
the + given criteria and appears after this Tag in the document.""" + return self._findOne(self.findNextSiblings, name, attrs, text, + **kwargs) + + def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, + **kwargs): + """Returns the siblings of this Tag that match the given + criteria and appear after this Tag in the document.""" + return self._findAll(name, attrs, text, limit, + self.nextSiblingGenerator, **kwargs) + fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x + + def findPrevious(self, name=None, attrs={}, text=None, **kwargs): + """Returns the first item that matches the given criteria and + appears before this Tag in the document.""" + return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) + + def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, + **kwargs): + """Returns all items that match the given criteria and appear + before this Tag in the document.""" + return self._findAll(name, attrs, text, limit, self.previousGenerator, + **kwargs) + fetchPrevious = findAllPrevious # Compatibility with pre-3.x + + def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): + """Returns the closest sibling to this Tag that matches the + given criteria and appears before this Tag in the document.""" + return self._findOne(self.findPreviousSiblings, name, attrs, text, + **kwargs) + + def findPreviousSiblings(self, name=None, attrs={}, text=None, + limit=None, **kwargs): + """Returns the siblings of this Tag that match the given + criteria and appear before this Tag in the document.""" + return self._findAll(name, attrs, text, limit, + self.previousSiblingGenerator, **kwargs) + fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x + + def findParent(self, name=None, attrs={}, **kwargs): + """Returns the closest parent of this Tag that matches the given + criteria.""" + # NOTE: We can't use _findOne because findParents takes a different + # set of arguments. 
+ r = None + l = self.findParents(name, attrs, 1) + if l: + r = l[0] + return r + + def findParents(self, name=None, attrs={}, limit=None, **kwargs): + """Returns the parents of this Tag that match the given + criteria.""" + + return self._findAll(name, attrs, None, limit, self.parentGenerator, + **kwargs) + fetchParents = findParents # Compatibility with pre-3.x + + #These methods do the real heavy lifting. + + def _findOne(self, method, name, attrs, text, **kwargs): + r = None + l = method(name, attrs, text, 1, **kwargs) + if l: + r = l[0] + return r + + def _findAll(self, name, attrs, text, limit, generator, **kwargs): + "Iterates over a generator looking for things that match." + + if isinstance(name, SoupStrainer): + strainer = name + else: + # Build a SoupStrainer + strainer = SoupStrainer(name, attrs, text, **kwargs) + results = ResultSet(strainer) + g = generator() + while True: + try: + i = g.next() + except StopIteration: + break + if i: + found = strainer.search(i) + if found: + results.append(found) + if limit and len(results) >= limit: + break + return results + + #These Generators can be used to navigate starting from both + #NavigableStrings and Tags. + def nextGenerator(self): + i = self + while i: + i = i.next + yield i + + def nextSiblingGenerator(self): + i = self + while i: + i = i.nextSibling + yield i + + def previousGenerator(self): + i = self + while i: + i = i.previous + yield i + + def previousSiblingGenerator(self): + i = self + while i: + i = i.previousSibling + yield i + + def parentGenerator(self): + i = self + while i: + i = i.parent + yield i + + # Utility methods + def substituteEncoding(self, str, encoding=None): + encoding = encoding or "utf-8" + return str.replace("%SOUP-ENCODING%", encoding) + + def toEncoding(self, s, encoding=None): + """Encodes an object to a string in some encoding, or to Unicode. 
+ .""" + if isinstance(s, unicode): + if encoding: + s = s.encode(encoding) + elif isinstance(s, str): + if encoding: + s = s.encode(encoding) + else: + s = unicode(s) + else: + if encoding: + s = self.toEncoding(str(s), encoding) + else: + s = unicode(s) + return s + +class NavigableString(unicode, PageElement): + + def __new__(cls, value): + """Create a new NavigableString. + + When unpickling a NavigableString, this method is called with + the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be + passed in to the superclass's __new__ or the superclass won't know + how to handle non-ASCII characters. + """ + if isinstance(value, unicode): + return unicode.__new__(cls, value) + return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) + + def __getnewargs__(self): + return (NavigableString.__str__(self),) + + def __getattr__(self, attr): + """text.string gives you text. This is for backwards + compatibility for Navigable*String, but for CData* it lets you + get the string without the CData wrapper.""" + if attr == 'string': + return self + else: + raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) + + def __unicode__(self): + return str(self).decode(DEFAULT_OUTPUT_ENCODING) + + def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): + if encoding: + return self.encode(encoding) + else: + return self + +class CData(NavigableString): + + def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): + return "" % NavigableString.__str__(self, encoding) + +class ProcessingInstruction(NavigableString): + def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): + output = self + if "%SOUP-ENCODING%" in output: + output = self.substituteEncoding(output, encoding) + return "" % self.toEncoding(output, encoding) + +class Comment(NavigableString): + def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): + return "" % NavigableString.__str__(self, encoding) + +class Declaration(NavigableString): + def __str__(self, 
encoding=DEFAULT_OUTPUT_ENCODING): + return "" % NavigableString.__str__(self, encoding) + +class Tag(PageElement): + + """Represents a found HTML tag with its attributes and contents.""" + + def _invert(h): + "Cheap function to invert a hash." + i = {} + for k,v in h.items(): + i[v] = k + return i + + XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", + "quot" : '"', + "amp" : "&", + "lt" : "<", + "gt" : ">" } + + XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) + + def _convertEntities(self, match): + """Used in a call to re.sub to replace HTML, XML, and numeric + entities with the appropriate Unicode characters. If HTML + entities are being converted, any unrecognized entities are + escaped.""" + x = match.group(1) + if self.convertHTMLEntities and x in name2codepoint: + return unichr(name2codepoint[x]) + elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: + if self.convertXMLEntities: + return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] + else: + return u'&%s;' % x + elif len(x) > 0 and x[0] == '#': + # Handle numeric entities + if len(x) > 1 and x[1] == 'x': + return unichr(int(x[2:], 16)) + else: + return unichr(int(x[1:])) + + elif self.escapeUnrecognizedEntities: + return u'&%s;' % x + else: + return u'&%s;' % x + + def __init__(self, parser, name, attrs=None, parent=None, + previous=None): + "Basic constructor." + + # We don't actually store the parser object: that lets extracted + # chunks be garbage-collected + self.parserClass = parser.__class__ + self.isSelfClosing = parser.isSelfClosingTag(name) + self.name = name + if attrs == None: + attrs = [] + self.attrs = attrs + self.contents = [] + self.setup(parent, previous) + self.hidden = False + self.containsSubstitutions = False + self.convertHTMLEntities = parser.convertHTMLEntities + self.convertXMLEntities = parser.convertXMLEntities + self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities + + # Convert any HTML, XML, or numeric entities in the attribute values. 
+ convert = lambda(k, val): (k, + re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", + self._convertEntities, + val)) + self.attrs = map(convert, self.attrs) + + def get(self, key, default=None): + """Returns the value of the 'key' attribute for the tag, or + the value given for 'default' if it doesn't have that + attribute.""" + return self._getAttrMap().get(key, default) + + def has_key(self, key): + return self._getAttrMap().has_key(key) + + def __getitem__(self, key): + """tag[key] returns the value of the 'key' attribute for the tag, + and throws an exception if it's not there.""" + return self._getAttrMap()[key] + + def __iter__(self): + "Iterating over a tag iterates over its contents." + return iter(self.contents) + + def __len__(self): + "The length of a tag is the length of its list of contents." + return len(self.contents) + + def __contains__(self, x): + return x in self.contents + + def __nonzero__(self): + "A tag is non-None even if it has no contents." + return True + + def __setitem__(self, key, value): + """Setting tag[key] sets the value of the 'key' attribute for the + tag.""" + self._getAttrMap() + self.attrMap[key] = value + found = False + for i in range(0, len(self.attrs)): + if self.attrs[i][0] == key: + self.attrs[i] = (key, value) + found = True + if not found: + self.attrs.append((key, value)) + self._getAttrMap()[key] = value + + def __delitem__(self, key): + "Deleting tag[key] deletes all 'key' attributes for the tag." + for item in self.attrs: + if item[0] == key: + self.attrs.remove(item) + #We don't break because bad HTML can define the same + #attribute multiple times. + self._getAttrMap() + if self.attrMap.has_key(key): + del self.attrMap[key] + + def __call__(self, *args, **kwargs): + """Calling a tag like a function is the same as calling its + findAll() method. Eg. 
tag('a') returns a list of all the A tags + found within this tag.""" + return apply(self.findAll, args, kwargs) + + def __getattr__(self, tag): + #print "Getattr %s.%s" % (self.__class__, tag) + if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: + return self.find(tag[:-3]) + elif tag.find('__') != 0: + return self.find(tag) + raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) + + def __eq__(self, other): + """Returns true iff this tag has the same name, the same attributes, + and the same contents (recursively) as the given tag. + + NOTE: right now this will return false if two tags have the + same attributes in a different order. Should this be fixed?""" + if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): + return False + for i in range(0, len(self.contents)): + if self.contents[i] != other.contents[i]: + return False + return True + + def __ne__(self, other): + """Returns true iff this tag is not identical to the other tag, + as defined in __eq__.""" + return not self == other + + def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): + """Renders this tag as a string.""" + return self.__str__(encoding) + + def __unicode__(self): + return self.__str__(None) + + BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + + ")") + + def _sub_entity(self, x): + """Used with a regular expression to substitute the + appropriate XML entity for an XML special character.""" + return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" + + def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, + prettyPrint=False, indentLevel=0): + """Returns a string or Unicode representation of this tag and + its contents. To get Unicode, pass None for encoding. 
+ + NOTE: since Python's HTML parser consumes whitespace, this + method is not certain to reproduce the whitespace present in + the original string.""" + + encodedName = self.toEncoding(self.name, encoding) + + attrs = [] + if self.attrs: + for key, val in self.attrs: + fmt = '%s="%s"' + if isString(val): + if self.containsSubstitutions and '%SOUP-ENCODING%' in val: + val = self.substituteEncoding(val, encoding) + + # The attribute value either: + # + # * Contains no embedded double quotes or single quotes. + # No problem: we enclose it in double quotes. + # * Contains embedded single quotes. No problem: + # double quotes work here too. + # * Contains embedded double quotes. No problem: + # we enclose it in single quotes. + # * Embeds both single _and_ double quotes. This + # can't happen naturally, but it can happen if + # you modify an attribute value after parsing + # the document. Now we have a bit of a + # problem. We solve it by enclosing the + # attribute in single quotes, and escaping any + # embedded single quotes to XML entities. + if '"' in val: + fmt = "%s='%s'" + if "'" in val: + # TODO: replace with apos when + # appropriate. + val = val.replace("'", "&squot;") + + # Now we're okay w/r/t quotes. But the attribute + # value might also contain angle brackets, or + # ampersands that aren't part of entities. We need + # to escape those to XML entities too. 
+ val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) + + attrs.append(fmt % (self.toEncoding(key, encoding), + self.toEncoding(val, encoding))) + close = '' + closeTag = '' + if self.isSelfClosing: + close = ' /' + else: + closeTag = '' % encodedName + + indentTag, indentContents = 0, 0 + if prettyPrint: + indentTag = indentLevel + space = (' ' * (indentTag-1)) + indentContents = indentTag + 1 + contents = self.renderContents(encoding, prettyPrint, indentContents) + if self.hidden: + s = contents + else: + s = [] + attributeString = '' + if attrs: + attributeString = ' ' + ' '.join(attrs) + if prettyPrint: + s.append(space) + s.append('<%s%s%s>' % (encodedName, attributeString, close)) + if prettyPrint: + s.append("\n") + s.append(contents) + if prettyPrint and contents and contents[-1] != "\n": + s.append("\n") + if prettyPrint and closeTag: + s.append(space) + s.append(closeTag) + if prettyPrint and closeTag and self.nextSibling: + s.append("\n") + s = ''.join(s) + return s + + def decompose(self): + """Recursively destroys the contents of this tree.""" + contents = [i for i in self.contents] + for i in contents: + if isinstance(i, Tag): + i.decompose() + else: + i.extract() + self.extract() + + def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): + return self.__str__(encoding, True) + + def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, + prettyPrint=False, indentLevel=0): + """Renders the contents of this tag as a string in the given + encoding. 
If encoding is None, returns a Unicode string..""" + s=[] + for c in self: + text = None + if isinstance(c, NavigableString): + text = c.__str__(encoding) + elif isinstance(c, Tag): + s.append(c.__str__(encoding, prettyPrint, indentLevel)) + if text and prettyPrint: + text = text.strip() + if text: + if prettyPrint: + s.append(" " * (indentLevel-1)) + s.append(text) + if prettyPrint: + s.append("\n") + return ''.join(s) + + #Soup methods + + def find(self, name=None, attrs={}, recursive=True, text=None, + **kwargs): + """Return only the first child of this Tag matching the given + criteria.""" + r = None + l = self.findAll(name, attrs, recursive, text, 1, **kwargs) + if l: + r = l[0] + return r + findChild = find + + def findAll(self, name=None, attrs={}, recursive=True, text=None, + limit=None, **kwargs): + """Extracts a list of Tag objects that match the given + criteria. You can specify the name of the Tag and any + attributes you want the Tag to have. + + The value of a key-value pair in the 'attrs' map can be a + string, a list of strings, a regular expression object, or a + callable that takes a string and returns whether or not the + string matches for some custom definition of 'matches'. 
The + same is true of the tag name.""" + generator = self.recursiveChildGenerator + if not recursive: + generator = self.childGenerator + return self._findAll(name, attrs, text, limit, generator, **kwargs) + findChildren = findAll + + # Pre-3.x compatibility methods + first = find + fetch = findAll + + def fetchText(self, text=None, recursive=True, limit=None): + return self.findAll(text=text, recursive=recursive, limit=limit) + + def firstText(self, text=None, recursive=True): + return self.find(text=text, recursive=recursive) + + #Private methods + + def _getAttrMap(self): + """Initializes a map representation of this tag's attributes, + if not already initialized.""" + if not getattr(self, 'attrMap'): + self.attrMap = {} + for (key, value) in self.attrs: + self.attrMap[key] = value + return self.attrMap + + #Generator methods + def childGenerator(self): + for i in range(0, len(self.contents)): + yield self.contents[i] + raise StopIteration + + def recursiveChildGenerator(self): + stack = [(self, 0)] + while stack: + tag, start = stack.pop() + if isinstance(tag, Tag): + for i in range(start, len(tag.contents)): + a = tag.contents[i] + yield a + if isinstance(a, Tag) and tag.contents: + if i < len(tag.contents) - 1: + stack.append((tag, i+1)) + stack.append((a, 0)) + break + raise StopIteration + +# Next, a couple classes to represent queries and their results. 
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    A strainer is built from a tag name, an attribute map, and/or a text
    matcher.  Each criterion may be a plain string, a list of strings, a
    compiled regular expression object, or a callable taking the candidate
    value and returning a truth value.  Strainers are used both by the
    findAll family of Tag methods and by the parser's parseOnlyThese
    argument to restrict which elements are matched or even parsed.
    """

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        """Build a strainer.

        name    -- tag-name criterion (string, regexp, list, or callable).
        attrs   -- map of attribute-name -> criterion.  Passing a plain
                   string here is shorthand for matching the 'class'
                   attribute.  (The shared {} default is never mutated:
                   see the .copy() below, so the mutable default is safe.)
        text    -- criterion applied to text nodes instead of tags.
        kwargs  -- extra attribute criteria, merged into attrs.
        """
        self.name = name
        if isString(attrs):
            # String shorthand: SoupStrainer('b', 'bold') means
            # <b class="bold">.
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                # Copy before merging so neither the caller's dict nor
                # the shared default is modified.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        """Human-readable summary: the text criterion if there is one,
        otherwise 'name|attrs'."""
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Try to match a tag against this strainer's name and attribute
        criteria.

        markupName may be either a Tag object or a bare tag-name string
        (with markupAttrs carrying the attributes separately).  Returns
        the matched Tag (or name) on success, None on failure.
        """
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable name criterion gets the raw (name, attrs) pair and
        # decides everything itself -- but only when we were handed the
        # pieces, not an already-built Tag.
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                # Lazily normalize the attributes into a dict the first
                # time we need a lookup: markupAttrs may be a Tag, a
                # mapping, or a list of (key, value) pairs.
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k, v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Match this strainer against an arbitrary piece of markup: a
        list of elements, a Tag, a NavigableString, or a plain string.
        Returns the matched element or None."""
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            # Call form of raise (equivalent to the old
            # 'raise Exception, msg' statement, but also valid syntax
            # on Python 3).
            raise Exception("I don't know how to match against a %s"
                            % markup.__class__)
        return found

    def _matches(self, markup, matchAgainst):
        """Core criterion dispatch: test 'markup' (a Tag, string, or
        None) against one criterion 'matchAgainst' (True, callable,
        regexp, list, dict, or string)."""
        result = False
        if matchAgainst is True:
            # The literal True criterion means "any non-None value".
            # ('is True' is exactly the old
            # 'matchAgainst == True and type(matchAgainst) == BooleanType'
            # check, since only the bool singleton satisfies both.)
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            # Custom match methods take the tag as an argument, but all
            # other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isString(markup):
                markup = unicode(markup)
            # Now we know that markup is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif isList(matchAgainst):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): markup is a string at this point, and
                # strings have no has_key, so a dict criterion here
                # looks like it would raise -- preserved as-is; confirm
                # intended semantics before changing.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Compare in matching string types so u'x' == 'x' works
                # predictably on Python 2.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result

class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Fix: initialize *this* instance.  The original called
        # list.__init__([]), which initialized a throwaway empty list
        # and left self untouched (harmless only by accident, since
        # list() starts empty anyway).
        list.__init__(self)
        # The SoupStrainer that produced this result set.
        self.source = source

# Now, some helper functions.
+ +def isList(l): + """Convenience method that works with all 2.x versions of Python + to determine whether or not something is listlike.""" + return hasattr(l, '__iter__') \ + or (type(l) in (types.ListType, types.TupleType)) + +def isString(s): + """Convenience method that works with all 2.x versions of Python + to determine whether or not something is stringlike.""" + try: + return isinstance(s, unicode) or isinstance(s, basestring) + except NameError: + return isinstance(s, str) + +def buildTagMap(default, *args): + """Turns a list of maps, lists, or scalars into a single map. + Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and + NESTING_RESET_TAGS maps out of lists and partial maps.""" + built = {} + for portion in args: + if hasattr(portion, 'items'): + #It's a map. Merge it. + for k,v in portion.items(): + built[k] = v + elif isList(portion): + #It's a list. Map each item to the default. + for k in portion: + built[k] = default + else: + #It's a scalar. Map it to the default. + built[portion] = default + return built + +# Now, the parser classes. + +class BeautifulStoneSoup(Tag, SGMLParser): + + """This class contains the basic parser and search code. It defines + a parser that knows nothing about tag behavior except for the + following: + + You can't close a tag without closing all the tags it encloses. + That is, "" actually means + "". + + [Another possible explanation is "", but since + this class defines no SELF_CLOSING_TAGS, it will never use that + explanation.] 
+ + This class is useful for parsing XML or made-up markup languages, + or when BeautifulSoup makes an assumption counter to what you were + expecting.""" + + SELF_CLOSING_TAGS = {} + NESTABLE_TAGS = {} + RESET_NESTING_TAGS = {} + QUOTE_TAGS = {} + PRESERVE_WHITESPACE_TAGS = [] + + MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), + lambda x: x.group(1) + ' />'), + (re.compile(']*)>'), + lambda x: '') + ] + + ROOT_TAG_NAME = u'[document]' + + HTML_ENTITIES = "html" + XML_ENTITIES = "xml" + XHTML_ENTITIES = "xhtml" + # TODO: This only exists for backwards-compatibility + ALL_ENTITIES = XHTML_ENTITIES + + # Used when determining whether a text node is all whitespace and + # can be replaced with a single space. A text node that contains + # fancy Unicode spaces (usually non-breaking) should be left + # alone. + STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } + + def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, + markupMassage=True, smartQuotesTo=XML_ENTITIES, + convertEntities=None, selfClosingTags=None, isHTML=False): + """The Soup object is initialized as the 'root tag', and the + provided markup (which can be a string or a file-like object) + is fed into the underlying parser. + + sgmllib will process most bad HTML, and the BeautifulSoup + class has some tricks for dealing with some HTML that kills + sgmllib, but Beautiful Soup can nonetheless choke or lose data + if your data uses self-closing tags or declarations + incorrectly. + + By default, Beautiful Soup uses regexes to sanitize input, + avoiding the vast majority of these problems. If the problems + don't apply to you, pass in False for markupMassage, and + you'll get better performance. + + The default parser massage techniques fix the two most common + instances of invalid HTML that choke sgmllib: + +
(No space between name of closing tag and tag close) + (Extraneous whitespace in declaration) + + You can pass in a custom list of (RE object, replace method) + tuples to get Beautiful Soup to scrub your input the way you + want.""" + + self.parseOnlyThese = parseOnlyThese + self.fromEncoding = fromEncoding + self.smartQuotesTo = smartQuotesTo + self.convertEntities = convertEntities + # Set the rules for how we'll deal with the entities we + # encounter + if self.convertEntities: + # It doesn't make sense to convert encoded characters to + # entities even while you're converting entities to Unicode. + # Just convert it all to Unicode. + self.smartQuotesTo = None + if convertEntities == self.HTML_ENTITIES: + self.convertXMLEntities = False + self.convertHTMLEntities = True + self.escapeUnrecognizedEntities = True + elif convertEntities == self.XHTML_ENTITIES: + self.convertXMLEntities = True + self.convertHTMLEntities = True + self.escapeUnrecognizedEntities = False + elif convertEntities == self.XML_ENTITIES: + self.convertXMLEntities = True + self.convertHTMLEntities = False + self.escapeUnrecognizedEntities = False + else: + self.convertXMLEntities = False + self.convertHTMLEntities = False + self.escapeUnrecognizedEntities = False + + self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) + SGMLParser.__init__(self) + + if hasattr(markup, 'read'): # It's a file-type object. + markup = markup.read() + self.markup = markup + self.markupMassage = markupMassage + try: + self._feed(isHTML=isHTML) + except StopParsing: + pass + self.markup = None # The markup can now be GCed + + def convert_charref(self, name): + """This method fixes a bug in Python's SGMLParser.""" + try: + n = int(name) + except ValueError: + return + if not 0 <= n <= 127 : # ASCII ends at 127, not 255 + return + return self.convert_codepoint(n) + + def _feed(self, inDocumentEncoding=None, isHTML=False): + # Convert the document to Unicode. 
+ markup = self.markup + if isinstance(markup, unicode): + if not hasattr(self, 'originalEncoding'): + self.originalEncoding = None + else: + dammit = UnicodeDammit\ + (markup, [self.fromEncoding, inDocumentEncoding], + smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) + markup = dammit.unicode + self.originalEncoding = dammit.originalEncoding + self.declaredHTMLEncoding = dammit.declaredHTMLEncoding + if markup: + if self.markupMassage: + if not isList(self.markupMassage): + self.markupMassage = self.MARKUP_MASSAGE + for fix, m in self.markupMassage: + markup = fix.sub(m, markup) + # TODO: We get rid of markupMassage so that the + # soup object can be deepcopied later on. Some + # Python installations can't copy regexes. If anyone + # was relying on the existence of markupMassage, this + # might cause problems. + del(self.markupMassage) + self.reset() + + SGMLParser.feed(self, markup) + # Close out any unfinished strings and close all the open tags. + self.endData() + while self.currentTag.name != self.ROOT_TAG_NAME: + self.popTag() + + def __getattr__(self, methodName): + """This method routes method call requests to either the SGMLParser + superclass or the Tag superclass, depending on the method name.""" + #print "__getattr__ called on %s.%s" % (self.__class__, methodName) + + if methodName.find('start_') == 0 or methodName.find('end_') == 0 \ + or methodName.find('do_') == 0: + return SGMLParser.__getattr__(self, methodName) + elif methodName.find('__') != 0: + return Tag.__getattr__(self, methodName) + else: + raise AttributeError + + def isSelfClosingTag(self, name): + """Returns true iff the given string is the name of a + self-closing tag according to this parser.""" + return self.SELF_CLOSING_TAGS.has_key(name) \ + or self.instanceSelfClosingTags.has_key(name) + + def reset(self): + Tag.__init__(self, self, self.ROOT_TAG_NAME) + self.hidden = 1 + SGMLParser.reset(self) + self.currentData = [] + self.currentTag = None + self.tagStack = [] + self.quoteStack 
= [] + self.pushTag(self) + + def popTag(self): + tag = self.tagStack.pop() + # Tags with just one string-owning child get the child as a + # 'string' property, so that soup.tag.string is shorthand for + # soup.tag.contents[0] + if len(self.currentTag.contents) == 1 and \ + isinstance(self.currentTag.contents[0], NavigableString): + self.currentTag.string = self.currentTag.contents[0] + + #print "Pop", tag.name + if self.tagStack: + self.currentTag = self.tagStack[-1] + return self.currentTag + + def pushTag(self, tag): + #print "Push", tag.name + if self.currentTag: + self.currentTag.contents.append(tag) + self.tagStack.append(tag) + self.currentTag = self.tagStack[-1] + + def endData(self, containerClass=NavigableString): + if self.currentData: + currentData = u''.join(self.currentData) + if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and + not set([tag.name for tag in self.tagStack]).intersection( + self.PRESERVE_WHITESPACE_TAGS)): + if '\n' in currentData: + currentData = '\n' + else: + currentData = ' ' + self.currentData = [] + if self.parseOnlyThese and len(self.tagStack) <= 1 and \ + (not self.parseOnlyThese.text or \ + not self.parseOnlyThese.search(currentData)): + return + o = containerClass(currentData) + o.setup(self.currentTag, self.previous) + if self.previous: + self.previous.next = o + self.previous = o + self.currentTag.contents.append(o) + + + def _popToTag(self, name, inclusivePop=True): + """Pops the tag stack up to and including the most recent + instance of the given tag. 
If inclusivePop is false, pops the tag + stack up to but *not* including the most recent instqance of + the given tag.""" + #print "Popping to %s" % name + if name == self.ROOT_TAG_NAME: + return + + numPops = 0 + mostRecentTag = None + for i in range(len(self.tagStack)-1, 0, -1): + if name == self.tagStack[i].name: + numPops = len(self.tagStack)-i + break + if not inclusivePop: + numPops = numPops - 1 + + for i in range(0, numPops): + mostRecentTag = self.popTag() + return mostRecentTag + + def _smartPop(self, name): + + """We need to pop up to the previous tag of this type, unless + one of this tag's nesting reset triggers comes between this + tag and the previous tag of this type, OR unless this tag is a + generic nesting trigger and another generic nesting trigger + comes between this tag and the previous tag of this type. + + Examples: +

FooBar *

* should pop to 'p', not 'b'. +

FooBar *

* should pop to 'table', not 'p'. +

Foo

Bar *

* should pop to 'tr', not 'p'. + +

    • *
    • * should pop to 'ul', not the first 'li'. +
  • ** should pop to 'table', not the first 'tr' + tag should + implicitly close the previous tag within the same
    ** should pop to 'tr', not the first 'td' + """ + + nestingResetTriggers = self.NESTABLE_TAGS.get(name) + isNestable = nestingResetTriggers != None + isResetNesting = self.RESET_NESTING_TAGS.has_key(name) + popTo = None + inclusive = True + for i in range(len(self.tagStack)-1, 0, -1): + p = self.tagStack[i] + if (not p or p.name == name) and not isNestable: + #Non-nestable tags get popped to the top or to their + #last occurance. + popTo = name + break + if (nestingResetTriggers != None + and p.name in nestingResetTriggers) \ + or (nestingResetTriggers == None and isResetNesting + and self.RESET_NESTING_TAGS.has_key(p.name)): + + #If we encounter one of the nesting reset triggers + #peculiar to this tag, or we encounter another tag + #that causes nesting to reset, pop up to but not + #including that tag. + popTo = p.name + inclusive = False + break + p = p.parent + if popTo: + self._popToTag(popTo, inclusive) + + def unknown_starttag(self, name, attrs, selfClosing=0): + #print "Start tag %s: %s" % (name, attrs) + if self.quoteStack: + #This is not a real tag. + #print "<%s> is not real!" % name + attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) + self.handle_data('<%s%s>' % (name, attrs)) + return + self.endData() + + if not self.isSelfClosingTag(name) and not selfClosing: + self._smartPop(name) + + if self.parseOnlyThese and len(self.tagStack) <= 1 \ + and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): + return + + tag = Tag(self, name, attrs, self.currentTag, self.previous) + if self.previous: + self.previous.next = tag + self.previous = tag + self.pushTag(tag) + if selfClosing or self.isSelfClosingTag(name): + self.popTag() + if name in self.QUOTE_TAGS: + #print "Beginning quote (%s)" % name + self.quoteStack.append(name) + self.literal = 1 + return tag + + def unknown_endtag(self, name): + #print "End tag %s" % name + if self.quoteStack and self.quoteStack[-1] != name: + #This is not a real end tag. 
+ #print " is not real!" % name + self.handle_data('' % name) + return + self.endData() + self._popToTag(name) + if self.quoteStack and self.quoteStack[-1] == name: + self.quoteStack.pop() + self.literal = (len(self.quoteStack) > 0) + + def handle_data(self, data): + self.currentData.append(data) + + def _toStringSubclass(self, text, subclass): + """Adds a certain piece of text to the tree as a NavigableString + subclass.""" + self.endData() + self.handle_data(text) + self.endData(subclass) + + def handle_pi(self, text): + """Handle a processing instruction as a ProcessingInstruction + object, possibly one with a %SOUP-ENCODING% slot into which an + encoding will be plugged later.""" + if text[:3] == "xml": + text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" + self._toStringSubclass(text, ProcessingInstruction) + + def handle_comment(self, text): + "Handle comments as Comment objects." + self._toStringSubclass(text, Comment) + + def handle_charref(self, ref): + "Handle character references as data." + if self.convertEntities: + data = unichr(int(ref)) + else: + data = '&#%s;' % ref + self.handle_data(data) + + def handle_entityref(self, ref): + """Handle entity references as data, possibly converting known + HTML and/or XML entity references to the corresponding Unicode + characters.""" + data = None + if self.convertHTMLEntities: + try: + data = unichr(name2codepoint[ref]) + except KeyError: + pass + + if not data and self.convertXMLEntities: + data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) + + if not data and self.convertHTMLEntities and \ + not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): + # TODO: We've got a problem here. We're told this is + # an entity reference, but it's not an XML entity + # reference or an HTML entity reference. Nonetheless, + # the logical thing to do is to pass it through as an + # unrecognized entity reference. + # + # Except: when the input is "&carol;" this function + # will be called with input "carol". 
When the input is + # "AT&T", this function will be called with input + # "T". We have no way of knowing whether a semicolon + # was present originally, so we don't know whether + # this is an unknown entity or just a misplaced + # ampersand. + # + # The more common case is a misplaced ampersand, so I + # escape the ampersand and omit the trailing semicolon. + data = "&%s" % ref + if not data: + # This case is different from the one above, because we + # haven't already gone through a supposedly comprehensive + # mapping of entities to Unicode characters. We might not + # have gone through any mapping at all. So the chances are + # very high that this is a real entity, and not a + # misplaced ampersand. + data = "&%s;" % ref + self.handle_data(data) + + def handle_decl(self, data): + "Handle DOCTYPEs and the like as Declaration objects." + self._toStringSubclass(data, Declaration) + + def parse_declaration(self, i): + """Treat a bogus SGML declaration as raw data. Treat a CDATA + declaration as a CData object.""" + j = None + if self.rawdata[i:i+9] == '', i) + if k == -1: + k = len(self.rawdata) + data = self.rawdata[i+9:k] + j = k+3 + self._toStringSubclass(data, CData) + else: + try: + j = SGMLParser.parse_declaration(self, i) + except SGMLParseError: + toHandle = self.rawdata[i:] + self.handle_data(toHandle) + j = i + len(toHandle) + return j + +class BeautifulSoup(BeautifulStoneSoup): + + """This parser knows the following facts about HTML: + + * Some tags have no closing tag and should be interpreted as being + closed as soon as they are encountered. + + * The text inside some tags (ie. 'script') may contain tags which + are not really part of the document and which should be parsed + as text, not tags. If you want to parse the text as tags, you can + always fetch it and parse it explicitly. + + * Tag nesting rules: + + Most tags can't be nested at all. For instance, the occurance of + a

    tag should implicitly close the previous

    tag. + +

    Para1

    Para2 + should be transformed into: +

    Para1

    Para2 + + Some tags can be nested arbitrarily. For instance, the occurance + of a

    tag should _not_ implicitly close the previous +
    tag. + + Alice said:
    Bob said:
    Blah + should NOT be transformed into: + Alice said:
    Bob said:
    Blah + + Some tags can be nested, but the nesting is reset by the + interposition of other tags. For instance, a
    , + but not close a tag in another table. + +
    BlahBlah + should be transformed into: +
    BlahBlah + but, + Blah
    Blah + should NOT be transformed into + Blah
    Blah + + Differing assumptions about tag nesting rules are a major source + of problems with the BeautifulSoup class. If BeautifulSoup is not + treating as nestable a tag your page author treats as nestable, + try ICantBelieveItsBeautifulSoup, MinimalSoup, or + BeautifulStoneSoup before writing your own subclass.""" + + def __init__(self, *args, **kwargs): + if not kwargs.has_key('smartQuotesTo'): + kwargs['smartQuotesTo'] = self.HTML_ENTITIES + kwargs['isHTML'] = True + BeautifulStoneSoup.__init__(self, *args, **kwargs) + + SELF_CLOSING_TAGS = buildTagMap(None, + ['br' , 'hr', 'input', 'img', 'meta', + 'spacer', 'link', 'frame', 'base']) + + PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) + + QUOTE_TAGS = {'script' : None, 'textarea' : None} + + #According to the HTML standard, each of these inline tags can + #contain another tag of the same type. Furthermore, it's common + #to actually use these tags this way. + NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', + 'center'] + + #According to the HTML standard, these block tags can contain + #another tag of the same type. Furthermore, it's common + #to actually use these tags this way. + NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] + + #Lists can contain other lists, but there are restrictions. + NESTABLE_LIST_TAGS = { 'ol' : [], + 'ul' : [], + 'li' : ['ul', 'ol'], + 'dl' : [], + 'dd' : ['dl'], + 'dt' : ['dl'] } + + #Tables can contain other tables, but there are restrictions. + NESTABLE_TABLE_TAGS = {'table' : [], + 'tr' : ['table', 'tbody', 'tfoot', 'thead'], + 'td' : ['tr'], + 'th' : ['tr'], + 'thead' : ['table'], + 'tbody' : ['table'], + 'tfoot' : ['table'], + } + + NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] + + #If one of these tags is encountered, all tags up to the next tag of + #this type are popped. 
+ RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', + NON_NESTABLE_BLOCK_TAGS, + NESTABLE_LIST_TAGS, + NESTABLE_TABLE_TAGS) + + NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, + NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) + + # Used to detect the charset in a META tag; see start_meta + CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) + + def start_meta(self, attrs): + """Beautiful Soup can detect a charset included in a META tag, + try to convert the document to that charset, and re-parse the + document from the beginning.""" + httpEquiv = None + contentType = None + contentTypeIndex = None + tagNeedsEncodingSubstitution = False + + for i in range(0, len(attrs)): + key, value = attrs[i] + key = key.lower() + if key == 'http-equiv': + httpEquiv = value + elif key == 'content': + contentType = value + contentTypeIndex = i + + if httpEquiv and contentType: # It's an interesting meta tag. + match = self.CHARSET_RE.search(contentType) + if match: + if (self.declaredHTMLEncoding is not None or + self.originalEncoding == self.fromEncoding): + # An HTML encoding was sniffed while converting + # the document to Unicode, or an HTML encoding was + # sniffed during a previous pass through the + # document, or an encoding was specified + # explicitly and it worked. Rewrite the meta tag. + def rewrite(match): + return match.group(1) + "%SOUP-ENCODING%" + newAttr = self.CHARSET_RE.sub(rewrite, contentType) + attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], + newAttr) + tagNeedsEncodingSubstitution = True + else: + # This is our first pass through the document. + # Go through it again with the encoding information. 
+ newCharset = match.group(3) + if newCharset and newCharset != self.originalEncoding: + self.declaredHTMLEncoding = newCharset + self._feed(self.declaredHTMLEncoding) + raise StopParsing + pass + tag = self.unknown_starttag("meta", attrs) + if tag and tagNeedsEncodingSubstitution: + tag.containsSubstitutions = True + +class StopParsing(Exception): + pass + +class ICantBelieveItsBeautifulSoup(BeautifulSoup): + + """The BeautifulSoup class is oriented towards skipping over + common HTML errors like unclosed tags. However, sometimes it makes + errors of its own. For instance, consider this fragment: + + FooBar + + This is perfectly valid (if bizarre) HTML. However, the + BeautifulSoup class will implicitly close the first b tag when it + encounters the second 'b'. It will think the author wrote + "FooBar", and didn't close the first 'b' tag, because + there's no real-world reason to bold something that's already + bold. When it encounters '' it will close two more 'b' + tags, for a grand total of three tags closed instead of two. This + can throw off the rest of your document structure. The same is + true of a number of other tags, listed below. + + It's much more common for someone to forget to close a 'b' tag + than to actually use nested 'b' tags, and the BeautifulSoup class + handles the common case. 
This class handles the not-co-common + case: where you can't believe someone wrote what they did, but + it's valid HTML and BeautifulSoup screwed up by assuming it + wouldn't be.""" + + I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ + ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', + 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', + 'big'] + + I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] + + NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, + I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, + I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) + +class MinimalSoup(BeautifulSoup): + """The MinimalSoup class is for parsing HTML that contains + pathologically bad markup. It makes no assumptions about tag + nesting, but it does know which tags are self-closing, that + ', re.I|re.S), ''), + # For BeautifulSoup. + (re.compile('', re.I), '') + ] + + def preprocess_dom(self, dom): + # Remove "link this quote" links. + for qLink in self.xpath(dom, "//p[@class='linksoda']"): + qLink.drop_tree() + return dom + + def postprocess_data(self, data): + if 'quotes' not in data: + return {} + for idx, quote in enumerate(data['quotes']): + data['quotes'][idx] = quote.split('::') + return data + + +class DOMHTMLReleaseinfoParser(DOMParserBase): + """Parser for the "release dates" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + rdparser = DOMHTMLReleaseinfoParser() + result = rdparser.parse(releaseinfo_html_string) + """ + extractors = [Extractor(label='release dates', + path="//th[@class='xxxx']/../../tr", + attrs=Attribute(key='release dates', multi=True, + path={'country': ".//td[1]//text()", + 'date': ".//td[2]//text()", + 'notes': ".//td[3]//text()"})), + Extractor(label='akas', + path="//div[@class='_imdbpy_akas']/table/tr", + attrs=Attribute(key='akas', multi=True, + path={'title': "./td[1]/text()", + 'countries': "./td[2]/text()"}))] + + preprocessors = [ + (re.compile('(
    )', re.I | re.M | re.S), + r'
    \1
    ')] + + def postprocess_data(self, data): + if not ('release dates' in data or 'akas' in data): return data + releases = data.get('release dates') or [] + rl = [] + for i in releases: + country = i.get('country') + date = i.get('date') + if not (country and date): continue + country = country.strip() + date = date.strip() + if not (country and date): continue + notes = i['notes'] + info = u'%s::%s' % (country, date) + if notes: + info += notes + rl.append(info) + if releases: + del data['release dates'] + if rl: + data['release dates'] = rl + akas = data.get('akas') or [] + nakas = [] + for aka in akas: + title = aka.get('title', '').strip() + if not title: + continue + countries = aka.get('countries', '').split('/') + if not countries: + nakas.append(title) + else: + for country in countries: + nakas.append('%s::%s' % (title, country.strip())) + if akas: + del data['akas'] + if nakas: + data['akas from release info'] = nakas + return data + + +class DOMHTMLRatingsParser(DOMParserBase): + """Parser for the "user ratings" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + rparser = DOMHTMLRatingsParser() + result = rparser.parse(userratings_html_string) + """ + re_means = re.compile('mean\s*=\s*([0-9]\.[0-9])\.\s*median\s*=\s*([0-9])', + re.I) + extractors = [ + Extractor(label='number of votes', + path="//td[b='Percentage']/../../tr", + attrs=[Attribute(key='votes', + multi=True, + path={ + 'votes': "td[1]//text()", + 'ordinal': "td[3]//text()" + })]), + Extractor(label='mean and median', + path="//p[starts-with(text(), 'Arithmetic mean')]", + attrs=Attribute(key='mean and median', + path="text()")), + Extractor(label='rating', + path="//a[starts-with(@href, '/search/title?user_rating=')]", + attrs=Attribute(key='rating', + path="text()")), + Extractor(label='demographic voters', + path="//td[b='Average']/../../tr", + attrs=Attribute(key='demographic voters', + multi=True, + path={ + 'voters': "td[1]//text()", + 'votes': "td[2]//text()", + 'average': "td[3]//text()" + })), + Extractor(label='top 250', + path="//a[text()='top 250']", + attrs=Attribute(key='top 250', + path="./preceding-sibling::text()[1]")) + ] + + def postprocess_data(self, data): + nd = {} + votes = data.get('votes', []) + if votes: + nd['number of votes'] = {} + for i in xrange(1, 11): + nd['number of votes'][int(votes[i]['ordinal'])] = \ + int(votes[i]['votes'].replace(',', '')) + mean = data.get('mean and median', '') + if mean: + means = self.re_means.findall(mean) + if means and len(means[0]) == 2: + am, med = means[0] + try: am = float(am) + except (ValueError, OverflowError): pass + if type(am) is type(1.0): + nd['arithmetic mean'] = am + try: med = int(med) + except (ValueError, OverflowError): pass + if type(med) is type(0): + nd['median'] = med + if 'rating' in data: + nd['rating'] = float(data['rating']) + dem_voters = data.get('demographic voters') + if dem_voters: + nd['demographic'] = {} + for i in xrange(1, len(dem_voters)): + if (dem_voters[i]['votes'] is not None) \ + and (dem_voters[i]['votes'].strip()): + 
nd['demographic'][dem_voters[i]['voters'].strip().lower()] \ + = (int(dem_voters[i]['votes'].replace(',', '')), + float(dem_voters[i]['average'])) + if 'imdb users' in nd.get('demographic', {}): + nd['votes'] = nd['demographic']['imdb users'][0] + nd['demographic']['all votes'] = nd['demographic']['imdb users'] + del nd['demographic']['imdb users'] + top250 = data.get('top 250') + if top250: + sd = top250[9:] + i = sd.find(' ') + if i != -1: + sd = sd[:i] + try: sd = int(sd) + except (ValueError, OverflowError): pass + if type(sd) is type(0): + nd['top 250 rank'] = sd + return nd + + +class DOMHTMLEpisodesRatings(DOMParserBase): + """Parser for the "episode ratings ... by date" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + erparser = DOMHTMLEpisodesRatings() + result = erparser.parse(eprating_html_string) + """ + _containsObjects = True + + extractors = [Extractor(label='title', path="//title", + attrs=Attribute(key='title', path="./text()")), + Extractor(label='ep ratings', + path="//th/../..//tr", + attrs=Attribute(key='episodes', multi=True, + path={'nr': ".//td[1]/text()", + 'ep title': ".//td[2]//text()", + 'movieID': ".//td[2]/a/@href", + 'rating': ".//td[3]/text()", + 'votes': ".//td[4]/text()"}))] + + def postprocess_data(self, data): + if 'title' not in data or 'episodes' not in data: return {} + nd = [] + title = data['title'] + for i in data['episodes']: + ept = i['ep title'] + movieID = analyze_imdbid(i['movieID']) + votes = i['votes'] + rating = i['rating'] + if not (ept and movieID and votes and rating): continue + try: + votes = int(votes.replace(',', '').replace('.', '')) + except: + pass + try: + rating = float(rating) + except: + pass + ept = ept.strip() + ept = u'%s {%s' % (title, ept) + nr = i['nr'] + if nr: + ept += u' (#%s)' % nr.strip() + ept += '}' + if movieID is not None: + movieID = 
str(movieID) + m = Movie(title=ept, movieID=movieID, accessSystem=self._as, + modFunct=self._modFunct) + epofdict = m.get('episode of') + if epofdict is not None: + m['episode of'] = Movie(data=epofdict, accessSystem=self._as, + modFunct=self._modFunct) + nd.append({'episode': m, 'votes': votes, 'rating': rating}) + return {'episodes rating': nd} + + +def _normalize_href(href): + if (href is not None) and (not href.lower().startswith('http://')): + if href.startswith('/'): href = href[1:] + href = '%s%s' % (imdbURL_base, href) + return href + + +class DOMHTMLOfficialsitesParser(DOMParserBase): + """Parser for the "official sites", "external reviews", "newsgroup + reviews", "miscellaneous links", "sound clips", "video clips" and + "photographs" pages of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + osparser = DOMHTMLOfficialsitesParser() + result = osparser.parse(officialsites_html_string) + """ + kind = 'official sites' + + extractors = [ + Extractor(label='site', + path="//ol/li/a", + attrs=Attribute(key='self.kind', + multi=True, + path={ + 'link': "./@href", + 'info': "./text()" + }, + postprocess=lambda x: (x.get('info').strip(), + urllib.unquote(_normalize_href(x.get('link')))))) + ] + + +class DOMHTMLConnectionParser(DOMParserBase): + """Parser for the "connections" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + connparser = DOMHTMLConnectionParser() + result = connparser.parse(connections_html_string) + """ + _containsObjects = True + + extractors = [Extractor(label='connection', + group="//div[@class='_imdbpy']", + group_key="./h5/text()", + group_key_normalize=lambda x: x.lower(), + path="./a", + attrs=Attribute(key=None, + path={'title': "./text()", + 'movieID': "./@href"}, + multi=True))] + + preprocessors = [ + ('
    ', '
    '), + # To get the movie's year. + (' (', ' ('), + ('\n
    ', ''), + ('
    - ', '::') + ] + + def postprocess_data(self, data): + for key in data.keys(): + nl = [] + for v in data[key]: + title = v['title'] + ts = title.split('::', 1) + title = ts[0].strip() + notes = u'' + if len(ts) == 2: + notes = ts[1].strip() + m = Movie(title=title, + movieID=analyze_imdbid(v['movieID']), + accessSystem=self._as, notes=notes, + modFunct=self._modFunct) + nl.append(m) + data[key] = nl + if not data: return {} + return {'connections': data} + + +class DOMHTMLLocationsParser(DOMParserBase): + """Parser for the "locations" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + lparser = DOMHTMLLocationsParser() + result = lparser.parse(locations_html_string) + """ + extractors = [Extractor(label='locations', path="//dt", + attrs=Attribute(key='locations', multi=True, + path={'place': ".//text()", + 'note': "./following-sibling::dd[1]" \ + "//text()"}, + postprocess=lambda x: (u'%s::%s' % ( + x['place'].strip(), + (x['note'] or u'').strip())).strip(':')))] + + +class DOMHTMLTechParser(DOMParserBase): + """Parser for the "technical", "business", "literature", + "publicity" (for people) and "contacts (for people) pages of + a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + tparser = HTMLTechParser() + result = tparser.parse(technical_html_string) + """ + kind = 'tech' + + extractors = [Extractor(label='tech', + group="//h5", + group_key="./text()", + group_key_normalize=lambda x: x.lower(), + path="./following-sibling::div[1]", + attrs=Attribute(key=None, + path=".//text()", + postprocess=lambda x: [t.strip() + for t in x.split('\n') if t.strip()]))] + + preprocessors = [ + (re.compile('(
    .*?
    )', re.I), r'\1
    '), + (re.compile('((
    |

    |
    ))\n?
    (?!'), + # the ones below are for the publicity parser + (re.compile('

    (.*?)

    ', re.I), r'\1
    '), + (re.compile('()', re.I), r'\1::'), + (re.compile('()', re.I), r'\n\1'), + # this is for splitting individual entries + (re.compile('
    ', re.I), r'\n'), + ] + + def postprocess_data(self, data): + for key in data: + data[key] = filter(None, data[key]) + if self.kind in ('literature', 'business', 'contacts') and data: + if 'screenplay/teleplay' in data: + data['screenplay-teleplay'] = data['screenplay/teleplay'] + del data['screenplay/teleplay'] + data = {self.kind: data} + else: + if self.kind == 'publicity': + if 'biography (print)' in data: + data['biography-print'] = data['biography (print)'] + del data['biography (print)'] + # Tech info. + for key in data.keys(): + if key.startswith('film negative format'): + data['film negative format'] = data[key] + del data[key] + elif key.startswith('film length'): + data['film length'] = data[key] + del data[key] + return data + + +class DOMHTMLDvdParser(DOMParserBase): + """Parser for the "dvd" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + dparser = DOMHTMLDvdParser() + result = dparser.parse(dvd_html_string) + """ + _defGetRefs = True + extractors = [Extractor(label='dvd', + path="//div[@class='base_layer']", + attrs=[Attribute(key=None, + multi=True, + path={ + 'title': "../table[1]//h3/text()", + 'cover': "../table[1]//img/@src", + 'region': ".//p[b='Region:']/text()", + 'asin': ".//p[b='ASIN:']/text()", + 'upc': ".//p[b='UPC:']/text()", + 'rating': ".//p/b[starts-with(text(), 'Rating:')]/../img/@alt", + 'certificate': ".//p[b='Certificate:']/text()", + 'runtime': ".//p[b='Runtime:']/text()", + 'label': ".//p[b='Label:']/text()", + 'studio': ".//p[b='Studio:']/text()", + 'release date': ".//p[b='Release Date:']/text()", + 'dvd format': ".//p[b='DVD Format:']/text()", + 'dvd features': ".//p[b='DVD Features: ']//text()", + 'supplements': "..//div[span='Supplements']" \ + "/following-sibling::div[1]//text()", + 'review': "..//div[span='Review']/following-sibling::div[1]//text()", + 'titles': "..//div[starts-with(text(), 'Titles in this Product')]" \ + "/..//text()", + }, + postprocess=lambda x: { + 'title': (x.get('title') or u'').strip(), + 'cover': (x.get('cover') or u'').strip(), + 'region': (x.get('region') or u'').strip(), + 'asin': (x.get('asin') or u'').strip(), + 'upc': (x.get('upc') or u'').strip(), + 'rating': (x.get('rating') or u'Not Rated').strip().replace('Rating: ', ''), + 'certificate': (x.get('certificate') or u'').strip(), + 'runtime': (x.get('runtime') or u'').strip(), + 'label': (x.get('label') or u'').strip(), + 'studio': (x.get('studio') or u'').strip(), + 'release date': (x.get('release date') or u'').strip(), + 'dvd format': (x.get('dvd format') or u'').strip(), + 'dvd features': (x.get('dvd features') or u'').strip().replace('DVD Features: ', ''), + 'supplements': (x.get('supplements') or u'').strip(), + 'review': (x.get('review') or u'').strip(), + 'titles in this product': (x.get('titles') or u'').strip().replace('Titles in this Product::', ''), + } + )])] + 
+ preprocessors = [ + (re.compile('

    (\s*', re.I), + r'
    \1
    '), + (re.compile('

    (

    \s*

    (

    ', re.I), r'::') + ] + + def postprocess_data(self, data): + if not data: + return data + dvds = data['dvd'] + for dvd in dvds: + if dvd['cover'].find('noposter') != -1: + del dvd['cover'] + for key in dvd.keys(): + if not dvd[key]: + del dvd[key] + if 'supplements' in dvd: + dvd['supplements'] = dvd['supplements'].split('::') + return data + + +class DOMHTMLRecParser(DOMParserBase): + """Parser for the "recommendations" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + rparser = HTMLRecParser() + result = rparser.parse(recommendations_html_string) + """ + _containsObjects = True + + extractors = [Extractor(label='recommendations', + path="//td[@valign='middle'][1]", + attrs=Attribute(key='../../tr/td[1]//text()', + multi=True, + path={'title': ".//text()", + 'movieID': ".//a/@href"}))] + def postprocess_data(self, data): + for key in data.keys(): + n_key = key + n_keyl = n_key.lower() + if n_keyl == 'suggested by the database': + n_key = 'database' + elif n_keyl == 'imdb users recommend': + n_key = 'users' + data[n_key] = [Movie(title=x['title'], + movieID=analyze_imdbid(x['movieID']), + accessSystem=self._as, modFunct=self._modFunct) + for x in data[key]] + del data[key] + if data: return {'recommendations': data} + return data + + +class DOMHTMLNewsParser(DOMParserBase): + """Parser for the "news" page of a given movie or person. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + nwparser = DOMHTMLNewsParser() + result = nwparser.parse(news_html_string) + """ + _defGetRefs = True + + extractors = [ + Extractor(label='news', + path="//h2", + attrs=Attribute(key='news', + multi=True, + path={ + 'title': "./text()", + 'fromdate': "../following-sibling::p[1]/small//text()", + # FIXME: sometimes (see The Matrix (1999))

    is found + # inside news text. + 'body': "../following-sibling::p[2]//text()", + 'link': "../..//a[text()='Permalink']/@href", + 'fulllink': "../..//a[starts-with(text(), " \ + "'See full article at')]/@href" + }, + postprocess=lambda x: { + 'title': x.get('title').strip(), + 'date': x.get('fromdate').split('|')[0].strip(), + 'from': x.get('fromdate').split('|')[1].replace('From ', + '').strip(), + 'body': (x.get('body') or u'').strip(), + 'link': _normalize_href(x.get('link')), + 'full article link': _normalize_href(x.get('fulllink')) + })) + ] + + preprocessors = [ + (re.compile('(]+>

    )', re.I), r'
    \1'), + (re.compile('(
    )', re.I), r'
    \1'), + (re.compile('

    ', re.I), r'') + ] + + def postprocess_data(self, data): + if not data.has_key('news'): + return {} + for news in data['news']: + if news.has_key('full article link'): + if news['full article link'] is None: + del news['full article link'] + return data + + +def _parse_review(x): + result = {} + title = x.get('title').strip() + if title[-1] == ':': title = title[:-1] + result['title'] = title + result['link'] = _normalize_href(x.get('link')) + kind = x.get('kind').strip() + if kind[-1] == ':': kind = kind[:-1] + result['review kind'] = kind + text = x.get('review').replace('\n\n', '||').replace('\n', ' ').split('||') + review = '\n'.join(text) + if x.get('author') is not None: + author = x.get('author').strip() + review = review.split(author)[0].strip() + result['review author'] = author[2:] + if x.get('item') is not None: + item = x.get('item').strip() + review = review[len(item):].strip() + review = "%s: %s" % (item, review) + result['review'] = review + return result + + +class DOMHTMLAmazonReviewsParser(DOMParserBase): + """Parser for the "amazon reviews" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + arparser = DOMHTMLAmazonReviewsParser() + result = arparser.parse(amazonreviews_html_string) + """ + extractors = [ + Extractor(label='amazon reviews', + group="//h3", + group_key="./a/text()", + group_key_normalize=lambda x: x[:-1], + path="./following-sibling::p[1]/span[@class='_review']", + attrs=Attribute(key=None, + multi=True, + path={ + 'title': "../preceding-sibling::h3[1]/a[1]/text()", + 'link': "../preceding-sibling::h3[1]/a[1]/@href", + 'kind': "./preceding-sibling::b[1]/text()", + 'item': "./i/b/text()", + 'review': ".//text()", + 'author': "./i[starts-with(text(), '--')]/text()" + }, + postprocess=_parse_review)) + ] + + preprocessors = [ + (re.compile('

    \n(?!)', re.I), r'\n'), + (re.compile('(\n\n)', re.I), r'\1'), + (re.compile('(

    \n\n)', re.I), r'\1'), + (re.compile('(\s\n)()', re.I), r'\1\2') + ] + + def postprocess_data(self, data): + if len(data) == 0: + return {} + nd = [] + for item in data.keys(): + nd = nd + data[item] + return {'amazon reviews': nd} + + +def _parse_merchandising_link(x): + result = {} + link = x.get('link') + result['link'] = _normalize_href(link) + text = x.get('text') + if text is not None: + result['link-text'] = text.strip() + cover = x.get('cover') + if cover is not None: + result['cover'] = cover + description = x.get('description') + if description is not None: + shop = x.get('shop') + if shop is not None: + result['description'] = u'%s::%s' % (shop, description.strip()) + else: + result['description'] = description.strip() + return result + + +class DOMHTMLSalesParser(DOMParserBase): + """Parser for the "merchandising links" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + sparser = DOMHTMLSalesParser() + result = sparser.parse(sales_html_string) + """ + extractors = [ + Extractor(label='shops', + group="//h5/a[@name]/..", + group_key="./a[1]/text()", + group_key_normalize=lambda x: x.lower(), + path=".//following-sibling::table[1]/" \ + "/td[@class='w_rowtable_colshop']//tr[1]", + attrs=Attribute(key=None, + multi=True, + path={ + 'link': "./td[2]/a[1]/@href", + 'text': "./td[1]/img[1]/@alt", + 'cover': "./ancestor::td[1]/../td[1]"\ + "/a[1]/img[1]/@src", + }, + postprocess=_parse_merchandising_link)), + Extractor(label='others', + group="//span[@class='_info']/..", + group_key="./h5/a[1]/text()", + group_key_normalize=lambda x: x.lower(), + path="./span[@class='_info']", + attrs=Attribute(key=None, + multi=True, + path={ + 'link': "./preceding-sibling::a[1]/@href", + 'shop': "./preceding-sibling::a[1]/text()", + 'description': ".//text()", + }, + postprocess=_parse_merchandising_link)) + ] + + preprocessors = [ + (re.compile('(
    \1'), + (re.compile('(
    \n
    \n)

    ', re.I), r'\1'), + (re.compile('(

    \n)(\n)', re.I), r'\1
    \2'), + (re.compile('(\n)(Search.*?)()(\n)', re.I), r'\3\1\2\4'), + (re.compile('(\n)(Search.*?)(\n)', re.I), + r'\1\2\3') + ] + + def postprocess_data(self, data): + if len(data) == 0: + return {} + return {'merchandising links': data} + + +def _build_episode(x): + """Create a Movie object for a given series' episode.""" + episode_id = analyze_imdbid(x.get('link')) + episode_title = x.get('title') + e = Movie(movieID=episode_id, title=episode_title) + e['kind'] = u'episode' + oad = x.get('oad') + if oad: + e['original air date'] = oad.strip() + year = x.get('year') + if year is not None: + year = year[5:] + if year == 'unknown': year = u'????' + if year and year.isdigit(): + year = int(year) + e['year'] = year + else: + if oad and oad[-4:].isdigit(): + e['year'] = int(oad[-4:]) + epinfo = x.get('episode') + if epinfo is not None: + season, episode = epinfo.split(':')[0].split(',') + e['season'] = int(season[7:]) + e['episode'] = int(episode[8:]) + else: + e['season'] = 'unknown' + e['episode'] = 'unknown' + plot = x.get('plot') + if plot: + e['plot'] = plot.strip() + return e + + +class DOMHTMLEpisodesParser(DOMParserBase): + """Parser for the "episode list" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + eparser = DOMHTMLEpisodesParser() + result = eparser.parse(episodes_html_string) + """ + _containsObjects = True + + kind = 'episodes list' + _episodes_path = "..//h4" + _oad_path = "./following-sibling::span/strong[1]/text()" + + def _init(self): + self.extractors = [ + Extractor(label='series', + path="//html", + attrs=[Attribute(key='series title', + path=".//title/text()"), + Attribute(key='series movieID', + path=".//h1/a[@class='main']/@href", + postprocess=analyze_imdbid) + ]), + Extractor(label='episodes', + group="//div[@class='_imdbpy']/h3", + group_key="./a/@name", + path=self._episodes_path, + attrs=Attribute(key=None, + multi=True, + path={ + 'link': "./a/@href", + 'title': "./a/text()", + 'year': "./preceding-sibling::a[1]/@name", + 'episode': "./text()[1]", + 'oad': self._oad_path, + 'plot': "./following-sibling::text()[1]" + }, + postprocess=_build_episode))] + if self.kind == 'episodes cast': + self.extractors += [ + Extractor(label='cast', + group="//h4", + group_key="./text()[1]", + group_key_normalize=lambda x: x.strip(), + path="./following-sibling::table[1]//td[@class='nm']", + attrs=Attribute(key=None, + multi=True, + path={'person': "..//text()", + 'link': "./a/@href", + 'roleID': \ + "../td[4]/div[@class='_imdbpyrole']/@roleid"}, + postprocess=lambda x: \ + build_person(x.get('person') or u'', + personID=analyze_imdbid(x.get('link')), + roleID=(x.get('roleID') or u'').split('/'), + accessSystem=self._as, + modFunct=self._modFunct))) + ] + + preprocessors = [ + (re.compile('(
    \n)(

    )', re.I), + r'

    \1
    \2'), + (re.compile('(

    \n\n)
    ', re.I), r'\1'), + (re.compile('

    (.*?)

    ', re.I), r'

    \1

    '), + (_reRolesMovie, _manageRoles), + (re.compile('(

    \n)(
    )', re.I), r'\1\2') + ] + + def postprocess_data(self, data): + # A bit extreme? + if not 'series title' in data: return {} + if not 'series movieID' in data: return {} + stitle = data['series title'].replace('- Episode list', '') + stitle = stitle.replace('- Episodes list', '') + stitle = stitle.replace('- Episode cast', '') + stitle = stitle.replace('- Episodes cast', '') + stitle = stitle.strip() + if not stitle: return {} + seriesID = data['series movieID'] + if seriesID is None: return {} + series = Movie(title=stitle, movieID=str(seriesID), + accessSystem=self._as, modFunct=self._modFunct) + nd = {} + for key in data.keys(): + if key.startswith('season-'): + season_key = key[7:] + try: season_key = int(season_key) + except: pass + nd[season_key] = {} + for episode in data[key]: + if not episode: continue + episode_key = episode.get('episode') + if episode_key is None: continue + cast_key = 'Season %s, Episode %s:' % (season_key, + episode_key) + if data.has_key(cast_key): + cast = data[cast_key] + for i in xrange(len(cast)): + cast[i].billingPos = i + 1 + episode['cast'] = cast + episode['episode of'] = series + nd[season_key][episode_key] = episode + if len(nd) == 0: + return {} + return {'episodes': nd} + + +class DOMHTMLEpisodesCastParser(DOMHTMLEpisodesParser): + """Parser for the "episodes cast" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + eparser = DOMHTMLEpisodesParser() + result = eparser.parse(episodes_html_string) + """ + kind = 'episodes cast' + _episodes_path = "..//h4" + _oad_path = "./following-sibling::b[1]/text()" + + +class DOMHTMLFaqsParser(DOMParserBase): + """Parser for the "FAQ" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + fparser = DOMHTMLFaqsParser() + result = fparser.parse(faqs_html_string) + """ + _defGetRefs = True + + # XXX: bsoup and lxml don't match (looks like a minor issue, anyway). + + extractors = [ + Extractor(label='faqs', + path="//div[@class='section']", + attrs=Attribute(key='faqs', + multi=True, + path={ + 'question': "./h3/a/span/text()", + 'answer': "../following-sibling::div[1]//text()" + }, + postprocess=lambda x: u'%s::%s' % (x.get('question').strip(), + '\n\n'.join(x.get('answer').replace( + '\n\n', '\n').strip().split('||'))))) + ] + + preprocessors = [ + (re.compile('

    ', re.I), r'||'), + (re.compile('

    (.*?)

    \n', re.I), r'||\1--'), + (re.compile('(.*?)', re.I), + r'[spoiler]\1[/spoiler]') + ] + + +class DOMHTMLAiringParser(DOMParserBase): + """Parser for the "airing" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + aparser = DOMHTMLAiringParser() + result = aparser.parse(airing_html_string) + """ + _containsObjects = True + + extractors = [ + Extractor(label='series title', + path="//title", + attrs=Attribute(key='series title', path="./text()", + postprocess=lambda x: \ + x.replace(' - TV schedule', u''))), + Extractor(label='series id', + path="//h1/a[@href]", + attrs=Attribute(key='series id', path="./@href")), + + Extractor(label='tv airings', + path="//tr[@class]", + attrs=Attribute(key='airing', + multi=True, + path={ + 'date': "./td[1]//text()", + 'time': "./td[2]//text()", + 'channel': "./td[3]//text()", + 'link': "./td[4]/a[1]/@href", + 'title': "./td[4]//text()", + 'season': "./td[5]//text()", + }, + postprocess=lambda x: { + 'date': x.get('date'), + 'time': x.get('time'), + 'channel': x.get('channel').strip(), + 'link': x.get('link'), + 'title': x.get('title'), + 'season': (x.get('season') or '').strip() + } + )) + ] + + def postprocess_data(self, data): + if len(data) == 0: + return {} + seriesTitle = data['series title'] + seriesID = analyze_imdbid(data['series id']) + if data.has_key('airing'): + for airing in data['airing']: + title = airing.get('title', '').strip() + if not title: + epsTitle = seriesTitle + if seriesID is None: + continue + epsID = seriesID + else: + epsTitle = '%s {%s}' % (data['series title'], + airing['title']) + epsID = analyze_imdbid(airing['link']) + e = Movie(title=epsTitle, movieID=epsID) + airing['episode'] = e + del airing['link'] + del airing['title'] + if not airing['season']: + del airing['season'] + if 'series title' in data: + del data['series title'] + if 'series 
id' in data: + del data['series id'] + if 'airing' in data: + data['airing'] = filter(None, data['airing']) + if 'airing' not in data or not data['airing']: + return {} + return data + + +class DOMHTMLSynopsisParser(DOMParserBase): + """Parser for the "synopsis" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + sparser = HTMLSynopsisParser() + result = sparser.parse(synopsis_html_string) + """ + extractors = [ + Extractor(label='synopsis', + path="//div[@class='display'][not(@style)]", + attrs=Attribute(key='synopsis', + path=".//text()", + postprocess=lambda x: '\n\n'.join(x.strip().split('||')))) + ] + + preprocessors = [ + (re.compile('

    ', re.I), r'||') + ] + + +class DOMHTMLParentsGuideParser(DOMParserBase): + """Parser for the "parents guide" page of a given movie. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + pgparser = HTMLParentsGuideParser() + result = pgparser.parse(parentsguide_html_string) + """ + extractors = [ + Extractor(label='parents guide', + group="//div[@class='section']", + group_key="./h3/a/span/text()", + group_key_normalize=lambda x: x.lower(), + path="../following-sibling::div[1]/p", + attrs=Attribute(key=None, + path=".//text()", + postprocess=lambda x: [t.strip().replace('\n', ' ') + for t in x.split('||') if t.strip()])) + ] + + preprocessors = [ + (re.compile('

    ', re.I), r'||') + ] + + def postprocess_data(self, data): + data2 = {} + for key in data: + if data[key]: + data2[key] = data[key] + if not data2: + return {} + return {'parents guide': data2} + + +_OBJECTS = { + 'movie_parser': ((DOMHTMLMovieParser,), None), + 'plot_parser': ((DOMHTMLPlotParser,), None), + 'movie_awards_parser': ((DOMHTMLAwardsParser,), None), + 'taglines_parser': ((DOMHTMLTaglinesParser,), None), + 'keywords_parser': ((DOMHTMLKeywordsParser,), None), + 'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None), + 'goofs_parser': ((DOMHTMLGoofsParser,), None), + 'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None), + 'trivia_parser': ((DOMHTMLTriviaParser,), None), + 'soundtrack_parser': ((DOMHTMLSoundtrackParser,), {'kind': 'soundtrack'}), + 'quotes_parser': ((DOMHTMLQuotesParser,), None), + 'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None), + 'ratings_parser': ((DOMHTMLRatingsParser,), None), + 'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None), + 'externalrev_parser': ((DOMHTMLOfficialsitesParser,), + {'kind': 'external reviews'}), + 'newsgrouprev_parser': ((DOMHTMLOfficialsitesParser,), + {'kind': 'newsgroup reviews'}), + 'misclinks_parser': ((DOMHTMLOfficialsitesParser,), + {'kind': 'misc links'}), + 'soundclips_parser': ((DOMHTMLOfficialsitesParser,), + {'kind': 'sound clips'}), + 'videoclips_parser': ((DOMHTMLOfficialsitesParser,), + {'kind': 'video clips'}), + 'photosites_parser': ((DOMHTMLOfficialsitesParser,), + {'kind': 'photo sites'}), + 'connections_parser': ((DOMHTMLConnectionParser,), None), + 'tech_parser': ((DOMHTMLTechParser,), None), + 'business_parser': ((DOMHTMLTechParser,), + {'kind': 'business', '_defGetRefs': 1}), + 'literature_parser': ((DOMHTMLTechParser,), {'kind': 'literature'}), + 'locations_parser': ((DOMHTMLLocationsParser,), None), + 'dvd_parser': ((DOMHTMLDvdParser,), None), + 'rec_parser': ((DOMHTMLRecParser,), None), + 'news_parser': ((DOMHTMLNewsParser,), None), + 
'amazonrev_parser': ((DOMHTMLAmazonReviewsParser,), None), + 'sales_parser': ((DOMHTMLSalesParser,), None), + 'episodes_parser': ((DOMHTMLEpisodesParser,), None), + 'episodes_cast_parser': ((DOMHTMLEpisodesCastParser,), None), + 'eprating_parser': ((DOMHTMLEpisodesRatings,), None), + 'movie_faqs_parser': ((DOMHTMLFaqsParser,), None), + 'airing_parser': ((DOMHTMLAiringParser,), None), + 'synopsis_parser': ((DOMHTMLSynopsisParser,), None), + 'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None) +} + diff --git a/libs/imdb/parser/http/personParser.py b/libs/imdb/parser/http/personParser.py new file mode 100644 index 0000000..d35f378 --- /dev/null +++ b/libs/imdb/parser/http/personParser.py @@ -0,0 +1,509 @@ +""" +parser.http.personParser module (imdb package). + +This module provides the classes (and the instances), used to parse +the IMDb pages on the akas.imdb.com server about a person. +E.g., for "Mel Gibson" the referred pages would be: + categorized: http://akas.imdb.com/name/nm0000154/maindetails + biography: http://akas.imdb.com/name/nm0000154/bio + ...and so on... + +Copyright 2004-2010 Davide Alberani + 2008 H. Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import re +from imdb.Movie import Movie +from imdb.utils import analyze_name, canonicalName, normalizeName, \ + analyze_title, date_and_notes +from utils import build_movie, DOMParserBase, Attribute, Extractor, \ + analyze_imdbid + + +from movieParser import _manageRoles +_reRoles = re.compile(r'(
  • .*? \.\.\.\. )(.*?)(
  • |
    )', + re.I | re.M | re.S) + +def build_date(date): + day = date.get('day') + year = date.get('year') + if day and year: + return "%s %s" % (day, year) + if day: + return day + if year: + return year + return "" + +class DOMHTMLMaindetailsParser(DOMParserBase): + """Parser for the "categorized" (maindetails) page of a given person. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + cparser = DOMHTMLMaindetailsParser() + result = cparser.parse(categorized_html_string) + """ + _containsObjects = True + + _birth_attrs = [Attribute(key='birth date', + path={ + 'day': "./div/a[starts-with(@href, " \ + "'/date/')]/text()", + 'year': "./div/a[starts-with(@href, " \ + "'/search/name?birth_year=')]/text()" + }, + postprocess=build_date), + Attribute(key='birth notes', + path="./div/a[starts-with(@href, " \ + "'/search/name?birth_place=')]/text()")] + _death_attrs = [Attribute(key='death date', + path={ + 'day': "./div/a[starts-with(@href, " \ + "'/date/')]/text()", + 'year': "./div/a[starts-with(@href, " \ + "'/search/name?death_date=')]/text()" + }, + postprocess=build_date), + Attribute(key='death notes', + path="./div/text()", + # TODO: check if this slicing is always correct + postprocess=lambda x: x.strip()[2:])] + _film_attrs = [Attribute(key=None, + multi=True, + path={ + 'link': "./a[1]/@href", + 'title': ".//text()", + 'status': "./i/a//text()", + 'roleID': "./div[@class='_imdbpyrole']/@roleid" + }, + postprocess=lambda x: + build_movie(x.get('title') or u'', + movieID=analyze_imdbid(x.get('link') or u''), + roleID=(x.get('roleID') or u'').split('/'), + status=x.get('status') or None))] + + extractors = [ + Extractor(label='page title', + path="//title", + attrs=Attribute(key='name', + path="./text()", + postprocess=lambda x: analyze_name(x, + canonical=1))), + + Extractor(label='birth info', + path="//div[h5='Date of Birth:']", + 
attrs=_birth_attrs), + + Extractor(label='death info', + path="//div[h5='Date of Death:']", + attrs=_death_attrs), + + Extractor(label='headshot', + path="//a[@name='headshot']", + attrs=Attribute(key='headshot', + path="./img/@src")), + + Extractor(label='akas', + path="//div[h5='Alternate Names:']", + attrs=Attribute(key='akas', + path="./div/text()", + postprocess=lambda x: x.strip().split(' | '))), + + Extractor(label='filmography', + group="//div[@class='filmo'][h5]", + group_key="./h5/a[@name]/text()", + group_key_normalize=lambda x: x.lower()[:-1], + path="./ol/li", + attrs=_film_attrs) + ] + preprocessors = [ + # XXX: check that this doesn't cut "status" or other info... + (re.compile(r'
    (\.\.\.| ?).+?', re.I | re.M | re.S), + ''), + (_reRoles, _manageRoles)] + + def postprocess_data(self, data): + for what in 'birth date', 'death date': + if what in data and not data[what]: + del data[what] + return data + + +class DOMHTMLBioParser(DOMParserBase): + """Parser for the "biography" page of a given person. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + bioparser = DOMHTMLBioParser() + result = bioparser.parse(biography_html_string) + """ + _defGetRefs = True + + _birth_attrs = [Attribute(key='birth date', + path={ + 'day': "./a[starts-with(@href, " \ + "'/date/')]/text()", + 'year': "./a[starts-with(@href, " \ + "'/search/name?birth_year=')]/text()" + }, + postprocess=build_date), + Attribute(key='birth notes', + path="./a[starts-with(@href, " \ + "'/search/name?birth_place=')]/text()")] + _death_attrs = [Attribute(key='death date', + path={ + 'day': "./a[starts-with(@href, " \ + "'/date/')]/text()", + 'year': "./a[starts-with(@href, " \ + "'/search/name?death_date=')]/text()" + }, + postprocess=build_date), + Attribute(key='death notes', + path="./text()", + # TODO: check if this slicing is always correct + postprocess=lambda x: u''.join(x).strip()[2:])] + extractors = [ + Extractor(label='birth info', + path="//div[h5='Date of Birth']", + attrs=_birth_attrs), + Extractor(label='death info', + path="//div[h5='Date of Death']", + attrs=_death_attrs), + Extractor(label='nick names', + path="//div[h5='Nickname']", + attrs=Attribute(key='nick names', + path="./text()", + joiner='|', + postprocess=lambda x: [n.strip().replace(' (', + '::(', 1) for n in x.split('|') + if n.strip()])), + Extractor(label='birth name', + path="//div[h5='Birth Name']", + attrs=Attribute(key='birth name', + path="./text()", + postprocess=lambda x: canonicalName(x.strip()))), + Extractor(label='height', + path="//div[h5='Height']", + 
attrs=Attribute(key='height', + path="./text()", + postprocess=lambda x: x.strip())), + Extractor(label='mini biography', + path="//div[h5='Mini Biography']", + attrs=Attribute(key='mini biography', + multi=True, + path={ + 'bio': "./p//text()", + 'by': "./b/following-sibling::a/text()" + }, + postprocess=lambda x: "%s::%s" % \ + (x.get('bio').strip(), + (x.get('by') or u'').strip() or u'Anonymous'))), + Extractor(label='spouse', + path="//div[h5='Spouse']/table/tr", + attrs=Attribute(key='spouse', + multi=True, + path={ + 'name': "./td[1]//text()", + 'info': "./td[2]//text()" + }, + postprocess=lambda x: ("%s::%s" % \ + (x.get('name').strip(), + (x.get('info') or u'').strip())).strip(':'))), + Extractor(label='trade mark', + path="//div[h5='Trade Mark']/p", + attrs=Attribute(key='trade mark', + multi=True, + path=".//text()", + postprocess=lambda x: x.strip())), + Extractor(label='trivia', + path="//div[h5='Trivia']/p", + attrs=Attribute(key='trivia', + multi=True, + path=".//text()", + postprocess=lambda x: x.strip())), + Extractor(label='quotes', + path="//div[h5='Personal Quotes']/p", + attrs=Attribute(key='quotes', + multi=True, + path=".//text()", + postprocess=lambda x: x.strip())), + Extractor(label='salary', + path="//div[h5='Salary']/table/tr", + attrs=Attribute(key='salary history', + multi=True, + path={ + 'title': "./td[1]//text()", + 'info': "./td[2]/text()", + }, + postprocess=lambda x: "%s::%s" % \ + (x.get('title').strip(), + x.get('info').strip()))), + Extractor(label='where now', + path="//div[h5='Where Are They Now']/p", + attrs=Attribute(key='where now', + multi=True, + path=".//text()", + postprocess=lambda x: x.strip())), + ] + + preprocessors = [ + (re.compile('(
    )', re.I), r'
    \1'), + (re.compile('(
    \n\s+)', re.I + re.DOTALL), r'\1'), + (re.compile('(

    )'), r'
    \1'), + (re.compile('\.

    ([^\s])', re.I), r'. \1') + ] + + def postprocess_data(self, data): + for what in 'birth date', 'death date': + if what in data and not data[what]: + del data[what] + return data + + +class DOMHTMLOtherWorksParser(DOMParserBase): + """Parser for the "other works" and "agent" pages of a given person. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + owparser = DOMHTMLOtherWorksParser() + result = owparser.parse(otherworks_html_string) + """ + _defGetRefs = True + kind = 'other works' + + # XXX: looks like the 'agent' page is no more public. + extractors = [ + Extractor(label='other works', + path="//h5[text()='Other works']/" \ + "following-sibling::div[1]", + attrs=Attribute(key='self.kind', + path=".//text()", + postprocess=lambda x: x.strip().split('\n\n'))) + ] + + preprocessors = [ + (re.compile('(
    [^<]+
    )', re.I), + r'\1
    '), + (re.compile('(\n
    \s+)', re.I), r'\1'), + (re.compile('(
    )'), r'
    \1'), + (re.compile('

    ', re.I), r'\n\n') + ] + + +def _build_episode(link, title, minfo, role, roleA, roleAID): + """Build an Movie object for a given episode of a series.""" + episode_id = analyze_imdbid(link) + notes = u'' + minidx = minfo.find(' -') + # Sometimes, for some unknown reason, the role is left in minfo. + if minidx != -1: + slfRole = minfo[minidx+3:].lstrip() + minfo = minfo[:minidx].rstrip() + if slfRole.endswith(')'): + commidx = slfRole.rfind('(') + if commidx != -1: + notes = slfRole[commidx:] + slfRole = slfRole[:commidx] + if slfRole and role is None and roleA is None: + role = slfRole + eps_data = analyze_title(title) + eps_data['kind'] = u'episode' + # FIXME: it's wrong for multiple characters (very rare on tv series?). + if role is None: + role = roleA # At worse, it's None. + if role is None: + roleAID = None + if roleAID is not None: + roleAID = analyze_imdbid(roleAID) + e = Movie(movieID=episode_id, data=eps_data, currentRole=role, + roleID=roleAID, notes=notes) + # XXX: are we missing some notes? + # XXX: does it parse things as "Episode dated 12 May 2005 (12 May 2005)"? + if minfo.startswith('('): + pe = minfo.find(')') + if pe != -1: + date = minfo[1:pe] + if date != '????': + e['original air date'] = date + if eps_data.get('year', '????') == '????': + syear = date.split()[-1] + if syear.isdigit(): + e['year'] = int(syear) + return e + + +class DOMHTMLSeriesParser(DOMParserBase): + """Parser for the "by TV series" page of a given person. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + sparser = DOMHTMLSeriesParser() + result = sparser.parse(filmoseries_html_string) + """ + _containsObjects = True + + extractors = [ + Extractor(label='series', + group="//div[@class='filmo']/span[1]", + group_key="./a[1]", + path="./following-sibling::ol[1]/li/a[1]", + attrs=Attribute(key=None, + multi=True, + path={ + 'link': "./@href", + 'title': "./text()", + 'info': "./following-sibling::text()", + 'role': "./following-sibling::i[1]/text()", + 'roleA': "./following-sibling::a[1]/text()", + 'roleAID': "./following-sibling::a[1]/@href" + }, + postprocess=lambda x: _build_episode(x.get('link'), + x.get('title'), + (x.get('info') or u'').strip(), + x.get('role'), + x.get('roleA'), + x.get('roleAID')))) + ] + + def postprocess_data(self, data): + if len(data) == 0: + return {} + nd = {} + for key in data.keys(): + dom = self.get_dom(key) + link = self.xpath(dom, "//a/@href")[0] + title = self.xpath(dom, "//a/text()")[0][1:-1] + series = Movie(movieID=analyze_imdbid(link), + data=analyze_title(title), + accessSystem=self._as, modFunct=self._modFunct) + nd[series] = [] + for episode in data[key]: + # XXX: should we create a copy of 'series', to avoid + # circular references? + episode['episode of'] = series + nd[series].append(episode) + return {'episodes': nd} + + +class DOMHTMLPersonGenresParser(DOMParserBase): + """Parser for the "by genre" and "by keywords" pages of a given person. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + gparser = DOMHTMLPersonGenresParser() + result = gparser.parse(bygenre_html_string) + """ + kind = 'genres' + _containsObjects = True + + extractors = [ + Extractor(label='genres', + group="//b/a[@name]/following-sibling::a[1]", + group_key="./text()", + group_key_normalize=lambda x: x.lower(), + path="../../following-sibling::ol[1]/li//a[1]", + attrs=Attribute(key=None, + multi=True, + path={ + 'link': "./@href", + 'title': "./text()", + 'info': "./following-sibling::text()" + }, + postprocess=lambda x: \ + build_movie(x.get('title') + \ + x.get('info').split('[')[0], + analyze_imdbid(x.get('link'))))) + ] + + def postprocess_data(self, data): + if len(data) == 0: + return {} + return {self.kind: data} + + +from movieParser import _parse_merchandising_link + +class DOMHTMLPersonSalesParser(DOMParserBase): + """Parser for the "merchandising links" page of a given person. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. 
+ + Example: + sparser = DOMHTMLPersonSalesParser() + result = sparser.parse(sales_html_string) + """ + extractors = [ + Extractor(label='merchandising links', + group="//span[@class='merch_title']", + group_key=".//text()", + path="./following-sibling::table[1]/" \ + "/td[@class='w_rowtable_colshop']//tr[1]", + attrs=Attribute(key=None, + multi=True, + path={ + 'link': "./td[2]/a[1]/@href", + 'text': "./td[1]/img[1]/@alt", + 'cover': "./ancestor::td[1]/../" \ + "td[1]/a[1]/img[1]/@src", + }, + postprocess=_parse_merchandising_link)), + ] + + preprocessors = [ + (re.compile('(', re.I), r'\1>') + ] + + def postprocess_data(self, data): + if len(data) == 0: + return {} + return {'merchandising links': data} + + +from movieParser import DOMHTMLTechParser +from movieParser import DOMHTMLOfficialsitesParser +from movieParser import DOMHTMLAwardsParser +from movieParser import DOMHTMLNewsParser + + +_OBJECTS = { + 'maindetails_parser': ((DOMHTMLMaindetailsParser,), None), + 'bio_parser': ((DOMHTMLBioParser,), None), + 'otherworks_parser': ((DOMHTMLOtherWorksParser,), None), + #'agent_parser': ((DOMHTMLOtherWorksParser,), {'kind': 'agent'}), + 'person_officialsites_parser': ((DOMHTMLOfficialsitesParser,), None), + 'person_awards_parser': ((DOMHTMLAwardsParser,), {'subject': 'name'}), + 'publicity_parser': ((DOMHTMLTechParser,), {'kind': 'publicity'}), + 'person_series_parser': ((DOMHTMLSeriesParser,), None), + 'person_contacts_parser': ((DOMHTMLTechParser,), {'kind': 'contacts'}), + 'person_genres_parser': ((DOMHTMLPersonGenresParser,), None), + 'person_keywords_parser': ((DOMHTMLPersonGenresParser,), + {'kind': 'keywords'}), + 'news_parser': ((DOMHTMLNewsParser,), None), + 'sales_parser': ((DOMHTMLPersonSalesParser,), None) +} + diff --git a/libs/imdb/parser/http/searchCharacterParser.py b/libs/imdb/parser/http/searchCharacterParser.py new file mode 100644 index 0000000..c81ca7e --- /dev/null +++ b/libs/imdb/parser/http/searchCharacterParser.py @@ -0,0 +1,69 @@ +""" 
+parser.http.searchCharacterParser module (imdb package). + +This module provides the HTMLSearchCharacterParser class (and the +search_character_parser instance), used to parse the results of a search +for a given character. +E.g., when searching for the name "Jesse James", the parsed page would be: + http://akas.imdb.com/find?s=Characters;mx=20;q=Jesse+James + +Copyright 2007-2009 Davide Alberani + 2008 H. Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from imdb.utils import analyze_name, build_name +from utils import Extractor, Attribute, analyze_imdbid + +from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser + + +class DOMBasicCharacterParser(DOMBasicMovieParser): + """Simply get the name of a character and the imdbID. 
+ + It's used by the DOMHTMLSearchCharacterParser class to return a result + for a direct match (when a search on IMDb results in a single + character, the web server sends directly the movie page.""" + _titleFunct = lambda self, x: analyze_name(x or u'', canonical=False) + + +class DOMHTMLSearchCharacterParser(DOMHTMLSearchMovieParser): + _BaseParser = DOMBasicCharacterParser + _notDirectHitTitle = 'imdb search' + _titleBuilder = lambda self, x: build_name(x, canonical=False) + _linkPrefix = '/character/ch' + + _attrs = [Attribute(key='data', + multi=True, + path={ + 'link': "./a[1]/@href", + 'name': "./a[1]/text()" + }, + postprocess=lambda x: ( + analyze_imdbid(x.get('link') or u''), + {'name': x.get('name')} + ))] + extractors = [Extractor(label='search', + path="//td[3]/a[starts-with(@href, " \ + "'/character/ch')]/..", + attrs=_attrs)] + + +_OBJECTS = { + 'search_character_parser': ((DOMHTMLSearchCharacterParser,), + {'kind': 'character', '_basic_parser': DOMBasicCharacterParser}) +} + diff --git a/libs/imdb/parser/http/searchCompanyParser.py b/libs/imdb/parser/http/searchCompanyParser.py new file mode 100644 index 0000000..ab666fb --- /dev/null +++ b/libs/imdb/parser/http/searchCompanyParser.py @@ -0,0 +1,71 @@ +""" +parser.http.searchCompanyParser module (imdb package). + +This module provides the HTMLSearchCompanyParser class (and the +search_company_parser instance), used to parse the results of a search +for a given company. +E.g., when searching for the name "Columbia Pictures", the parsed page would be: + http://akas.imdb.com/find?s=co;mx=20;q=Columbia+Pictures + +Copyright 2008-2009 Davide Alberani <da@erlug.linux.it> + 2008 H. Turgut Uyar <uyar@tekir.org> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from imdb.utils import analyze_company_name, build_company_name +from utils import Extractor, Attribute, analyze_imdbid + +from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser + +class DOMBasicCompanyParser(DOMBasicMovieParser): + """Simply get the name of a company and the imdbID. + + It's used by the DOMHTMLSearchCompanyParser class to return a result + for a direct match (when a search on IMDb results in a single + company, the web server sends directly the company page. + """ + _titleFunct = lambda self, x: analyze_company_name(x or u'') + + +class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser): + _BaseParser = DOMBasicCompanyParser + _notDirectHitTitle = '<title>imdb company' + _titleBuilder = lambda self, x: build_company_name(x) + _linkPrefix = '/company/co' + + _attrs = [Attribute(key='data', + multi=True, + path={ + 'link': "./a[1]/@href", + 'name': "./a[1]/text()", + 'notes': "./text()[1]" + }, + postprocess=lambda x: ( + analyze_imdbid(x.get('link')), + analyze_company_name(x.get('name')+(x.get('notes') + or u''), stripNotes=True) + ))] + extractors = [Extractor(label='search', + path="//td[3]/a[starts-with(@href, " \ + "'/company/co')]/..", + attrs=_attrs)] + + +_OBJECTS = { + 'search_company_parser': ((DOMHTMLSearchCompanyParser,), + {'kind': 'company', '_basic_parser': DOMBasicCompanyParser}) +} + diff --git a/libs/imdb/parser/http/searchKeywordParser.py b/libs/imdb/parser/http/searchKeywordParser.py new file mode 100644 index 0000000..ed72906 --- /dev/null +++ 
b/libs/imdb/parser/http/searchKeywordParser.py @@ -0,0 +1,111 @@ +""" +parser.http.searchKeywordParser module (imdb package). + +This module provides the HTMLSearchKeywordParser class (and the +search_company_parser instance), used to parse the results of a search +for a given keyword. +E.g., when searching for the keyword "alabama", the parsed page would be: + http://akas.imdb.com/find?s=kw;mx=20;q=alabama + +Copyright 2009 Davide Alberani <da@erlug.linux.it> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from utils import Extractor, Attribute, analyze_imdbid +from imdb.utils import analyze_title, analyze_company_name + +from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser + +class DOMBasicKeywordParser(DOMBasicMovieParser): + """Simply get the name of a keyword. + + It's used by the DOMHTMLSearchKeywordParser class to return a result + for a direct match (when a search on IMDb results in a single + keyword, the web server sends directly the keyword page. + """ + # XXX: it's still to be tested! + # I'm not even sure there can be a direct hit, searching for keywords. 
+ _titleFunct = lambda self, x: analyze_company_name(x or u'') + + +class DOMHTMLSearchKeywordParser(DOMHTMLSearchMovieParser): + """Parse the html page that the IMDb web server shows when the + "new search system" is used, searching for keywords similar to + the one given.""" + + _BaseParser = DOMBasicKeywordParser + _notDirectHitTitle = '<title>imdb keyword' + _titleBuilder = lambda self, x: x + _linkPrefix = '/keyword/' + + _attrs = [Attribute(key='data', + multi=True, + path="./a[1]/text()" + )] + extractors = [Extractor(label='search', + path="//td[3]/a[starts-with(@href, " \ + "'/keyword/')]/..", + attrs=_attrs)] + + +def custom_analyze_title4kwd(title, yearNote, outline): + """Return a dictionary with the needed info.""" + title = title.strip() + if not title: + return {} + if yearNote: + yearNote = '%s)' % yearNote.split(' ')[0] + title = title + ' ' + yearNote + retDict = analyze_title(title) + if outline: + retDict['plot outline'] = outline + return retDict + + +class DOMHTMLSearchMovieKeywordParser(DOMHTMLSearchMovieParser): + """Parse the html page that the IMDb web server shows when the + "new search system" is used, searching for movies with the given + keyword.""" + + _notDirectHitTitle = '<title>best' + + _attrs = [Attribute(key='data', + multi=True, + path={ + 'link': "./a[1]/@href", + 'info': "./a[1]//text()", + 'ynote': "./span[@class='desc']/text()", + 'outline': "./span[@class='outline']//text()" + }, + postprocess=lambda x: ( + analyze_imdbid(x.get('link') or u''), + custom_analyze_title4kwd(x.get('info') or u'', + x.get('ynote') or u'', + x.get('outline') or u'') + ))] + + extractors = [Extractor(label='search', + path="//td[3]/a[starts-with(@href, " \ + "'/title/tt')]/..", + attrs=_attrs)] + + +_OBJECTS = { + 'search_keyword_parser': ((DOMHTMLSearchKeywordParser,), + {'kind': 'keyword', '_basic_parser': DOMBasicKeywordParser}), + 'search_moviekeyword_parser': ((DOMHTMLSearchMovieKeywordParser,), None) +} + diff --git 
a/libs/imdb/parser/http/searchMovieParser.py b/libs/imdb/parser/http/searchMovieParser.py new file mode 100644 index 0000000..2e7ace9 --- /dev/null +++ b/libs/imdb/parser/http/searchMovieParser.py @@ -0,0 +1,178 @@ +""" +parser.http.searchMovieParser module (imdb package). + +This module provides the HTMLSearchMovieParser class (and the +search_movie_parser instance), used to parse the results of a search +for a given title. +E.g., for when searching for the title "the passion", the parsed +page would be: + http://akas.imdb.com/find?q=the+passion&tt=on&mx=20 + +Copyright 2004-2010 Davide Alberani <da@erlug.linux.it> + 2008 H. Turgut Uyar <uyar@tekir.org> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import re +from imdb.utils import analyze_title, build_title +from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid + + +class DOMBasicMovieParser(DOMParserBase): + """Simply get the title of a movie and the imdbID. + + It's used by the DOMHTMLSearchMovieParser class to return a result + for a direct match (when a search on IMDb results in a single + movie, the web server sends directly the movie page.""" + # Stay generic enough to be used also for other DOMBasic*Parser classes. 
+ _titleAttrPath = ".//text()" + _linkPath = "//link[@rel='canonical']" + _titleFunct = lambda self, x: analyze_title(x or u'') + + def _init(self): + self.preprocessors += [('<span class="tv-extra">TV mini-series</span>', + '<span class="tv-extra">(mini)</span>')] + self.extractors = [Extractor(label='title', + path="//h1", + attrs=Attribute(key='title', + path=self._titleAttrPath, + postprocess=self._titleFunct)), + Extractor(label='link', + path=self._linkPath, + attrs=Attribute(key='link', path="./@href", + postprocess=lambda x: \ + analyze_imdbid((x or u'').replace( + 'http://pro.imdb.com', '')) + ))] + + # Remove 'More at IMDb Pro' links. + preprocessors = [(re.compile(r'<span class="pro-link".*?</span>'), ''), + (re.compile(r'<a href="http://ad.doubleclick.net.*?;id=(co[0-9]{7});'), r'<a href="http://pro.imdb.com/company/\1"></a>< a href="')] + + def postprocess_data(self, data): + if not 'link' in data: + data = [] + else: + link = data.pop('link') + if (link and data): + data = [(link, data)] + else: + data = [] + return data + + +def custom_analyze_title(title): + """Remove garbage notes after the (year), (year/imdbIndex) or (year) (TV)""" + # XXX: very crappy. :-( + nt = title.split(' ')[0] + if nt: + title = nt + if not title: + return {} + return analyze_title(title) + +# Manage AKAs. 
+_reAKAStitles = re.compile(r'(?:aka) <em>"(.*?)(<br>|<\/td>)', re.I | re.M) + +class DOMHTMLSearchMovieParser(DOMParserBase): + """Parse the html page that the IMDb web server shows when the + "new search system" is used, for movies.""" + + _BaseParser = DOMBasicMovieParser + _notDirectHitTitle = '<title>imdb title' + _titleBuilder = lambda self, x: build_title(x) + _linkPrefix = '/title/tt' + + _attrs = [Attribute(key='data', + multi=True, + path={ + 'link': "./a[1]/@href", + 'info': ".//text()", + #'akas': ".//div[@class='_imdbpyAKA']//text()" + 'akas': ".//p[@class='find-aka']//text()" + }, + postprocess=lambda x: ( + analyze_imdbid(x.get('link') or u''), + custom_analyze_title(x.get('info') or u''), + x.get('akas') + ))] + extractors = [Extractor(label='search', + path="//td[3]/a[starts-with(@href, '/title/tt')]/..", + attrs=_attrs)] + def _init(self): + self.url = u'' + + def _reset(self): + self.url = u'' + + def preprocess_string(self, html_string): + if self._notDirectHitTitle in html_string[:1024].lower(): + if self._linkPrefix == '/title/tt': + # Only for movies. + html_string = html_string.replace('(TV mini-series)', '(mini)') + html_string = html_string.replace('<p class="find-aka">', + '<p class="find-aka">::') + #html_string = _reAKAStitles.sub( + # r'<div class="_imdbpyAKA">\1::</div>\2', html_string) + return html_string + # Direct hit! + dbme = self._BaseParser(useModule=self._useModule) + res = dbme.parse(html_string, url=self.url) + if not res: return u'' + res = res['data'] + if not (res and res[0]): return u'' + link = '%s%s' % (self._linkPrefix, res[0][0]) + # # Tries to cope with companies for which links to pro.imdb.com + # # are missing. 
+ # link = self.url.replace(imdbURL_base[:-1], '') + title = self._titleBuilder(res[0][1]) + if not (link and title): return u'' + link = link.replace('http://pro.imdb.com', '') + new_html = '<td></td><td></td><td><a href="%s">%s</a></td>' % (link, + title) + return new_html + + def postprocess_data(self, data): + if not data.has_key('data'): + data['data'] = [] + results = getattr(self, 'results', None) + if results is not None: + data['data'][:] = data['data'][:results] + # Horrible hack to support AKAs. + if data and data['data'] and len(data['data'][0]) == 3 and \ + isinstance(data['data'][0], tuple): + for idx, datum in enumerate(data['data']): + if not isinstance(datum, tuple): + continue + if datum[2] is not None: + akas = filter(None, datum[2].split('::')) + if self._linkPrefix == '/title/tt': + akas = [a.replace('" - ', '::').rstrip() for a in akas] + akas = [a.replace('aka "', '', 1).lstrip() for a in akas] + datum[1]['akas'] = akas + data['data'][idx] = (datum[0], datum[1]) + else: + data['data'][idx] = (datum[0], datum[1]) + return data + + def add_refs(self, data): + return data + + +_OBJECTS = { + 'search_movie_parser': ((DOMHTMLSearchMovieParser,), None) +} + diff --git a/libs/imdb/parser/http/searchPersonParser.py b/libs/imdb/parser/http/searchPersonParser.py new file mode 100644 index 0000000..1756efc --- /dev/null +++ b/libs/imdb/parser/http/searchPersonParser.py @@ -0,0 +1,92 @@ +""" +parser.http.searchPersonParser module (imdb package). + +This module provides the HTMLSearchPersonParser class (and the +search_person_parser instance), used to parse the results of a search +for a given person. +E.g., when searching for the name "Mel Gibson", the parsed page would be: + http://akas.imdb.com/find?q=Mel+Gibson&nm=on&mx=20 + +Copyright 2004-2010 Davide Alberani <da@erlug.linux.it> + 2008 H. 
Turgut Uyar <uyar@tekir.org> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import re +from imdb.utils import analyze_name, build_name +from utils import Extractor, Attribute, analyze_imdbid + +from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser + + +def _cleanName(n): + """Clean the name in a title tag.""" + if not n: + return u'' + n = n.replace('Filmography by type for', '') # FIXME: temporary. + return n + +class DOMBasicPersonParser(DOMBasicMovieParser): + """Simply get the name of a person and the imdbID. 
+ + It's used by the DOMHTMLSearchPersonParser class to return a result + for a direct match (when a search on IMDb results in a single + person, the web server sends directly the movie page.""" + _titleFunct = lambda self, x: analyze_name(_cleanName(x), canonical=1) + + +_reAKASp = re.compile(r'(?:aka|birth name) (<em>")(.*?)"(<br>|<\/em>|<\/td>)', + re.I | re.M) + +class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser): + """Parse the html page that the IMDb web server shows when the + "new search system" is used, for persons.""" + _BaseParser = DOMBasicPersonParser + _notDirectHitTitle = '<title>imdb name' + _titleBuilder = lambda self, x: build_name(x, canonical=True) + _linkPrefix = '/name/nm' + + _attrs = [Attribute(key='data', + multi=True, + path={ + 'link': "./a[1]/@href", + 'name': "./a[1]/text()", + 'index': "./text()[1]", + 'akas': ".//div[@class='_imdbpyAKA']/text()" + }, + postprocess=lambda x: ( + analyze_imdbid(x.get('link') or u''), + analyze_name((x.get('name') or u'') + \ + (x.get('index') or u''), + canonical=1), x.get('akas') + ))] + extractors = [Extractor(label='search', + path="//td[3]/a[starts-with(@href, '/name/nm')]/..", + attrs=_attrs)] + + def preprocess_string(self, html_string): + if self._notDirectHitTitle in html_string[:1024].lower(): + html_string = _reAKASp.sub( + r'\1<div class="_imdbpyAKA">\2::</div>\3', + html_string) + return DOMHTMLSearchMovieParser.preprocess_string(self, html_string) + + +_OBJECTS = { + 'search_person_parser': ((DOMHTMLSearchPersonParser,), + {'kind': 'person', '_basic_parser': DOMBasicPersonParser}) +} + diff --git a/libs/imdb/parser/http/topBottomParser.py b/libs/imdb/parser/http/topBottomParser.py new file mode 100644 index 0000000..f0f2950 --- /dev/null +++ b/libs/imdb/parser/http/topBottomParser.py @@ -0,0 +1,106 @@ +""" +parser.http.topBottomParser module (imdb package). + +This module provides the classes (and the instances), used to parse the +lists of top 250 and bottom 100 movies. 
+E.g.: + http://akas.imdb.com/chart/top + http://akas.imdb.com/chart/bottom + +Copyright 2009 Davide Alberani <da@erlug.linux.it> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from imdb.utils import analyze_title +from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid + + +class DOMHTMLTop250Parser(DOMParserBase): + """Parser for the "top 250" page. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + tparser = DOMHTMLTop250Parser() + result = tparser.parse(top250_html_string) + """ + label = 'top 250' + ranktext = 'top 250 rank' + + def _init(self): + self.extractors = [Extractor(label=self.label, + path="//div[@id='main']//table//tr", + attrs=Attribute(key=None, + multi=True, + path={self.ranktext: "./td[1]//text()", + 'rating': "./td[2]//text()", + 'title': "./td[3]//text()", + 'movieID': "./td[3]//a/@href", + 'votes': "./td[4]//text()" + }))] + + def postprocess_data(self, data): + if not data or self.label not in data: + return [] + mlist = [] + data = data[self.label] + # Avoid duplicates. A real fix, using XPath, is auspicabile. + # XXX: probably this is no more needed. 
+ seenIDs = [] + for d in data: + if 'movieID' not in d: continue + if self.ranktext not in d: continue + if 'title' not in d: continue + theID = analyze_imdbid(d['movieID']) + if theID is None: + continue + theID = str(theID) + if theID in seenIDs: + continue + seenIDs.append(theID) + minfo = analyze_title(d['title']) + try: minfo[self.ranktext] = int(d[self.ranktext].replace('.', '')) + except: pass + if 'votes' in d: + try: minfo['votes'] = int(d['votes'].replace(',', '')) + except: pass + if 'rating' in d: + try: minfo['rating'] = float(d['rating']) + except: pass + mlist.append((theID, minfo)) + return mlist + + +class DOMHTMLBottom100Parser(DOMHTMLTop250Parser): + """Parser for the "bottom 100" page. + The page should be provided as a string, as taken from + the akas.imdb.com server. The final result will be a + dictionary, with a key for every relevant section. + + Example: + tparser = DOMHTMLBottom100Parser() + result = tparser.parse(bottom100_html_string) + """ + label = 'bottom 100' + ranktext = 'bottom 100 rank' + + +_OBJECTS = { + 'top250_parser': ((DOMHTMLTop250Parser,), None), + 'bottom100_parser': ((DOMHTMLBottom100Parser,), None) +} + diff --git a/libs/imdb/parser/http/utils.py b/libs/imdb/parser/http/utils.py new file mode 100644 index 0000000..2e6c911 --- /dev/null +++ b/libs/imdb/parser/http/utils.py @@ -0,0 +1,817 @@ +""" +parser.http.utils module (imdb package). + +This module provides miscellaneous utilities used by +the imdb.parser.http classes. + +Copyright 2004-2010 Davide Alberani <da@erlug.linux.it> + 2008 H. Turgut Uyar <uyar@tekir.org> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import re +import logging + +from imdb._exceptions import IMDbError + +from imdb.utils import flatten, _Container +from imdb.Movie import Movie +from imdb.Person import Person +from imdb.Character import Character + + +# Year, imdbIndex and kind. +re_yearKind_index = re.compile(r'(\([0-9\?]{4}(?:/[IVXLCDM]+)?\)(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)') + +# Match imdb ids in href tags +re_imdbid = re.compile(r'(title/tt|name/nm|character/ch|company/co)([0-9]+)') + +def analyze_imdbid(href): + """Return an imdbID from an URL.""" + if not href: + return None + match = re_imdbid.search(href) + if not match: + return None + return str(match.group(2)) + + +_modify_keys = list(Movie.keys_tomodify_list) + list(Person.keys_tomodify_list) +def _putRefs(d, re_titles, re_names, re_characters, lastKey=None): + """Iterate over the strings inside list items or dictionary values, + substitutes movie titles and person names with the (qv) references.""" + if isinstance(d, list): + for i in xrange(len(d)): + if isinstance(d[i], (unicode, str)): + if lastKey in _modify_keys: + if re_names: + d[i] = re_names.sub(ur"'\1' (qv)", d[i]) + if re_titles: + d[i] = re_titles.sub(ur'_\1_ (qv)', d[i]) + if re_characters: + d[i] = re_characters.sub(ur'#\1# (qv)', d[i]) + elif isinstance(d[i], (list, dict)): + _putRefs(d[i], re_titles, re_names, re_characters, + lastKey=lastKey) + elif isinstance(d, dict): + for k, v in d.items(): + lastKey = k + if isinstance(v, (unicode, str)): + if lastKey in _modify_keys: + if re_names: + d[k] = re_names.sub(ur"'\1' 
(qv)", v) + if re_titles: + d[k] = re_titles.sub(ur'_\1_ (qv)', v) + if re_characters: + d[k] = re_characters.sub(ur'#\1# (qv)', v) + elif isinstance(v, (list, dict)): + _putRefs(d[k], re_titles, re_names, re_characters, + lastKey=lastKey) + + +# Handle HTML/XML/SGML entities. +from htmlentitydefs import entitydefs +entitydefs = entitydefs.copy() +entitydefsget = entitydefs.get +entitydefs['nbsp'] = ' ' + +sgmlentity = {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''} +sgmlentityget = sgmlentity.get +_sgmlentkeys = sgmlentity.keys() + +entcharrefs = {} +entcharrefsget = entcharrefs.get +for _k, _v in entitydefs.items(): + if _k in _sgmlentkeys: continue + if _v[0:2] == '&#': + dec_code = _v[1:-1] + _v = unichr(int(_v[2:-1])) + entcharrefs[dec_code] = _v + else: + dec_code = '#' + str(ord(_v)) + _v = unicode(_v, 'latin_1', 'replace') + entcharrefs[dec_code] = _v + entcharrefs[_k] = _v +del _sgmlentkeys, _k, _v +entcharrefs['#160'] = u' ' +entcharrefs['#xA0'] = u' ' +entcharrefs['#xa0'] = u' ' +entcharrefs['#XA0'] = u' ' +entcharrefs['#x22'] = u'"' +entcharrefs['#X22'] = u'"' +# convert &x26; to &, to make BeautifulSoup happy; beware that this +# leaves lone '&' in the html broken, but I assume this is better than +# the contrary... +entcharrefs['#38'] = u'&' +entcharrefs['#x26'] = u'&' +entcharrefs['#x26'] = u'&' + +re_entcharrefs = re.compile('&(%s|\#160|\#\d{1,5}|\#x[0-9a-f]{1,4});' % + '|'.join(map(re.escape, entcharrefs)), re.I) +re_entcharrefssub = re_entcharrefs.sub + +sgmlentity.update(dict([('#34', u'"'), ('#38', u'&'), + ('#60', u'<'), ('#62', u'>'), ('#39', u"'")])) +re_sgmlref = re.compile('&(%s);' % '|'.join(map(re.escape, sgmlentity))) +re_sgmlrefsub = re_sgmlref.sub + +# Matches XML-only single tags, like <br/> ; they are invalid in HTML, +# but widely used by IMDb web site. 
:-/ +re_xmltags = re.compile('<([a-zA-Z]+)/>') + + +def _replXMLRef(match): + """Replace the matched XML/HTML entities and references; + replace everything except sgml entities like <, >, ...""" + ref = match.group(1) + value = entcharrefsget(ref) + if value is None: + if ref[0] == '#': + ref_code = ref[1:] + if ref_code in ('34', '38', '60', '62', '39'): + return match.group(0) + elif ref_code[0].lower() == 'x': + #if ref[2:] == '26': + # # Don't convert &x26; to &, to make BeautifulSoup happy. + # return '&' + return unichr(int(ref[2:], 16)) + else: + return unichr(int(ref[1:])) + else: + return ref + return value + +def subXMLRefs(s): + """Return the given html string with entity and char references + replaced.""" + return re_entcharrefssub(_replXMLRef, s) + +# XXX: no more used here; move it to mobile (they are imported by helpers, too)? +def _replSGMLRefs(match): + """Replace the matched SGML entity.""" + ref = match.group(1) + return sgmlentityget(ref, ref) + +def subSGMLRefs(s): + """Return the given html string with sgml entity and char references + replaced.""" + return re_sgmlrefsub(_replSGMLRefs, s) + + +_b_p_logger = logging.getLogger('imdbpy.parser.http.build_person') +def build_person(txt, personID=None, billingPos=None, + roleID=None, accessSystem='http', modFunct=None): + """Return a Person instance from the tipical <tr>...</tr> strings + found in the IMDb's web site.""" + #if personID is None + # _b_p_logger.debug('empty name or personID for "%s"', txt) + notes = u'' + role = u'' + # Search the (optional) separator between name and role/notes. + if txt.find('....') != -1: + sep = '....' + elif txt.find('...') != -1: + sep = '...' + else: + sep = '...' + # Replace the first parenthesis, assuming there are only + # notes, after. + # Rationale: no imdbIndex is (ever?) showed on the web site. 
+ txt = txt.replace('(', '...(', 1) + txt_split = txt.split(sep, 1) + name = txt_split[0].strip() + if len(txt_split) == 2: + role_comment = txt_split[1].strip() + # Strip common endings. + if role_comment[-4:] == ' and': + role_comment = role_comment[:-4].rstrip() + elif role_comment[-2:] == ' &': + role_comment = role_comment[:-2].rstrip() + elif role_comment[-6:] == '& ....': + role_comment = role_comment[:-6].rstrip() + # Get the notes. + if roleID is not None: + if not isinstance(roleID, list): + cmt_idx = role_comment.find('(') + if cmt_idx != -1: + role = role_comment[:cmt_idx].rstrip() + notes = role_comment[cmt_idx:] + else: + # Just a role, without notes. + role = role_comment + else: + role = role_comment + else: + # We're managing something that doesn't have a 'role', so + # everything are notes. + notes = role_comment + if role == '....': role = u'' + roleNotes = [] + # Manages multiple roleIDs. + if isinstance(roleID, list): + rolesplit = role.split('/') + role = [] + for r in rolesplit: + nidx = r.find('(') + if nidx != -1: + role.append(r[:nidx].rstrip()) + roleNotes.append(r[nidx:]) + else: + role.append(r) + roleNotes.append(None) + lr = len(role) + lrid = len(roleID) + if lr > lrid: + roleID += [None] * (lrid - lr) + elif lr < lrid: + roleID = roleID[:lr] + for i, rid in enumerate(roleID): + if rid is not None: + roleID[i] = str(rid) + if lr == 1: + role = role[0] + roleID = roleID[0] + elif roleID is not None: + roleID = str(roleID) + if personID is not None: + personID = str(personID) + if (not name) or (personID is None): + # Set to 'debug', since build_person is expected to receive some crap. + _b_p_logger.debug('empty name or personID for "%s"', txt) + # XXX: return None if something strange is detected? 
+ person = Person(name=name, personID=personID, currentRole=role, + roleID=roleID, notes=notes, billingPos=billingPos, + modFunct=modFunct, accessSystem=accessSystem) + if roleNotes and len(roleNotes) == len(roleID): + for idx, role in enumerate(person.currentRole): + if roleNotes[idx]: + role.notes = roleNotes[idx] + return person + + +_b_m_logger = logging.getLogger('imdbpy.parser.http.build_movie') +# To shrink spaces. +re_spaces = re.compile(r'\s+') +def build_movie(txt, movieID=None, roleID=None, status=None, + accessSystem='http', modFunct=None, _parsingCharacter=False, + _parsingCompany=False): + """Given a string as normally seen on the "categorized" page of + a person on the IMDb's web site, returns a Movie instance.""" + if _parsingCharacter: + _defSep = ' Played by ' + elif _parsingCompany: + _defSep = ' ... ' + else: + _defSep = ' .... ' + title = re_spaces.sub(' ', txt).strip() + # Split the role/notes from the movie title. + tsplit = title.split(_defSep, 1) + role = u'' + notes = u'' + roleNotes = [] + if len(tsplit) == 2: + title = tsplit[0].rstrip() + role = tsplit[1].lstrip() + if title[-9:] == 'TV Series': + title = title[:-9].rstrip() + elif title[-14:] == 'TV mini-series': + title = title[:-14] + ' (mini)' + # Try to understand where the movie title ends. + while True: + if title[-1:] != ')': + # Ignore the silly "TV Series" notice. + if title[-9:] == 'TV Series': + title = title[:-9].rstrip() + continue + else: + # Just a title: stop here. + break + # Try to match paired parentheses; yes: sometimes there are + # parentheses inside comments... + nidx = title.rfind('(') + while (nidx != -1 and \ + title[nidx:].count('(') != title[nidx:].count(')')): + nidx = title[:nidx].rfind('(') + # Unbalanced parentheses: stop here. + if nidx == -1: break + # The last item in parentheses seems to be a year: stop here. 
+ first4 = title[nidx+1:nidx+5] + if (first4.isdigit() or first4 == '????') and \ + title[nidx+5:nidx+6] in (')', '/'): break + # The last item in parentheses is a known kind: stop here. + if title[nidx+1:-1] in ('TV', 'V', 'mini', 'VG'): break + # Else, in parentheses there are some notes. + # XXX: should the notes in the role half be kept separated + # from the notes in the movie title half? + if notes: notes = '%s %s' % (title[nidx:], notes) + else: notes = title[nidx:] + title = title[:nidx].rstrip() + if _parsingCharacter and roleID and not role: + roleID = None + if not roleID: + roleID = None + elif len(roleID) == 1: + roleID = roleID[0] + # Manages multiple roleIDs. + if isinstance(roleID, list): + tmprole = role.split('/') + role = [] + for r in tmprole: + nidx = r.find('(') + if nidx != -1: + role.append(r[:nidx].rstrip()) + roleNotes.append(r[nidx:]) + else: + role.append(r) + roleNotes.append(None) + lr = len(role) + lrid = len(roleID) + if lr > lrid: + roleID += [None] * (lrid - lr) + elif lr < lrid: + roleID = roleID[:lr] + for i, rid in enumerate(roleID): + if rid is not None: + roleID[i] = str(rid) + if lr == 1: + role = role[0] + roleID = roleID[0] + elif roleID is not None: + roleID = str(roleID) + if movieID is not None: + movieID = str(movieID) + if (not title) or (movieID is None): + _b_m_logger.error('empty title or movieID for "%s"', txt) + m = Movie(title=title, movieID=movieID, notes=notes, currentRole=role, + roleID=roleID, roleIsPerson=_parsingCharacter, + modFunct=modFunct, accessSystem=accessSystem) + if roleNotes and len(roleNotes) == len(roleID): + for idx, role in enumerate(m.currentRole): + if roleNotes[idx]: + role.notes = roleNotes[idx] + # Status can't be checked here, and must be detected by the parser. 
+ if status: + m['status'] = status + return m + + +class DOMParserBase(object): + """Base parser to handle HTML data from the IMDb's web server.""" + _defGetRefs = False + _containsObjects = False + + preprocessors = [] + extractors = [] + usingModule = None + + _logger = logging.getLogger('imdbpy.parser.http.domparser') + + def __init__(self, useModule=None): + """Initialize the parser. useModule can be used to force it + to use 'BeautifulSoup' or 'lxml'; by default, it's auto-detected, + using 'lxml' if available and falling back to 'BeautifulSoup' + otherwise.""" + # Module to use. + if useModule is None: + useModule = ('lxml', 'BeautifulSoup') + if not isinstance(useModule, (tuple, list)): + useModule = [useModule] + self._useModule = useModule + nrMods = len(useModule) + _gotError = False + for idx, mod in enumerate(useModule): + mod = mod.strip().lower() + try: + if mod == 'lxml': + from lxml.html import fromstring + from lxml.etree import tostring + self._is_xml_unicode = False + self.usingModule = 'lxml' + elif mod == 'beautifulsoup': + from bsouplxml.html import fromstring + from bsouplxml.etree import tostring + self._is_xml_unicode = True + self.usingModule = 'beautifulsoup' + else: + self._logger.warn('unknown module "%s"' % mod) + continue + self.fromstring = fromstring + self._tostring = tostring + if _gotError: + self._logger.warn('falling back to "%s"' % mod) + break + except ImportError, e: + if idx+1 >= nrMods: + # Raise the exception, if we don't have any more + # options to try. + raise IMDbError, 'unable to use any parser in %s: %s' % \ + (str(useModule), str(e)) + else: + self._logger.warn('unable to use "%s": %s' % (mod, str(e))) + _gotError = True + continue + else: + raise IMDbError, 'unable to use parsers in %s' % str(useModule) + # Fall-back defaults. 
+ self._modFunct = None + self._as = 'http' + self._cname = self.__class__.__name__ + self._init() + self.reset() + + def reset(self): + """Reset the parser.""" + # Names and titles references. + self._namesRefs = {} + self._titlesRefs = {} + self._charactersRefs = {} + self._reset() + + def _init(self): + """Subclasses can override this method, if needed.""" + pass + + def _reset(self): + """Subclasses can override this method, if needed.""" + pass + + def parse(self, html_string, getRefs=None, **kwds): + """Return the dictionary generated from the given html string; + getRefs can be used to force the gathering of movies/persons/characters + references.""" + self.reset() + if getRefs is not None: + self.getRefs = getRefs + else: + self.getRefs = self._defGetRefs + # Useful only for the testsuite. + if not isinstance(html_string, unicode): + html_string = unicode(html_string, 'latin_1', 'replace') + html_string = subXMLRefs(html_string) + # Temporary fix: self.parse_dom must work even for empty strings. + html_string = self.preprocess_string(html_string) + html_string = html_string.strip() + # tag attributes like title=""Family Guy"" will be + # converted to title=""Family Guy"" and this confuses BeautifulSoup. 
+ if self.usingModule == 'beautifulsoup': + html_string = html_string.replace('""', '"') + if html_string: + dom = self.get_dom(html_string) + try: + dom = self.preprocess_dom(dom) + except Exception, e: + self._logger.error('%s: caught exception preprocessing DOM', + self._cname, exc_info=True) + if self.getRefs: + try: + self.gather_refs(dom) + except Exception, e: + self._logger.warn('%s: unable to gather refs: %s', + self._cname, exc_info=True) + data = self.parse_dom(dom) + else: + data = {} + try: + data = self.postprocess_data(data) + except Exception, e: + self._logger.error('%s: caught exception postprocessing data', + self._cname, exc_info=True) + if self._containsObjects: + self.set_objects_params(data) + data = self.add_refs(data) + return data + + def _build_empty_dom(self): + from bsouplxml import _bsoup + return _bsoup.BeautifulSoup('') + + def get_dom(self, html_string): + """Return a dom object, from the given string.""" + try: + dom = self.fromstring(html_string) + if dom is None: + dom = self._build_empty_dom() + self._logger.error('%s: using a fake empty DOM', self._cname) + return dom + except Exception, e: + self._logger.error('%s: caught exception parsing DOM', + self._cname, exc_info=True) + return self._build_empty_dom() + + def xpath(self, element, path): + """Return elements matching the given XPath.""" + try: + xpath_result = element.xpath(path) + if self._is_xml_unicode: + return xpath_result + result = [] + for item in xpath_result: + if isinstance(item, str): + item = unicode(item) + result.append(item) + return result + except Exception, e: + self._logger.error('%s: caught exception extracting XPath "%s"', + self._cname, path, exc_info=True) + return [] + + def tostring(self, element): + """Convert the element to a string.""" + if isinstance(element, (unicode, str)): + return unicode(element) + else: + try: + return self._tostring(element, encoding=unicode) + except Exception, e: + self._logger.error('%s: unable to convert to 
string', + self._cname, exc_info=True) + return u'' + + def clone(self, element): + """Clone an element.""" + return self.fromstring(self.tostring(element)) + + def preprocess_string(self, html_string): + """Here we can modify the text, before it's parsed.""" + if not html_string: + return html_string + # Remove silly  » chars. + html_string = html_string.replace(u' \xbb', u'') + try: + preprocessors = self.preprocessors + except AttributeError: + return html_string + for src, sub in preprocessors: + # re._pattern_type is present only since Python 2.5. + if callable(getattr(src, 'sub', None)): + html_string = src.sub(sub, html_string) + elif isinstance(src, str): + html_string = html_string.replace(src, sub) + elif callable(src): + try: + html_string = src(html_string) + except Exception, e: + _msg = '%s: caught exception preprocessing html' + self._logger.error(_msg, self._cname, exc_info=True) + continue + ##print html_string.encode('utf8') + return html_string + + def gather_refs(self, dom): + """Collect references.""" + grParser = GatherRefs(useModule=self._useModule) + grParser._as = self._as + grParser._modFunct = self._modFunct + refs = grParser.parse_dom(dom) + refs = grParser.postprocess_data(refs) + self._namesRefs = refs['names refs'] + self._titlesRefs = refs['titles refs'] + self._charactersRefs = refs['characters refs'] + + def preprocess_dom(self, dom): + """Last chance to modify the dom, before the rules in self.extractors + are applied by the parse_dom method.""" + return dom + + def parse_dom(self, dom): + """Parse the given dom according to the rules specified + in self.extractors.""" + result = {} + for extractor in self.extractors: + ##print extractor.label + if extractor.group is None: + elements = [(extractor.label, element) + for element in self.xpath(dom, extractor.path)] + else: + groups = self.xpath(dom, extractor.group) + elements = [] + for group in groups: + group_key = self.xpath(group, extractor.group_key) + if not group_key: 
continue + group_key = group_key[0] + # XXX: always tries the conversion to unicode: + # BeautifulSoup.NavigableString is a subclass + # of unicode, and so it's never converted. + group_key = self.tostring(group_key) + normalizer = extractor.group_key_normalize + if normalizer is not None: + if callable(normalizer): + try: + group_key = normalizer(group_key) + except Exception, e: + _m = '%s: unable to apply group_key normalizer' + self._logger.error(_m, self._cname, + exc_info=True) + group_elements = self.xpath(group, extractor.path) + elements.extend([(group_key, element) + for element in group_elements]) + for group_key, element in elements: + for attr in extractor.attrs: + if isinstance(attr.path, dict): + data = {} + for field in attr.path.keys(): + path = attr.path[field] + value = self.xpath(element, path) + if not value: + data[field] = None + else: + # XXX: use u'' , to join? + data[field] = ''.join(value) + else: + data = self.xpath(element, attr.path) + if not data: + data = None + else: + data = attr.joiner.join(data) + if not data: + continue + attr_postprocess = attr.postprocess + if callable(attr_postprocess): + try: + data = attr_postprocess(data) + except Exception, e: + _m = '%s: unable to apply attr postprocess' + self._logger.error(_m, self._cname, exc_info=True) + key = attr.key + if key is None: + key = group_key + elif key.startswith('.'): + # assuming this is an xpath + try: + key = self.xpath(element, key)[0] + except IndexError: + self._logger.error('%s: XPath returned no items', + self._cname, exc_info=True) + elif key.startswith('self.'): + key = getattr(self, key[5:]) + if attr.multi: + if key not in result: + result[key] = [] + result[key].append(data) + else: + if isinstance(data, dict): + result.update(data) + else: + result[key] = data + return result + + def postprocess_data(self, data): + """Here we can modify the data.""" + return data + + def set_objects_params(self, data): + """Set parameters of Movie/Person/... 
instances, since they are + not always set in the parser's code.""" + for obj in flatten(data, yieldDictKeys=True, scalar=_Container): + obj.accessSystem = self._as + obj.modFunct = self._modFunct + + def add_refs(self, data): + """Modify data according to the expected output.""" + if self.getRefs: + titl_re = ur'(%s)' % '|'.join([re.escape(x) for x + in self._titlesRefs.keys()]) + if titl_re != ur'()': re_titles = re.compile(titl_re, re.U) + else: re_titles = None + nam_re = ur'(%s)' % '|'.join([re.escape(x) for x + in self._namesRefs.keys()]) + if nam_re != ur'()': re_names = re.compile(nam_re, re.U) + else: re_names = None + chr_re = ur'(%s)' % '|'.join([re.escape(x) for x + in self._charactersRefs.keys()]) + if chr_re != ur'()': re_characters = re.compile(chr_re, re.U) + else: re_characters = None + _putRefs(data, re_titles, re_names, re_characters) + return {'data': data, 'titlesRefs': self._titlesRefs, + 'namesRefs': self._namesRefs, + 'charactersRefs': self._charactersRefs} + + +class Extractor(object): + """Instruct the DOM parser about how to parse a document.""" + def __init__(self, label, path, attrs, group=None, group_key=None, + group_key_normalize=None): + """Initialize an Extractor object, used to instruct the DOM parser + about how to parse a document.""" + # rarely (never?) used, mostly for debugging purposes. + self.label = label + self.group = group + if group_key is None: + self.group_key = ".//text()" + else: + self.group_key = group_key + self.group_key_normalize = group_key_normalize + self.path = path + # A list of attributes to fetch. 
+ if isinstance(attrs, Attribute): + attrs = [attrs] + self.attrs = attrs + + def __repr__(self): + """String representation of an Extractor object.""" + r = '<Extractor id:%s (label=%s, path=%s, attrs=%s, group=%s, ' \ + 'group_key=%s group_key_normalize=%s)>' % (id(self), + self.label, self.path, repr(self.attrs), self.group, + self.group_key, self.group_key_normalize) + return r + + +class Attribute(object): + """The attribute to consider, for a given node.""" + def __init__(self, key, multi=False, path=None, joiner=None, + postprocess=None): + """Initialize an Attribute object, used to specify the + attribute to consider, for a given node.""" + # The key under which information will be saved; can be a string or an + # XPath. If None, the label of the containing extractor will be used. + self.key = key + self.multi = multi + self.path = path + if joiner is None: + joiner = '' + self.joiner = joiner + # Post-process this set of information. + self.postprocess = postprocess + + def __repr__(self): + """String representation of an Attribute object.""" + r = '<Attribute id:%s (key=%s, multi=%s, path=%s, joiner=%s, ' \ + 'postprocess=%s)>' % (id(self), self.key, + self.multi, repr(self.path), + self.joiner, repr(self.postprocess)) + return r + + +def _parse_ref(text, link, info): + """Manage links to references.""" + if link.find('/title/tt') != -1: + yearK = re_yearKind_index.match(info) + if yearK and yearK.start() == 0: + text += ' %s' % info[:yearK.end()] + return (text.replace('\n', ' '), link) + + +class GatherRefs(DOMParserBase): + """Parser used to gather references to movies, persons and characters.""" + _attrs = [Attribute(key=None, multi=True, + path={ + 'text': './text()', + 'link': './@href', + 'info': './following::text()[1]' + }, + postprocess=lambda x: _parse_ref(x.get('text'), x.get('link'), + (x.get('info') or u'').strip()))] + extractors = [ + Extractor(label='names refs', + path="//a[starts-with(@href, '/name/nm')][string-length(@href)=16]", + 
attrs=_attrs), + + Extractor(label='titles refs', + path="//a[starts-with(@href, '/title/tt')]" \ + "[string-length(@href)=17]", + attrs=_attrs), + + Extractor(label='characters refs', + path="//a[starts-with(@href, '/character/ch')]" \ + "[string-length(@href)=21]", + attrs=_attrs), + ] + + def postprocess_data(self, data): + result = {} + for item in ('names refs', 'titles refs', 'characters refs'): + result[item] = {} + for k, v in data.get(item, []): + if not v.endswith('/'): continue + imdbID = analyze_imdbid(v) + if item == 'names refs': + obj = Person(personID=imdbID, name=k, + accessSystem=self._as, modFunct=self._modFunct) + elif item == 'titles refs': + obj = Movie(movieID=imdbID, title=k, + accessSystem=self._as, modFunct=self._modFunct) + else: + obj = Character(characterID=imdbID, name=k, + accessSystem=self._as, modFunct=self._modFunct) + # XXX: companies aren't handled: are they ever found in text, + # as links to their page? + result[item][k] = obj + return result + + def add_refs(self, data): + return data + + diff --git a/libs/imdb/parser/mobile/__init__.py b/libs/imdb/parser/mobile/__init__.py new file mode 100644 index 0000000..e486aa2 --- /dev/null +++ b/libs/imdb/parser/mobile/__init__.py @@ -0,0 +1,811 @@ +""" +parser.mobile package (imdb package). + +This package provides the IMDbMobileAccessSystem class used to access +IMDb's data for mobile systems. +the imdb.IMDb function will return an instance of this class when +called with the 'accessSystem' argument set to "mobile". + +Copyright 2005-2010 Davide Alberani <da@erlug.linux.it> + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import re +import logging +from urllib import unquote + +from imdb import imdbURL_movie_main, imdbURL_person_main, imdbURL_character_main +from imdb.Movie import Movie +from imdb.utils import analyze_title, analyze_name, canonicalName, \ + date_and_notes +from imdb._exceptions import IMDbDataAccessError +from imdb.parser.http import IMDbHTTPAccessSystem +from imdb.parser.http.utils import subXMLRefs, subSGMLRefs, build_person, \ + build_movie, re_spaces + +# XXX NOTE: the first version of this module was heavily based on +# regular expressions. This new version replace regexps with +# find() strings' method calls; despite being less flexible, it +# seems to be at least as fast and, hopefully, much more +# lightweight. Yes: the regexp-based version was too heavyweight +# for systems with very limited CPU power and memory footprint. +re_spacessub = re_spaces.sub +# Strip html. +re_unhtml = re.compile(r'<.+?>') +re_unhtmlsub = re_unhtml.sub +# imdb person or movie ids. +re_imdbID = re.compile(r'(?<=nm|tt|ch)([0-9]{7})\b') + +# movie AKAs. 
+re_makas = re.compile('(<p class="find-aka">.*?</p>)') + + +def _unHtml(s): + """Return a string without tags and no multiple spaces.""" + return subSGMLRefs(re_spacessub(' ', re_unhtmlsub('', s)).strip()) + + +_inttype = type(0) + +def _getTagsWith(s, cont, toClosure=False, maxRes=None): + """Return the html tags in the 's' string containing the 'cont' + string; if toClosure is True, everything between the opening + tag and the closing tag is returned.""" + lres = [] + bi = s.find(cont) + if bi != -1: + btag = s[:bi].rfind('<') + if btag != -1: + if not toClosure: + etag = s[bi+1:].find('>') + if etag != -1: + endidx = bi+2+etag + lres.append(s[btag:endidx]) + if maxRes is not None and len(lres) >= maxRes: return lres + lres += _getTagsWith(s[endidx:], cont, + toClosure=toClosure) + else: + spaceidx = s[btag:].find(' ') + if spaceidx != -1: + ctag = '</%s>' % s[btag+1:btag+spaceidx] + closeidx = s[bi:].find(ctag) + if closeidx != -1: + endidx = bi+closeidx+len(ctag) + lres.append(s[btag:endidx]) + if maxRes is not None and len(lres) >= maxRes: + return lres + lres += _getTagsWith(s[endidx:], cont, + toClosure=toClosure) + return lres + + +def _findBetween(s, begins, ends, beginindx=0, maxRes=None, lres=None): + """Return the list of strings from the 's' string which are included + between the 'begins' and 'ends' strings.""" + if lres is None: + lres = [] + bi = s.find(begins, beginindx) + if bi != -1: + lbegins = len(begins) + if isinstance(ends, (list, tuple)): + eset = [s.find(end, bi+lbegins) for end in ends] + eset[:] = [x for x in eset if x != -1] + if not eset: ei = -1 + else: ei = min(eset) + else: + ei = s.find(ends, bi+lbegins) + if ei != -1: + match = s[bi+lbegins:ei] + lres.append(match) + if maxRes is not None and len(lres) >= maxRes: return lres + _findBetween(s, begins, ends, beginindx=ei, maxRes=maxRes, + lres=lres) + return lres + + +class IMDbMobileAccessSystem(IMDbHTTPAccessSystem): + """The class used to access IMDb's data through the web for + 
mobile terminals.""" + + accessSystem = 'mobile' + _mobile_logger = logging.getLogger('imdbpy.parser.mobile') + + def __init__(self, isThin=1, *arguments, **keywords): + self.accessSystem = 'mobile' + IMDbHTTPAccessSystem.__init__(self, isThin, *arguments, **keywords) + + def _clean_html(self, html): + """Normalize the retrieve html.""" + html = re_spaces.sub(' ', html) + # Remove silly  » chars. + html = html.replace(' »', '') + return subXMLRefs(html) + + def _mretrieve(self, url, size=-1): + """Retrieve an html page and normalize it.""" + cont = self._retrieve(url, size=size) + return self._clean_html(cont) + + def _getPersons(self, s, sep='<br/>'): + """Return a list of Person objects, from the string s; items + are assumed to be separated by the sep string.""" + names = s.split(sep) + pl = [] + plappend = pl.append + counter = 1 + for name in names: + pid = re_imdbID.findall(name) + if not pid: continue + characters = _getTagsWith(name, 'class="char"', + toClosure=True, maxRes=1) + chpids = [] + if characters: + for ch in characters[0].split(' / '): + chid = re_imdbID.findall(ch) + if not chid: + chpids.append(None) + else: + chpids.append(chid[-1]) + if not chpids: + chpids = None + elif len(chpids) == 1: + chpids = chpids[0] + name = _unHtml(name) + # Catch unclosed tags. 
+ gt_indx = name.find('>') + if gt_indx != -1: + name = name[gt_indx+1:].lstrip() + if not name: continue + if name.endswith('...'): + name = name[:-3] + p = build_person(name, personID=str(pid[0]), billingPos=counter, + modFunct=self._defModFunct, roleID=chpids, + accessSystem=self.accessSystem) + plappend(p) + counter += 1 + return pl + + def _search_movie(self, title, results): + ##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title}) + ##params = 'q=%s&tt=on&mx=%s' % (urllib.quote_plus(title), str(results)) + ##cont = self._mretrieve(imdbURL_search % params) + cont = subXMLRefs(self._get_search_content('tt', title, results)) + title = _findBetween(cont, '<title>', '', maxRes=1) + res = [] + if not title: + self._mobile_logger.error('no title tag searching for movie %s', + title) + return res + tl = title[0].lower() + if not tl.startswith('imdb title'): + # a direct hit! + title = _unHtml(title[0]) + mid = None + midtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1) + if midtag: + mid = _findBetween(midtag[0], '/title/tt', '/', maxRes=1) + if not (mid and title): + self._mobile_logger.error('no direct hit title/movieID for' \ + ' title %s', title) + return res + if cont.find('TV mini-series') != -1: + title += ' (mini)' + res[:] = [(str(mid[0]), analyze_title(title))] + else: + # XXX: this results*3 prevents some recursion errors, but... + # it's not exactly understandable (i.e.: why 'results' is + # not enough to get all the results?) 
+ lis = _findBetween(cont, 'td valign="top">', '', + maxRes=results*3) + for li in lis: + akas = re_makas.findall(li) + for idx, aka in enumerate(akas): + aka = aka.replace('" - ', '::', 1) + aka = _unHtml(aka) + if aka.startswith('aka "'): + aka = aka[5:].strip() + if aka[-1] == '"': + aka = aka[:-1] + akas[idx] = aka + imdbid = re_imdbID.findall(li) + li = re_makas.sub('', li) + mtitle = _unHtml(li) + if not (imdbid and mtitle): + self._mobile_logger.debug('no title/movieID parsing' \ + ' %s searching for title %s', li, + title) + continue + mtitle = mtitle.replace('(TV mini-series)', '(mini)') + resd = analyze_title(mtitle) + if akas: + resd['akas'] = akas + res.append((str(imdbid[0]), resd)) + return res + + def get_movie_main(self, movieID): + cont = self._mretrieve(imdbURL_movie_main % movieID + 'maindetails') + title = _findBetween(cont, '', '', maxRes=1) + if not title: + raise IMDbDataAccessError, 'unable to get movieID "%s"' % movieID + title = _unHtml(title[0]) + if cont.find('TV mini-series') != -1: + title += ' (mini)' + d = analyze_title(title) + kind = d.get('kind') + tv_series = _findBetween(cont, 'TV Series:', '', maxRes=1) + if tv_series: mid = re_imdbID.findall(tv_series[0]) + else: mid = None + if tv_series and mid: + s_title = _unHtml(tv_series[0]) + s_data = analyze_title(s_title) + m = Movie(movieID=str(mid[0]), data=s_data, + accessSystem=self.accessSystem, + modFunct=self._defModFunct) + d['kind'] = kind = u'episode' + d['episode of'] = m + if kind in ('tv series', 'tv mini series'): + years = _findBetween(cont, '

    ', '

    ', maxRes=1) + if years: + years[:] = _findBetween(years[0], 'TV series', '', + maxRes=1) + if years: + d['series years'] = years[0].strip() + air_date = _findBetween(cont, 'Original Air Date:', '', + maxRes=1) + if air_date: + air_date = air_date[0] + vi = air_date.find('(') + if vi != -1: + date = _unHtml(air_date[:vi]).strip() + if date != '????': + d['original air date'] = date + air_date = air_date[vi:] + season = _findBetween(air_date, 'Season', ',', maxRes=1) + if season: + season = season[0].strip() + try: season = int(season) + except: pass + if season or type(season) is _inttype: + d['season'] = season + episode = _findBetween(air_date, 'Episode', ')', maxRes=1) + if episode: + episode = episode[0].strip() + try: episode = int(episode) + except: pass + if episode or type(season) is _inttype: + d['episode'] = episode + direct = _findBetween(cont, '
    Director', ('', '

    '), + maxRes=1) + if direct: + direct = direct[0] + h5idx = direct.find('/h5>') + if h5idx != -1: + direct = direct[h5idx+4:] + direct = self._getPersons(direct) + if direct: d['director'] = direct + if kind in ('tv series', 'tv mini series', 'episode'): + if kind != 'episode': + seasons = _findBetween(cont, 'Seasons:
    ', '', + maxRes=1) + if seasons: + d['number of seasons'] = seasons[0].count('|') + 1 + creator = _findBetween(cont, 'Created by', ('class="tn15more"', + '', + '

    '), + maxRes=1) + if not creator: + # They change 'Created by' to 'Creator' and viceversa + # from time to time... + # XXX: is 'Creators' also used? + creator = _findBetween(cont, 'Creator:', + ('class="tn15more"', '', + '

    '), maxRes=1) + if creator: + creator = creator[0] + if creator.find('tn15more'): creator = '%s>' % creator + creator = self._getPersons(creator) + if creator: d['creator'] = creator + writers = _findBetween(cont, '
    Writer', ('', '

    '), + maxRes=1) + if writers: + writers = writers[0] + h5idx = writers.find('/h5>') + if h5idx != -1: + writers = writers[h5idx+4:] + writers = self._getPersons(writers) + if writers: d['writer'] = writers + cvurl = _getTagsWith(cont, 'name="poster"', toClosure=True, maxRes=1) + if cvurl: + cvurl = _findBetween(cvurl[0], 'src="', '"', maxRes=1) + if cvurl: d['cover url'] = cvurl[0] + genres = _findBetween(cont, 'href="/Sections/Genres/', '/') + if genres: + d['genres'] = list(set(genres)) + ur = _findBetween(cont, '
    ', '
    ', + maxRes=1) + if ur: + rat = _findBetween(ur[0], '', '', maxRes=1) + if rat: + teni = rat[0].find('/10') + if teni != -1: + rat = rat[0][:teni] + try: + rat = float(rat.strip()) + d['rating'] = rat + except ValueError: + self._mobile_logger.warn('wrong rating: %s', rat) + vi = ur[0].rfind('tn15more">') + if vi != -1 and ur[0][vi+10:].find('await') == -1: + try: + votes = _unHtml(ur[0][vi+10:]).replace('votes', '').strip() + votes = int(votes.replace(',', '')) + d['votes'] = votes + except ValueError: + self._mobile_logger.warn('wrong votes: %s', ur) + top250 = _findBetween(cont, 'href="/chart/top?', '', maxRes=1) + if top250: + fn = top250[0].rfind('#') + if fn != -1: + try: + td = int(top250[0][fn+1:]) + d['top 250 rank'] = td + except ValueError: + self._mobile_logger.warn('wrong top250: %s', top250) + castdata = _findBetween(cont, 'Cast overview', '', maxRes=1) + if not castdata: + castdata = _findBetween(cont, 'Credited cast', '', maxRes=1) + if not castdata: + castdata = _findBetween(cont, 'Complete credited cast', '', + maxRes=1) + if not castdata: + castdata = _findBetween(cont, 'Series Cast Summary', '', + maxRes=1) + if not castdata: + castdata = _findBetween(cont, 'Episode Credited cast', '', + maxRes=1) + if castdata: + castdata = castdata[0] + # Reintegrate the fist tag. + fl = castdata.find('href=') + if fl != -1: castdata = '') + if smib != -1: + smie = castdata.rfind('') + if smie != -1: + castdata = castdata[:smib].strip() + \ + castdata[smie+18:].strip() + castdata = castdata.replace('/tr> ', '', maxRes=1) + if akas: + # For some reason, here
    is still used in place of
    . + akas[:] = [x for x in akas[0].split('
    ') if x.strip()] + akas = [_unHtml(x).replace('" - ','::', 1).lstrip('"').strip() + for x in akas] + if 'See more' in akas: akas.remove('See more') + akas[:] = [x for x in akas if x] + if akas: + d['akas'] = akas + mpaa = _findBetween(cont, 'MPAA
    :', '', maxRes=1) + if mpaa: d['mpaa'] = _unHtml(mpaa[0]) + runtimes = _findBetween(cont, 'Runtime:
    ', '', maxRes=1) + if runtimes: + runtimes = runtimes[0] + runtimes = [x.strip().replace(' min', '').replace(' (', '::(', 1) + for x in runtimes.split('|')] + d['runtimes'] = [_unHtml(x).strip() for x in runtimes] + if kind == 'episode': + # number of episodes. + epsn = _findBetween(cont, 'title="Full Episode List">', '', + maxRes=1) + if epsn: + epsn = epsn[0].replace(' Episodes', '').strip() + if epsn: + try: + epsn = int(epsn) + except: + self._mobile_logger.warn('wrong episodes #: %s', epsn) + d['number of episodes'] = epsn + country = _findBetween(cont, 'Country:', '', maxRes=1) + if country: + country[:] = country[0].split(' | ') + country[:] = ['', '::')) for x in country] + if country: d['countries'] = country + lang = _findBetween(cont, 'Language:', '', maxRes=1) + if lang: + lang[:] = lang[0].split(' | ') + lang[:] = ['', '::')) for x in lang] + if lang: d['languages'] = lang + col = _findBetween(cont, '"/search/title?colors=', '') + if col: + col[:] = col[0].split(' | ') + col[:] = ['', '::')) for x in col] + if col: d['color info'] = col + sm = _findBetween(cont, '/search/title?sound_mixes=', '', + maxRes=1) + if sm: + sm[:] = sm[0].split(' | ') + sm[:] = ['', '::')) for x in sm] + if sm: d['sound mix'] = sm + cert = _findBetween(cont, 'Certification:', '', maxRes=1) + if cert: + cert[:] = cert[0].split(' | ') + cert[:] = [_unHtml(x.replace(' ', '::')) for x in cert] + if cert: d['certificates'] = cert + plotoutline = _findBetween(cont, 'Plot:', [''], + maxRes=1) + if plotoutline: + plotoutline = plotoutline[0].strip() + plotoutline = plotoutline.rstrip('|').rstrip() + if plotoutline: d['plot outline'] = _unHtml(plotoutline) + aratio = _findBetween(cont, 'Aspect Ratio:', [''], + maxRes=1) + if aratio: + aratio = aratio[0].strip().replace(' (', '::(', 1) + if aratio: + d['aspect ratio'] = _unHtml(aratio) + return {'data': d} + + def get_movie_plot(self, movieID): + cont = self._mretrieve(imdbURL_movie_main % movieID + 'plotsummary') + plot = 
_findBetween(cont, '

    ', '

    ') + plot[:] = [_unHtml(x) for x in plot] + for i in xrange(len(plot)): + p = plot[i] + wbyidx = p.rfind(' Written by ') + if wbyidx != -1: + plot[i] = '%s::%s' % \ + (p[:wbyidx].rstrip(), + p[wbyidx+12:].rstrip().replace('{','<').replace('}','>')) + if plot: return {'data': {'plot': plot}} + return {'data': {}} + + def _search_person(self, name, results): + ##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name}) + ##params = 'q=%s&nm=on&mx=%s' % (urllib.quote_plus(name), str(results)) + ##cont = self._mretrieve(imdbURL_search % params) + cont = subXMLRefs(self._get_search_content('nm', name, results)) + name = _findBetween(cont, '', '', maxRes=1) + res = [] + if not name: + self._mobile_logger.warn('no title tag searching for name %s', name) + return res + nl = name[0].lower() + if not nl.startswith('imdb name'): + # a direct hit! + name = _unHtml(name[0]) + name = name.replace('- Filmography by type' , '').strip() + pid = None + pidtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1) + if pidtag: + pid = _findBetween(pidtag[0], '/name/nm', '/', maxRes=1) + if not (pid and name): + self._mobile_logger.error('no direct hit name/personID for' \ + ' name %s', name) + return res + res[:] = [(str(pid[0]), analyze_name(name, canonical=1))] + else: + lis = _findBetween(cont, 'td valign="top">', '', + maxRes=results*3) + for li in lis: + akas = _findBetween(li, '"', '"') + for sep in [' aka', '
    birth name']: + sepIdx = li.find(sep) + if sepIdx != -1: + li = li[:sepIdx] + pid = re_imdbID.findall(li) + pname = _unHtml(li) + if not (pid and pname): + self._mobile_logger.debug('no name/personID parsing' \ + ' %s searching for name %s', li, + name) + continue + resd = analyze_name(pname, canonical=1) + if akas: + resd['akas'] = akas + res.append((str(pid[0]), resd)) + return res + + def get_person_main(self, personID, _parseChr=False): + if not _parseChr: + url = imdbURL_person_main % personID + 'maindetails' + else: + url = imdbURL_character_main % personID + s = self._mretrieve(url) + r = {} + name = _findBetween(s, '', '', maxRes=1) + if not name: + if _parseChr: w = 'characterID' + else: w = 'personID' + raise IMDbDataAccessError, 'unable to get %s "%s"' % (w, personID) + name = _unHtml(name[0]) + if _parseChr: + name = name.replace('(Character)', '').strip() + name = name.replace('- Filmography by type', '').strip() + else: + name = name.replace('- Filmography by', '').strip() + r = analyze_name(name, canonical=not _parseChr) + for dKind in ('birth', 'death'): + date = _findBetween(s, '
    Date of %s:
    ' % dKind.capitalize(), + ('
    ', '

    '), maxRes=1) + if date: + date = _unHtml(date[0]) + if date: + date, notes = date_and_notes(date) + if date: + r['%s date' % dKind] = date + if notes: + r['%s notes' % dKind] = notes + akas = _findBetween(s, 'Alternate Names:', ('', + '

    '), maxRes=1) + if akas: + akas = akas[0] + if akas.find(' | ') != -1: + akas = _unHtml(akas).split(' | ') + else: + akas = _unHtml(akas).split(' / ') + if akas: r['akas'] = akas + hs = _findBetween(s, 'name="headshot"', '
    ', maxRes=1) + if hs: + hs[:] = _findBetween(hs[0], 'src="', '"', maxRes=1) + if hs: r['headshot'] = hs[0] + # Build a list of tuples such [('hrefLink', 'section name')] + workkind = _findBetween(s, '
    ', '
    ', + maxRes=1) + if workkind: + workkind[:] = _findBetween(workkind[0], 'href="#', '') + else: + # Assume there's only one section and/or there are no + # section links, for some reason. + workkind[:] = _findBetween(s, '
    ') + workkind[:] = [x.lstrip('"').rstrip(':').lower() for x in workkind] + ws = [] + for work in workkind: + wsplit = work.split('">', 1) + if len(wsplit) == 2: + sect = wsplit[0] + if '"' in sect: + sect = sect[:sect.find('"')] + ws.append((sect, wsplit[1].lower())) + # XXX: I think "guest appearances" are gone. + if s.find(' tag. + if _parseChr and sect == 'filmography': + inisect = s.find('
    ') + else: + inisect = s.find(''), maxRes=1) + if not vtag: + vtag = _findBetween(itag[0], 'value="', ('"', '>'), maxRes=1) + if vtag: + try: + vtag = unquote(str(vtag[0])) + vtag = unicode(vtag, 'latin_1') + r.update(analyze_name(vtag)) + except UnicodeEncodeError: + pass + return {'data': r, 'info sets': ('main', 'filmography')} + + def get_person_biography(self, personID): + cont = self._mretrieve(imdbURL_person_main % personID + 'bio') + d = {} + spouses = _findBetween(cont, 'Spouse', ('', ''), + maxRes=1) + if spouses: + sl = [] + for spouse in spouses[0].split(''): + if spouse.count('') > 1: + spouse = spouse.replace('', '::', 1) + spouse = _unHtml(spouse) + spouse = spouse.replace(':: ', '::').strip() + if spouse: sl.append(spouse) + if sl: d['spouse'] = sl + nnames = _findBetween(cont, '
    Nickname
    ', ('

    ','
    '), + maxRes=1) + if nnames: + nnames = nnames[0] + if nnames: + nnames = [x.strip().replace(' (', '::(', 1) + for x in nnames.split('
    ')] + if nnames: + d['nick names'] = nnames + misc_sects = _findBetween(cont, '
    ', '
    ') + misc_sects[:] = [x.split('
    ') for x in misc_sects] + misc_sects[:] = [x for x in misc_sects if len(x) == 2] + for sect, data in misc_sects: + sect = sect.lower().replace(':', '').strip() + if d.has_key(sect) and sect != 'mini biography': continue + elif sect in ('spouse', 'nickname'): continue + if sect == 'salary': sect = 'salary history' + elif sect == 'where are they now': sect = 'where now' + elif sect == 'personal quotes': sect = 'quotes' + data = data.replace('

    ', '::') + data = data.replace('

    ', ' ') # for multi-paragraphs 'bio' + data = data.replace(' ', '@@@@') + data = data.replace(' ', '::') + data = _unHtml(data) + data = [x.strip() for x in data.split('::')] + data[:] = [x.replace('@@@@', '::') for x in data if x] + if sect == 'height' and data: data = data[0] + elif sect == 'birth name': data = canonicalName(data[0]) + elif sect == 'date of birth': + date, notes = date_and_notes(data[0]) + if date: + d['birth date'] = date + if notes: + d['birth notes'] = notes + continue + elif sect == 'date of death': + date, notes = date_and_notes(data[0]) + if date: + d['death date'] = date + if notes: + d['death notes'] = notes + continue + elif sect == 'mini biography': + ndata = [] + for bio in data: + byidx = bio.rfind('IMDb Mini Biography By') + if byidx != -1: + bioAuth = bio[:byidx].rstrip() + else: + bioAuth = 'Anonymous' + bio = u'%s::%s' % (bioAuth, bio[byidx+23:].lstrip()) + ndata.append(bio) + data[:] = ndata + if 'mini biography' in d: + d['mini biography'].append(ndata[0]) + continue + d[sect] = data + return {'data': d} + + def _search_character(self, name, results): + cont = subXMLRefs(self._get_search_content('char', name, results)) + name = _findBetween(cont, '', '', maxRes=1) + res = [] + if not name: + self._mobile_logger.error('no title tag searching character %s', + name) + return res + nl = name[0].lower() + if not (nl.startswith('imdb search') or nl.startswith('imdb search') \ + or nl.startswith('imdb character')): + # a direct hit! 
+ name = _unHtml(name[0]).replace('(Character)', '').strip() + pid = None + pidtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1) + if pidtag: + pid = _findBetween(pidtag[0], '/character/ch', '/', maxRes=1) + if not (pid and name): + self._mobile_logger.error('no direct hit name/characterID for' \ + ' character %s', name) + return res + res[:] = [(str(pid[0]), analyze_name(name))] + else: + sects = _findBetween(cont, 'Popular Characters', '', + maxRes=results*3) + sects += _findBetween(cont, 'Characters', '', + maxRes=results*3) + for sect in sects: + lis = _findBetween(sect, '
    ', + ('', '

    '), maxRes=1) + if intro: + intro = _unHtml(intro[0]).strip() + if intro: + d['introduction'] = intro + bios = _findBetween(cont, '
    ', + '
    ') + if bios: + bios = _findBetween(bios[0], '

    ', ('

    ', '

    ')) + if bios: + for bio in bios: + bio = bio.replace('

    ', '::') + bio = bio.replace('\n', ' ') + bio = bio.replace('
    ', '\n') + bio = bio.replace('
    ', '\n') + bio = subSGMLRefs(re_unhtmlsub('', bio).strip()) + bio = bio.replace(' ::', '::').replace(':: ', '::') + bio = bio.replace('::', ': ', 1) + if bio: + d.setdefault('biography', []).append(bio) + return {'data': d} + + diff --git a/libs/imdb/utils.py b/libs/imdb/utils.py new file mode 100644 index 0000000..7d72697 --- /dev/null +++ b/libs/imdb/utils.py @@ -0,0 +1,1536 @@ +""" +utils module (imdb package). + +This module provides basic utilities for the imdb package. + +Copyright 2004-2010 Davide Alberani + 2009 H. Turgut Uyar + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +from __future__ import generators +import re +import string +import logging +from copy import copy, deepcopy +from time import strptime, strftime + +from imdb import VERSION +from imdb import articles +from imdb._exceptions import IMDbParserError + + +# Logger for imdb.utils module. +_utils_logger = logging.getLogger('imdbpy.utils') + +# The regular expression for the "long" year format of IMDb, like +# "(1998)" and "(1986/II)", where the optional roman number (that I call +# "imdbIndex" after the slash is used for movies with the same title +# and year of release. +# XXX: probably L, C, D and M are far too much! ;-) +re_year_index = re.compile(r'\(([0-9\?]{4}(/[IVXLCDM]+)?)\)') + +# Match only the imdbIndex (for name strings). 
+re_index = re.compile(r'^\(([IVXLCDM]+)\)$') + +# Match the number of episodes. +re_episodes = re.compile('\s?\((\d+) episodes\)', re.I) +re_episode_info = re.compile(r'{\s*(.+?)?\s?(\([0-9\?]{4}-[0-9\?]{1,2}-[0-9\?]{1,2}\))?\s?(\(#[0-9]+\.[0-9]+\))?}') + +# Common suffixes in surnames. +_sname_suffixes = ('de', 'la', 'der', 'den', 'del', 'y', 'da', 'van', + 'e', 'von', 'the', 'di', 'du', 'el', 'al') + +def canonicalName(name): + """Return the given name in canonical "Surname, Name" format. + It assumes that name is in the 'Name Surname' format.""" + # XXX: some statistics (as of 17 Apr 2008, over 2288622 names): + # - just a surname: 69476 + # - single surname, single name: 2209656 + # - composed surname, composed name: 9490 + # - composed surname, single name: 67606 + # (2: 59764, 3: 6862, 4: 728) + # - single surname, composed name: 242310 + # (2: 229467, 3: 9901, 4: 2041, 5: 630) + # - Jr.: 8025 + # Don't convert names already in the canonical format. + if name.find(', ') != -1: return name + if isinstance(name, unicode): + joiner = u'%s, %s' + sur_joiner = u'%s %s' + sur_space = u' %s' + space = u' ' + else: + joiner = '%s, %s' + sur_joiner = '%s %s' + sur_space = ' %s' + space = ' ' + sname = name.split(' ') + snl = len(sname) + if snl == 2: + # Just a name and a surname: how boring... + name = joiner % (sname[1], sname[0]) + elif snl > 2: + lsname = [x.lower() for x in sname] + if snl == 3: _indexes = (0, snl-2) + else: _indexes = (0, snl-2, snl-3) + # Check for common surname prefixes at the beginning and near the end. + for index in _indexes: + if lsname[index] not in _sname_suffixes: continue + try: + # Build the surname. + surn = sur_joiner % (sname[index], sname[index+1]) + del sname[index] + del sname[index] + try: + # Handle the "Jr." after the name. 
+ if lsname[index+2].startswith('jr'): + surn += sur_space % sname[index] + del sname[index] + except (IndexError, ValueError): + pass + name = joiner % (surn, space.join(sname)) + break + except ValueError: + continue + else: + name = joiner % (sname[-1], space.join(sname[:-1])) + return name + +def normalizeName(name): + """Return a name in the normal "Name Surname" format.""" + if isinstance(name, unicode): + joiner = u'%s %s' + else: + joiner = '%s %s' + sname = name.split(', ') + if len(sname) == 2: + name = joiner % (sname[1], sname[0]) + return name + +def analyze_name(name, canonical=None): + """Return a dictionary with the name and the optional imdbIndex + keys, from the given string. + + If canonical is None (default), the name is stored in its own style. + If canonical is True, the name is converted to canonical style. + If canonical is False, the name is converted to normal format. + + raise an IMDbParserError exception if the name is not valid. + """ + original_n = name + name = name.strip() + res = {} + imdbIndex = '' + opi = name.rfind('(') + if opi != -1: + cpi = name.rfind(')') + if cpi > opi and re_index.match(name[opi:cpi+1]): + imdbIndex = name[opi+1:cpi] + name = name[:opi].rstrip() + if not name: + raise IMDbParserError, 'invalid name: "%s"' % original_n + if canonical is not None: + if canonical: + name = canonicalName(name) + else: + name = normalizeName(name) + res['name'] = name + if imdbIndex: + res['imdbIndex'] = imdbIndex + return res + + +def build_name(name_dict, canonical=None): + """Given a dictionary that represents a "long" IMDb name, + return a string. + If canonical is None (default), the name is returned in the stored style. + If canonical is True, the name is converted to canonical style. + If canonical is False, the name is converted to normal format. 
+ """ + name = name_dict.get('canonical name') or name_dict.get('name', '') + if not name: return '' + if canonical is not None: + if canonical: + name = canonicalName(name) + else: + name = normalizeName(name) + imdbIndex = name_dict.get('imdbIndex') + if imdbIndex: + name += ' (%s)' % imdbIndex + return name + + +# XXX: here only for backward compatibility. Find and remove any dependency. +_articles = articles.GENERIC_ARTICLES +_unicodeArticles = articles.toUnicode(_articles) +articlesDicts = articles.articlesDictsForLang(None) +spArticles = articles.spArticlesForLang(None) + +def canonicalTitle(title, lang=None): + """Return the title in the canonic format 'Movie Title, The'; + beware that it doesn't handle long imdb titles, but only the + title portion, without year[/imdbIndex] or special markup. + The 'lang' argument can be used to specify the language of the title. + """ + isUnicode = isinstance(title, unicode) + articlesDicts = articles.articlesDictsForLang(lang) + try: + if title.split(', ')[-1].lower() in articlesDicts[isUnicode]: + return title + except IndexError: + pass + if isUnicode: + _format = u'%s, %s' + else: + _format = '%s, %s' + ltitle = title.lower() + spArticles = articles.spArticlesForLang(lang) + for article in spArticles[isUnicode]: + if ltitle.startswith(article): + lart = len(article) + title = _format % (title[lart:], title[:lart]) + if article[-1] == ' ': + title = title[:-1] + break + ## XXX: an attempt using a dictionary lookup. + ##for artSeparator in (' ', "'", '-'): + ## article = _articlesDict.get(ltitle.split(artSeparator)[0]) + ## if article is not None: + ## lart = len(article) + ## # check titles like "una", "I'm Mad" and "L'abbacchio". 
+ ## if title[lart:] == '' or (artSeparator != ' ' and + ## title[lart:][1] != artSeparator): continue + ## title = '%s, %s' % (title[lart:], title[:lart]) + ## if artSeparator == ' ': title = title[1:] + ## break + return title + +def normalizeTitle(title, lang=None): + """Return the title in the normal "The Title" format; + beware that it doesn't handle long imdb titles, but only the + title portion, without year[/imdbIndex] or special markup. + The 'lang' argument can be used to specify the language of the title. + """ + isUnicode = isinstance(title, unicode) + stitle = title.split(', ') + articlesDicts = articles.articlesDictsForLang(lang) + if len(stitle) > 1 and stitle[-1].lower() in articlesDicts[isUnicode]: + sep = ' ' + if stitle[-1][-1] in ("'", '-'): + sep = '' + if isUnicode: + _format = u'%s%s%s' + _joiner = u', ' + else: + _format = '%s%s%s' + _joiner = ', ' + title = _format % (stitle[-1], sep, _joiner.join(stitle[:-1])) + return title + + +def _split_series_episode(title): + """Return the series and the episode titles; if this is not a + series' episode, the returned series title is empty. + This function recognize two different styles: + "The Series" An Episode (2005) + "The Series" (2004) {An Episode (2005) (#season.episode)}""" + series_title = '' + episode_or_year = '' + if title[-1:] == '}': + # Title of the episode, as in the plain text data files. + begin_eps = title.rfind('{') + if begin_eps == -1: return '', '' + series_title = title[:begin_eps].rstrip() + # episode_or_year is returned with the {...} + episode_or_year = title[begin_eps:].strip() + if episode_or_year[:12] == '{SUSPENDED}}': return '', '' + # XXX: works only with tv series; it's still unclear whether + # IMDb will support episodes for tv mini series and tv movies... + elif title[0:1] == '"': + second_quot = title[1:].find('"') + 2 + if second_quot != 1: # a second " was found. 
+ episode_or_year = title[second_quot:].lstrip() + first_char = episode_or_year[0:1] + if not first_char: return '', '' + if first_char != '(': + # There is not a (year) but the title of the episode; + # that means this is an episode title, as returned by + # the web server. + series_title = title[:second_quot] + ##elif episode_or_year[-1:] == '}': + ## # Title of the episode, as in the plain text data files. + ## begin_eps = episode_or_year.find('{') + ## if begin_eps == -1: return series_title, episode_or_year + ## series_title = title[:second_quot+begin_eps].rstrip() + ## # episode_or_year is returned with the {...} + ## episode_or_year = episode_or_year[begin_eps:] + return series_title, episode_or_year + + +def is_series_episode(title): + """Return True if 'title' is an series episode.""" + title = title.strip() + if _split_series_episode(title)[0]: return 1 + return 0 + + +def analyze_title(title, canonical=None, canonicalSeries=None, + canonicalEpisode=None, _emptyString=u''): + """Analyze the given title and return a dictionary with the + "stripped" title, the kind of the show ("movie", "tv series", etc.), + the year of production and the optional imdbIndex (a roman number + used to distinguish between movies with the same title and year). + + If canonical is None (default), the title is stored in its own style. + If canonical is True, the title is converted to canonical style. + If canonical is False, the title is converted to normal format. + + raise an IMDbParserError exception if the title is not valid. + """ + # XXX: introduce the 'lang' argument? + if canonical is not None: + canonicalSeries = canonicalEpisode = canonical + original_t = title + result = {} + title = title.strip() + year = _emptyString + kind = _emptyString + imdbIndex = _emptyString + series_title, episode_or_year = _split_series_episode(title) + if series_title: + # It's an episode of a series. 
+ series_d = analyze_title(series_title, canonical=canonicalSeries) + oad = sen = ep_year = _emptyString + # Plain text data files format. + if episode_or_year[0:1] == '{' and episode_or_year[-1:] == '}': + match = re_episode_info.findall(episode_or_year) + if match: + # Episode title, original air date and #season.episode + episode_or_year, oad, sen = match[0] + episode_or_year = episode_or_year.strip() + if not oad: + # No year, but the title is something like (2005-04-12) + if episode_or_year and episode_or_year[0] == '(' and \ + episode_or_year[-1:] == ')' and \ + episode_or_year[1:2] != '#': + oad = episode_or_year + if oad[1:5] and oad[5:6] == '-': + try: + ep_year = int(oad[1:5]) + except (TypeError, ValueError): + pass + if not oad and not sen and episode_or_year.startswith('(#'): + sen = episode_or_year + elif episode_or_year.startswith('Episode dated'): + oad = episode_or_year[14:] + if oad[-4:].isdigit(): + try: + ep_year = int(oad[-4:]) + except (TypeError, ValueError): + pass + episode_d = analyze_title(episode_or_year, canonical=canonicalEpisode) + episode_d['kind'] = u'episode' + episode_d['episode of'] = series_d + if oad: + episode_d['original air date'] = oad[1:-1] + if ep_year and episode_d.get('year') is None: + episode_d['year'] = ep_year + if sen and sen[2:-1].find('.') != -1: + seas, epn = sen[2:-1].split('.') + if seas: + # Set season and episode. + try: seas = int(seas) + except: pass + try: epn = int(epn) + except: pass + episode_d['season'] = seas + if epn: + episode_d['episode'] = epn + return episode_d + # First of all, search for the kind of show. 
+ # XXX: Number of entries at 17 Apr 2008: + # movie: 379,871 + # episode: 483,832 + # tv movie: 61,119 + # tv series: 44,795 + # video movie: 57,915 + # tv mini series: 5,497 + # video game: 5,490 + # More up-to-date statistics: http://us.imdb.com/database_statistics + if title.endswith('(TV)'): + kind = u'tv movie' + title = title[:-4].rstrip() + elif title.endswith('(V)'): + kind = u'video movie' + title = title[:-3].rstrip() + elif title.endswith('(mini)'): + kind = u'tv mini series' + title = title[:-6].rstrip() + elif title.endswith('(VG)'): + kind = u'video game' + title = title[:-4].rstrip() + # Search for the year and the optional imdbIndex (a roman number). + yi = re_year_index.findall(title) + if yi: + last_yi = yi[-1] + year = last_yi[0] + if last_yi[1]: + imdbIndex = last_yi[1][1:] + year = year[:-len(imdbIndex)-1] + i = title.rfind('(%s)' % last_yi[0]) + if i != -1: + title = title[:i-1].rstrip() + # This is a tv (mini) series: strip the '"' at the begin and at the end. + # XXX: strip('"') is not used for compatibility with Python 2.0. 
+ if title and title[0] == title[-1] == '"': + if not kind: + kind = u'tv series' + title = title[1:-1].strip() + if not title: + raise IMDbParserError, 'invalid title: "%s"' % original_t + if canonical is not None: + if canonical: + title = canonicalTitle(title) + else: + title = normalizeTitle(title) + # 'kind' is one in ('movie', 'episode', 'tv series', 'tv mini series', + # 'tv movie', 'video movie', 'video game') + result['title'] = title + result['kind'] = kind or u'movie' + if year and year != '????': + try: + result['year'] = int(year) + except (TypeError, ValueError): + pass + if imdbIndex: + result['imdbIndex'] = imdbIndex + if isinstance(_emptyString, str): + result['kind'] = str(kind or 'movie') + return result + + +_web_format = '%d %B %Y' +_ptdf_format = '(%Y-%m-%d)' +def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''): + """Convert a time expressed in the pain text data files, to + the 'Episode dated ...' format used on the web site; if + fromPTDFtoWEB is false, the inverted conversion is applied.""" + try: + if fromPTDFtoWEB: + from_format = _ptdf_format + to_format = _web_format + else: + from_format = u'Episode dated %s' % _web_format + to_format = _ptdf_format + t = strptime(title, from_format) + title = strftime(to_format, t) + if fromPTDFtoWEB: + if title[0] == '0': title = title[1:] + title = u'Episode dated %s' % title + except ValueError: + pass + if isinstance(_emptyString, str): + try: + title = str(title) + except UnicodeDecodeError: + pass + return title + + +def build_title(title_dict, canonical=None, canonicalSeries=None, + canonicalEpisode=None, ptdf=0, lang=None, _doYear=1, + _emptyString=u''): + """Given a dictionary that represents a "long" IMDb title, + return a string. + + If canonical is None (default), the title is returned in the stored style. + If canonical is True, the title is converted to canonical style. + If canonical is False, the title is converted to normal format. 
+ + lang can be used to specify the language of the title. + + If ptdf is true, the plain text data files format is used. + """ + if canonical is not None: + canonicalSeries = canonical + pre_title = _emptyString + kind = title_dict.get('kind') + episode_of = title_dict.get('episode of') + if kind == 'episode' and episode_of is not None: + # Works with both Movie instances and plain dictionaries. + doYear = 0 + if ptdf: + doYear = 1 + pre_title = build_title(episode_of, canonical=canonicalSeries, + ptdf=0, _doYear=doYear, + _emptyString=_emptyString) + ep_dict = {'title': title_dict.get('title', ''), + 'imdbIndex': title_dict.get('imdbIndex')} + ep_title = ep_dict['title'] + if not ptdf: + doYear = 1 + ep_dict['year'] = title_dict.get('year', '????') + if ep_title[0:1] == '(' and ep_title[-1:] == ')' and \ + ep_title[1:5].isdigit(): + ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=1, + _emptyString=_emptyString) + else: + doYear = 0 + if ep_title.startswith('Episode dated'): + ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=0, + _emptyString=_emptyString) + episode_title = build_title(ep_dict, + canonical=canonicalEpisode, ptdf=ptdf, + _doYear=doYear, _emptyString=_emptyString) + if ptdf: + oad = title_dict.get('original air date', _emptyString) + if len(oad) == 10 and oad[4] == '-' and oad[7] == '-' and \ + episode_title.find(oad) == -1: + episode_title += ' (%s)' % oad + seas = title_dict.get('season') + if seas is not None: + episode_title += ' (#%s' % seas + episode = title_dict.get('episode') + if episode is not None: + episode_title += '.%s' % episode + episode_title += ')' + episode_title = '{%s}' % episode_title + return '%s %s' % (pre_title, episode_title) + title = title_dict.get('title', '') + if not title: return _emptyString + if canonical is not None: + if canonical: + title = canonicalTitle(title, lang=lang) + else: + title = normalizeTitle(title, lang=lang) + if pre_title: + title = '%s %s' % (pre_title, title) + if kind in (u'tv 
series', u'tv mini series'): + title = '"%s"' % title + if _doYear: + imdbIndex = title_dict.get('imdbIndex') + year = title_dict.get('year') or u'????' + if isinstance(_emptyString, str): + year = str(year) + title += ' (%s' % year + if imdbIndex: + title += '/%s' % imdbIndex + title += ')' + if kind: + if kind == 'tv movie': + title += ' (TV)' + elif kind == 'video movie': + title += ' (V)' + elif kind == 'tv mini series': + title += ' (mini)' + elif kind == 'video game': + title += ' (VG)' + return title + + +def split_company_name_notes(name): + """Return two strings, the first representing the company name, + and the other representing the (optional) notes.""" + name = name.strip() + notes = u'' + if name.endswith(')'): + fpidx = name.find('(') + if fpidx != -1: + notes = name[fpidx:] + name = name[:fpidx].rstrip() + return name, notes + + +def analyze_company_name(name, stripNotes=False): + """Return a dictionary with the name and the optional 'country' + keys, from the given string. + If stripNotes is true, tries to not consider optional notes. + + raise an IMDbParserError exception if the name is not valid. + """ + if stripNotes: + name = split_company_name_notes(name)[0] + o_name = name + name = name.strip() + country = None + if name.endswith(']'): + idx = name.rfind('[') + if idx != -1: + country = name[idx:] + name = name[:idx].rstrip() + if not name: + raise IMDbParserError, 'invalid name: "%s"' % o_name + result = {'name': name} + if country: + result['country'] = country + return result + + +def build_company_name(name_dict, _emptyString=u''): + """Given a dictionary that represents a "long" IMDb company name, + return a string. 
+ """ + name = name_dict.get('name') + if not name: + return _emptyString + country = name_dict.get('country') + if country is not None: + name += ' %s' % country + return name + + +class _LastC: + """Size matters.""" + def __cmp__(self, other): + if isinstance(other, self.__class__): return 0 + return 1 + +_last = _LastC() + +def cmpMovies(m1, m2): + """Compare two movies by year, in reverse order; the imdbIndex is checked + for movies with the same year of production and title.""" + # Sort tv series' episodes. + m1e = m1.get('episode of') + m2e = m2.get('episode of') + if m1e is not None and m2e is not None: + cmp_series = cmpMovies(m1e, m2e) + if cmp_series != 0: + return cmp_series + m1s = m1.get('season') + m2s = m2.get('season') + if m1s is not None and m2s is not None: + if m1s < m2s: + return 1 + elif m1s > m2s: + return -1 + m1p = m1.get('episode') + m2p = m2.get('episode') + if m1p < m2p: + return 1 + elif m1p > m2p: + return -1 + try: + if m1e is None: m1y = int(m1.get('year', 0)) + else: m1y = int(m1e.get('year', 0)) + except ValueError: + m1y = 0 + try: + if m2e is None: m2y = int(m2.get('year', 0)) + else: m2y = int(m2e.get('year', 0)) + except ValueError: + m2y = 0 + if m1y > m2y: return -1 + if m1y < m2y: return 1 + # Ok, these movies have the same production year... + #m1t = m1.get('canonical title', _last) + #m2t = m2.get('canonical title', _last) + # It should works also with normal dictionaries (returned from searches). + #if m1t is _last and m2t is _last: + m1t = m1.get('title', _last) + m2t = m2.get('title', _last) + if m1t < m2t: return -1 + if m1t > m2t: return 1 + # Ok, these movies have the same title... + m1i = m1.get('imdbIndex', _last) + m2i = m2.get('imdbIndex', _last) + if m1i > m2i: return -1 + if m1i < m2i: return 1 + m1id = getattr(m1, 'movieID', None) + # Introduce this check even for other comparisons functions? + # XXX: is it safe to check without knowning the data access system? + # probably not a great idea. 
Check for 'kind', instead? + if m1id is not None: + m2id = getattr(m2, 'movieID', None) + if m1id > m2id: return -1 + elif m1id < m2id: return 1 + return 0 + + +def cmpPeople(p1, p2): + """Compare two people by billingPos, name and imdbIndex.""" + p1b = getattr(p1, 'billingPos', None) or _last + p2b = getattr(p2, 'billingPos', None) or _last + if p1b > p2b: return 1 + if p1b < p2b: return -1 + p1n = p1.get('canonical name', _last) + p2n = p2.get('canonical name', _last) + if p1n is _last and p2n is _last: + p1n = p1.get('name', _last) + p2n = p2.get('name', _last) + if p1n > p2n: return 1 + if p1n < p2n: return -1 + p1i = p1.get('imdbIndex', _last) + p2i = p2.get('imdbIndex', _last) + if p1i > p2i: return 1 + if p1i < p2i: return -1 + return 0 + + +def cmpCompanies(p1, p2): + """Compare two companies.""" + p1n = p1.get('long imdb name', _last) + p2n = p2.get('long imdb name', _last) + if p1n is _last and p2n is _last: + p1n = p1.get('name', _last) + p2n = p2.get('name', _last) + if p1n > p2n: return 1 + if p1n < p2n: return -1 + p1i = p1.get('country', _last) + p2i = p2.get('country', _last) + if p1i > p2i: return 1 + if p1i < p2i: return -1 + return 0 + + +# References to titles, names and characters. +# XXX: find better regexp! +re_titleRef = re.compile(r'_(.+?(?: \([0-9\?]{4}(?:/[IVXLCDM]+)?\))?(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)_ \(qv\)') +# FIXME: doesn't match persons with ' in the name. +re_nameRef = re.compile(r"'([^']+?)' \(qv\)") +# XXX: good choice? Are there characters with # in the name? +re_characterRef = re.compile(r"#([^']+?)# \(qv\)") + +# Functions used to filter the text strings. 
+def modNull(s, titlesRefs, namesRefs, charactersRefs): + """Do nothing.""" + return s + +def modClearTitleRefs(s, titlesRefs, namesRefs, charactersRefs): + """Remove titles references.""" + return re_titleRef.sub(r'\1', s) + +def modClearNameRefs(s, titlesRefs, namesRefs, charactersRefs): + """Remove names references.""" + return re_nameRef.sub(r'\1', s) + +def modClearCharacterRefs(s, titlesRefs, namesRefs, charactersRefs): + """Remove characters references""" + return re_characterRef.sub(r'\1', s) + +def modClearRefs(s, titlesRefs, namesRefs, charactersRefs): + """Remove titles, names and characters references.""" + s = modClearTitleRefs(s, {}, {}, {}) + s = modClearCharacterRefs(s, {}, {}, {}) + return modClearNameRefs(s, {}, {}, {}) + + +def modifyStrings(o, modFunct, titlesRefs, namesRefs, charactersRefs): + """Modify a string (or string values in a dictionary or strings + in a list), using the provided modFunct function and titlesRefs + namesRefs and charactersRefs references dictionaries.""" + # Notice that it doesn't go any deeper than the first two levels in a list. 
+ if isinstance(o, (unicode, str)): + return modFunct(o, titlesRefs, namesRefs, charactersRefs) + elif isinstance(o, (list, tuple, dict)): + _stillorig = 1 + if isinstance(o, (list, tuple)): keys = xrange(len(o)) + else: keys = o.keys() + for i in keys: + v = o[i] + if isinstance(v, (unicode, str)): + if _stillorig: + o = copy(o) + _stillorig = 0 + o[i] = modFunct(v, titlesRefs, namesRefs, charactersRefs) + elif isinstance(v, (list, tuple)): + modifyStrings(o[i], modFunct, titlesRefs, namesRefs, + charactersRefs) + return o + + +def date_and_notes(s): + """Parse (birth|death) date and notes; returns a tuple in the + form (date, notes).""" + s = s.strip() + if not s: return (u'', u'') + notes = u'' + if s[0].isdigit() or s.split()[0].lower() in ('c.', 'january', 'february', + 'march', 'april', 'may', 'june', + 'july', 'august', 'september', + 'october', 'november', + 'december', 'ca.', 'circa', + '????,'): + i = s.find(',') + if i != -1: + notes = s[i+1:].strip() + s = s[:i] + else: + notes = s + s = u'' + if s == '????': s = u'' + return s, notes + + +class RolesList(list): + """A list of Person or Character instances, used for the currentRole + property.""" + def __unicode__(self): + return u' / '.join([unicode(x) for x in self]) + + def __str__(self): + # FIXME: does it make sense at all? Return a unicode doesn't + # seem right, in __str__. + return u' / '.join([unicode(x).encode('utf8') for x in self]) + + +# Replace & with &, but only if it's not already part of a charref. 
+#_re_amp = re.compile(r'(&)(?!\w+;)', re.I) +#_re_amp = re.compile(r'(?<=\W)&(?=[^a-zA-Z0-9_#])') +_re_amp = re.compile(r'&(?![^a-zA-Z0-9_#]{1,5};)') + +def escape4xml(value): + """Escape some chars that can't be present in a XML value.""" + if isinstance(value, int): + value = str(value) + value = _re_amp.sub('&', value) + value = value.replace('"', '"').replace("'", ''') + value = value.replace('<', '<').replace('>', '>') + if isinstance(value, unicode): + value = value.encode('ascii', 'xmlcharrefreplace') + return value + + +def _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs): + """Return three lists - for movie titles, persons and characters names - + with two items tuples: the first item is the reference once escaped + by the user-provided modFunct function, the second is the same + reference un-escaped.""" + mRefs = [] + for refRe, refTemplate in [(re_titleRef, u'_%s_ (qv)'), + (re_nameRef, u"'%s' (qv)"), + (re_characterRef, u'#%s# (qv)')]: + theseRefs = [] + for theRef in refRe.findall(value): + # refTemplate % theRef values don't change for a single + # _Container instance, so this is a good candidate for a + # cache or something - even if it's so rarely used that... + # Moreover, it can grow - ia.update(...) - and change if + # modFunct is modified. + goodValue = modFunct(refTemplate % theRef, titlesRefs, namesRefs, + charactersRefs) + # Prevents problems with crap in plain text data files. + # We should probably exclude invalid chars and string that + # are too long in the re_*Ref expressions. + if '_' in goodValue or len(goodValue) > 128: + continue + toReplace = escape4xml(goodValue) + # Only the 'value' portion is replaced. 
+ replaceWith = goodValue.replace(theRef, escape4xml(theRef)) + theseRefs.append((toReplace, replaceWith)) + mRefs.append(theseRefs) + return mRefs + + +def _handleTextNotes(s): + """Split text::notes strings.""" + ssplit = s.split('::', 1) + if len(ssplit) == 1: + return s + return u'%s%s' % (ssplit[0], ssplit[1]) + + +def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None, + namesRefs=None, charactersRefs=None): + """Replace some chars that can't be present in a XML text.""" + # XXX: use s.encode(encoding, 'xmlcharrefreplace') ? Probably not + # a great idea: after all, returning a unicode is safe. + if isinstance(value, (unicode, str)): + if not withRefs: + value = _handleTextNotes(escape4xml(value)) + else: + # Replace references that were accidentally escaped. + replaceLists = _refsToReplace(value, modFunct, titlesRefs, + namesRefs, charactersRefs) + value = modFunct(value, titlesRefs or {}, namesRefs or {}, + charactersRefs or {}) + value = _handleTextNotes(escape4xml(value)) + for replaceList in replaceLists: + for toReplace, replaceWith in replaceList: + value = value.replace(toReplace, replaceWith) + else: + value = unicode(value) + return value + + +def _tag4TON(ton, addAccessSystem=False, _containerOnly=False): + """Build a tag for the given _Container instance; + both open and close tags are returned.""" + tag = ton.__class__.__name__.lower() + what = 'name' + if tag == 'movie': + value = ton.get('long imdb title') or ton.get('title', '') + what = 'title' + else: + value = ton.get('long imdb name') or ton.get('name', '') + value = _normalizeValue(value) + extras = u'' + crl = ton.currentRole + if crl: + if not isinstance(crl, list): + crl = [crl] + for cr in crl: + crTag = cr.__class__.__name__.lower() + crValue = cr['long imdb name'] + crValue = _normalizeValue(crValue) + crID = cr.getID() + if crID is not None: + extras += u'<%s id="%s">' \ + u'%s' % (crTag, crID, + crValue, crTag) + else: + extras += u'<%s>%s' % \ + (crTag, 
crValue, crTag) + if cr.notes: + extras += u'%s' % _normalizeValue(cr.notes) + extras += u'' + theID = ton.getID() + if theID is not None: + beginTag = u'<%s id="%s"' % (tag, theID) + if addAccessSystem and ton.accessSystem: + beginTag += ' access-system="%s"' % ton.accessSystem + if not _containerOnly: + beginTag += u'><%s>%s' % (what, value, what) + else: + beginTag += u'>' + else: + if not _containerOnly: + beginTag = u'<%s><%s>%s' % (tag, what, value, what) + else: + beginTag = u'<%s>' % tag + beginTag += extras + if ton.notes: + beginTag += u'%s' % _normalizeValue(ton.notes) + return (beginTag, u'' % tag) + + +TAGS_TO_MODIFY = { + 'movie.parents-guide': ('item', True), + 'movie.number-of-votes': ('item', True), + 'movie.soundtrack.item': ('item', True), + 'movie.quotes': ('quote', False), + 'movie.quotes.quote': ('line', False), + 'movie.demographic': ('item', True), + 'movie.episodes': ('season', True), + 'movie.episodes.season': ('episode', True), + 'person.merchandising-links': ('item', True), + 'person.genres': ('item', True), + 'person.quotes': ('quote', False), + 'person.keywords': ('item', True), + 'character.quotes': ('item', True), + 'character.quotes.item': ('quote', False), + 'character.quotes.item.quote': ('line', False) + } + +_allchars = string.maketrans('', '') +_keepchars = _allchars.translate(_allchars, string.ascii_lowercase + '-' + + string.digits) + +def _tagAttr(key, fullpath): + """Return a tuple with a tag name and a (possibly empty) attribute, + applying the conversions specified in TAGS_TO_MODIFY and checking + that the tag is safe for a XML document.""" + attrs = {} + _escapedKey = escape4xml(key) + if fullpath in TAGS_TO_MODIFY: + tagName, useTitle = TAGS_TO_MODIFY[fullpath] + if useTitle: + attrs['key'] = _escapedKey + elif not isinstance(key, unicode): + if isinstance(key, str): + tagName = unicode(key, 'ascii', 'ignore') + else: + strType = str(type(key)).replace("", "") + attrs['keytype'] = strType + tagName = unicode(key) + 
else: + tagName = key + if isinstance(key, int): + attrs['keytype'] = 'int' + origTagName = tagName + tagName = tagName.lower().replace(' ', '-') + tagName = str(tagName).translate(_allchars, _keepchars) + if origTagName != tagName: + if 'key' not in attrs: + attrs['key'] = _escapedKey + if (not tagName) or tagName[0].isdigit() or tagName[0] == '-': + # This is a fail-safe: we should never be here, since unpredictable + # keys must be listed in TAGS_TO_MODIFY. + # This will proably break the DTD/schema, but at least it will + # produce a valid XML. + tagName = 'item' + _utils_logger.error('invalid tag: %s [%s]' % (_escapedKey, fullpath)) + attrs['key'] = _escapedKey + return tagName, u' '.join([u'%s="%s"' % i for i in attrs.items()]) + + +def _seq2xml(seq, _l=None, withRefs=False, modFunct=None, + titlesRefs=None, namesRefs=None, charactersRefs=None, + _topLevel=True, key2infoset=None, fullpath=''): + """Convert a sequence or a dictionary to a list of XML + unicode strings.""" + if _l is None: + _l = [] + if isinstance(seq, dict): + for key in seq: + value = seq[key] + if isinstance(key, _Container): + # Here we're assuming that a _Container is never a top-level + # key (otherwise we should handle key2infoset). + openTag, closeTag = _tag4TON(key) + # So that fullpath will contains something meaningful. 
+ tagName = key.__class__.__name__.lower() + else: + tagName, attrs = _tagAttr(key, fullpath) + openTag = u'<%s' % tagName + if attrs: + openTag += ' %s' % attrs + if _topLevel and key2infoset and key in key2infoset: + openTag += u' infoset="%s"' % key2infoset[key] + if isinstance(value, int): + openTag += ' type="int"' + elif isinstance(value, float): + openTag += ' type="float"' + openTag += u'>' + closeTag = u'' % tagName + _l.append(openTag) + _seq2xml(value, _l, withRefs, modFunct, titlesRefs, + namesRefs, charactersRefs, _topLevel=False, + fullpath='%s.%s' % (fullpath, tagName)) + _l.append(closeTag) + elif isinstance(seq, (list, tuple)): + tagName, attrs = _tagAttr('item', fullpath) + beginTag = u'<%s' % tagName + if attrs: + beginTag += u' %s' % attrs + #beginTag += u'>' + closeTag = u'' % tagName + for item in seq: + if isinstance(item, _Container): + _seq2xml(item, _l, withRefs, modFunct, titlesRefs, + namesRefs, charactersRefs, _topLevel=False, + fullpath='%s.%s' % (fullpath, + item.__class__.__name__.lower())) + else: + openTag = beginTag + if isinstance(item, int): + openTag += ' type="int"' + elif isinstance(item, float): + openTag += ' type="float"' + openTag += u'>' + _l.append(openTag) + _seq2xml(item, _l, withRefs, modFunct, titlesRefs, + namesRefs, charactersRefs, _topLevel=False, + fullpath='%s.%s' % (fullpath, tagName)) + _l.append(closeTag) + else: + if isinstance(seq, _Container): + _l.extend(_tag4TON(seq)) + else: + # Text, ints, floats and the like. + _l.append(_normalizeValue(seq, withRefs=withRefs, + modFunct=modFunct, + titlesRefs=titlesRefs, + namesRefs=namesRefs, + charactersRefs=charactersRefs)) + return _l + + +_xmlHead = u""" + + +""" +_xmlHead = _xmlHead.replace('{VERSION}', + VERSION.replace('.', '').split('dev')[0][:2]) + + +class _Container(object): + """Base class for Movie, Person, Character and Company classes.""" + # The default sets of information retrieved. + default_info = () + + # Aliases for some not-so-intuitive keys. 
+ keys_alias = {} + + # List of keys to modify. + keys_tomodify_list = () + + # Function used to compare two instances of this class. + cmpFunct = None + + # Regular expression used to build the 'full-size (headshot|cover url)'. + _re_fullsizeURL = re.compile(r'\._V1\._SX(\d+)_SY(\d+)_') + + def __init__(self, myID=None, data=None, notes=u'', + currentRole=u'', roleID=None, roleIsPerson=False, + accessSystem=None, titlesRefs=None, namesRefs=None, + charactersRefs=None, modFunct=None, *args, **kwds): + """Initialize a Movie, Person, Character or Company object. + *myID* -- your personal identifier for this object. + *data* -- a dictionary used to initialize the object. + *notes* -- notes for the person referred in the currentRole + attribute; e.g.: '(voice)' or the alias used in the + movie credits. + *accessSystem* -- a string representing the data access system used. + *currentRole* -- a Character instance representing the current role + or duty of a person in this movie, or a Person + object representing the actor/actress who played + a given character in a Movie. If a string is + passed, an object is automatically build. + *roleID* -- if available, the characterID/personID of the currentRole + object. + *roleIsPerson* -- when False (default) the currentRole is assumed + to be a Character object, otherwise a Person. + *titlesRefs* -- a dictionary with references to movies. + *namesRefs* -- a dictionary with references to persons. + *charactersRefs* -- a dictionary with references to characters. + *modFunct* -- function called returning text fields. 
+ """ + self.reset() + self.accessSystem = accessSystem + self.myID = myID + if data is None: data = {} + self.set_data(data, override=1) + self.notes = notes + if titlesRefs is None: titlesRefs = {} + self.update_titlesRefs(titlesRefs) + if namesRefs is None: namesRefs = {} + self.update_namesRefs(namesRefs) + if charactersRefs is None: charactersRefs = {} + self.update_charactersRefs(charactersRefs) + self.set_mod_funct(modFunct) + self.keys_tomodify = {} + for item in self.keys_tomodify_list: + self.keys_tomodify[item] = None + self._roleIsPerson = roleIsPerson + if not roleIsPerson: + from imdb.Character import Character + self._roleClass = Character + else: + from imdb.Person import Person + self._roleClass = Person + self.currentRole = currentRole + if roleID: + self.roleID = roleID + self._init(*args, **kwds) + + def _get_roleID(self): + """Return the characterID or personID of the currentRole object.""" + if not self.__role: + return None + if isinstance(self.__role, list): + return [x.getID() for x in self.__role] + return self.currentRole.getID() + + def _set_roleID(self, roleID): + """Set the characterID or personID of the currentRole object.""" + if not self.__role: + # XXX: needed? Just ignore it? It's probably safer to + # ignore it, to prevent some bugs in the parsers. + #raise IMDbError,"Can't set ID of an empty Character/Person object." 
+ pass + if not self._roleIsPerson: + if not isinstance(roleID, (list, tuple)): + self.currentRole.characterID = roleID + else: + for index, item in enumerate(roleID): + self.__role[index].characterID = item + else: + if not isinstance(roleID, (list, tuple)): + self.currentRole.personID = roleID + else: + for index, item in enumerate(roleID): + self.__role[index].personID = item + + roleID = property(_get_roleID, _set_roleID, + doc="the characterID or personID of the currentRole object.") + + def _get_currentRole(self): + """Return a Character or Person instance.""" + if self.__role: + return self.__role + return self._roleClass(name=u'', accessSystem=self.accessSystem, + modFunct=self.modFunct) + + def _set_currentRole(self, role): + """Set self.currentRole to a Character or Person instance.""" + if isinstance(role, (unicode, str)): + if not role: + self.__role = None + else: + self.__role = self._roleClass(name=role, modFunct=self.modFunct, + accessSystem=self.accessSystem) + elif isinstance(role, (list, tuple)): + self.__role = RolesList() + for item in role: + if isinstance(item, (unicode, str)): + self.__role.append(self._roleClass(name=item, + accessSystem=self.accessSystem, + modFunct=self.modFunct)) + else: + self.__role.append(item) + if not self.__role: + self.__role = None + else: + self.__role = role + + currentRole = property(_get_currentRole, _set_currentRole, + doc="The role of a Person in a Movie" + \ + " or the interpreter of a Character in a Movie.") + + def _init(self, **kwds): pass + + def reset(self): + """Reset the object.""" + self.data = {} + self.myID = None + self.notes = u'' + self.titlesRefs = {} + self.namesRefs = {} + self.charactersRefs = {} + self.modFunct = modClearRefs + self.current_info = [] + self.infoset2keys = {} + self.key2infoset = {} + self.__role = None + self._reset() + + def _reset(self): pass + + def clear(self): + """Reset the dictionary.""" + self.data.clear() + self.notes = u'' + self.titlesRefs = {} + self.namesRefs 
= {} + self.charactersRefs = {} + self.current_info = [] + self.infoset2keys = {} + self.key2infoset = {} + self.__role = None + self._clear() + + def _clear(self): pass + + def get_current_info(self): + """Return the current set of information retrieved.""" + return self.current_info + + def update_infoset_map(self, infoset, keys, mainInfoset): + """Update the mappings between infoset and keys.""" + if keys is None: + keys = [] + if mainInfoset is not None: + theIS = mainInfoset + else: + theIS = infoset + self.infoset2keys[theIS] = keys + for key in keys: + self.key2infoset[key] = theIS + + def set_current_info(self, ci): + """Set the current set of information retrieved.""" + # XXX:Remove? It's never used and there's no way to update infoset2keys. + self.current_info = ci + + def add_to_current_info(self, val, keys=None, mainInfoset=None): + """Add a set of information to the current list.""" + if val not in self.current_info: + self.current_info.append(val) + self.update_infoset_map(val, keys, mainInfoset) + + def has_current_info(self, val): + """Return true if the given set of information is in the list.""" + return val in self.current_info + + def set_mod_funct(self, modFunct): + """Set the fuction used to modify the strings.""" + if modFunct is None: modFunct = modClearRefs + self.modFunct = modFunct + + def update_titlesRefs(self, titlesRefs): + """Update the dictionary with the references to movies.""" + self.titlesRefs.update(titlesRefs) + + def get_titlesRefs(self): + """Return the dictionary with the references to movies.""" + return self.titlesRefs + + def update_namesRefs(self, namesRefs): + """Update the dictionary with the references to names.""" + self.namesRefs.update(namesRefs) + + def get_namesRefs(self): + """Return the dictionary with the references to names.""" + return self.namesRefs + + def update_charactersRefs(self, charactersRefs): + """Update the dictionary with the references to characters.""" + 
self.charactersRefs.update(charactersRefs) + + def get_charactersRefs(self): + """Return the dictionary with the references to characters.""" + return self.charactersRefs + + def set_data(self, data, override=0): + """Set the movie data to the given dictionary; if 'override' is + set, the previous data is removed, otherwise the two dictionary + are merged. + """ + if not override: + self.data.update(data) + else: + self.data = data + + def getID(self): + """Return movieID, personID, characterID or companyID.""" + raise NotImplementedError, 'override this method' + + def __cmp__(self, other): + """Compare two Movie, Person, Character or Company objects.""" + # XXX: raise an exception? + if self.cmpFunct is None: return -1 + if not isinstance(other, self.__class__): return -1 + return self.cmpFunct(other) + + def __hash__(self): + """Hash for this object.""" + # XXX: does it always work correctly? + theID = self.getID() + if theID is not None and self.accessSystem not in ('UNKNOWN', None): + # Handle 'http' and 'mobile' as they are the same access system. + acs = self.accessSystem + if acs in ('mobile', 'httpThin'): + acs = 'http' + # There must be some indication of the kind of the object, too. + s4h = '%s:%s[%s]' % (self.__class__.__name__, theID, acs) + else: + s4h = repr(self) + return hash(s4h) + + def isSame(self, other): + """Return True if the two represent the same object.""" + if not isinstance(other, self.__class__): return 0 + if hash(self) == hash(other): return 1 + return 0 + + def __len__(self): + """Number of items in the data dictionary.""" + return len(self.data) + + def getAsXML(self, key, _with_add_keys=True): + """Return a XML representation of the specified key, or None + if empty. If _with_add_keys is False, dinamically generated + keys are excluded.""" + # Prevent modifyStrings in __getitem__ to be called; if needed, + # it will be called by the _normalizeValue function. 
+ origModFunct = self.modFunct + self.modFunct = modNull + # XXX: not totally sure it's a good idea, but could prevent + # problems (i.e.: the returned string always contains + # a DTD valid tag, and not something that can be only in + # the keys_alias map). + key = self.keys_alias.get(key, key) + if (not _with_add_keys) and (key in self._additional_keys()): + self.modFunct = origModFunct + return None + try: + withRefs = False + if key in self.keys_tomodify and \ + origModFunct not in (None, modNull): + withRefs = True + value = self.get(key) + if value is None: + return None + tag = self.__class__.__name__.lower() + return u''.join(_seq2xml({key: value}, withRefs=withRefs, + modFunct=origModFunct, + titlesRefs=self.titlesRefs, + namesRefs=self.namesRefs, + charactersRefs=self.charactersRefs, + key2infoset=self.key2infoset, + fullpath=tag)) + finally: + self.modFunct = origModFunct + + def asXML(self, _with_add_keys=True): + """Return a XML representation of the whole object. + If _with_add_keys is False, dinamically generated keys are excluded.""" + beginTag, endTag = _tag4TON(self, addAccessSystem=True, + _containerOnly=True) + resList = [beginTag] + for key in self.keys(): + value = self.getAsXML(key, _with_add_keys=_with_add_keys) + if not value: + continue + resList.append(value) + resList.append(endTag) + head = _xmlHead % self.__class__.__name__.lower() + return head + u''.join(resList) + + def _getitem(self, key): + """Handle special keys.""" + return None + + def __getitem__(self, key): + """Return the value for a given key, checking key aliases; + a KeyError exception is raised if the key is not found. + """ + value = self._getitem(key) + if value is not None: return value + # Handle key aliases. 
+ key = self.keys_alias.get(key, key) + rawData = self.data[key] + if key in self.keys_tomodify and \ + self.modFunct not in (None, modNull): + try: + return modifyStrings(rawData, self.modFunct, self.titlesRefs, + self.namesRefs, self.charactersRefs) + except RuntimeError, e: + # Symbian/python 2.2 has a poor regexp implementation. + import warnings + warnings.warn('RuntimeError in ' + "imdb.utils._Container.__getitem__; if it's not " + "a recursion limit exceeded and we're not running " + "in a Symbian environment, it's a bug:\n%s" % e) + return rawData + + def __setitem__(self, key, item): + """Directly store the item with the given key.""" + self.data[key] = item + + def __delitem__(self, key): + """Remove the given section or key.""" + # XXX: how to remove an item of a section? + del self.data[key] + + def _additional_keys(self): + """Valid keys to append to the data.keys() list.""" + return [] + + def keys(self): + """Return a list of valid keys.""" + return self.data.keys() + self._additional_keys() + + def items(self): + """Return the items in the dictionary.""" + return [(k, self.get(k)) for k in self.keys()] + + # XXX: is this enough? + def iteritems(self): return self.data.iteritems() + def iterkeys(self): return self.data.iterkeys() + def itervalues(self): return self.data.itervalues() + + def values(self): + """Return the values in the dictionary.""" + return [self.get(k) for k in self.keys()] + + def has_key(self, key): + """Return true if a given section is defined.""" + try: + self.__getitem__(key) + except KeyError: + return 0 + return 1 + + # XXX: really useful??? + # consider also that this will confuse people who meant to + # call ia.update(movieObject, 'data set') instead. 
+ def update(self, dict): + self.data.update(dict) + + def get(self, key, failobj=None): + """Return the given section, or default if it's not found.""" + try: + return self.__getitem__(key) + except KeyError: + return failobj + + def setdefault(self, key, failobj=None): + if not self.has_key(key): + self[key] = failobj + return self[key] + + def pop(self, key, *args): + return self.data.pop(key, *args) + + def popitem(self): + return self.data.popitem() + + def __repr__(self): + """String representation of an object.""" + raise NotImplementedError, 'override this method' + + def __str__(self): + """Movie title or person name.""" + raise NotImplementedError, 'override this method' + + def __contains__(self, key): + raise NotImplementedError, 'override this method' + + def append_item(self, key, item): + """The item is appended to the list identified by the given key.""" + self.data.setdefault(key, []).append(item) + + def set_item(self, key, item): + """Directly store the item with the given key.""" + self.data[key] = item + + def __nonzero__(self): + """Return true if self.data contains something.""" + if self.data: return 1 + return 0 + + def __deepcopy__(self, memo): + raise NotImplementedError, 'override this method' + + def copy(self): + """Return a deep copy of the object itself.""" + return deepcopy(self) + + +def flatten(seq, toDescend=(list, dict, tuple), yieldDictKeys=0, + onlyKeysType=(_Container,), scalar=None): + """Iterate over nested lists and dictionaries; toDescend is a list + or a tuple of types to be considered non-scalar; if yieldDictKeys is + true, also dictionaries' keys are yielded; if scalar is not None, only + items of the given type(s) are yielded.""" + if scalar is None or isinstance(seq, scalar): + yield seq + if isinstance(seq, toDescend): + if isinstance(seq, (dict, _Container)): + if yieldDictKeys: + # Yield also the keys of the dictionary. 
+ for key in seq.iterkeys(): + for k in flatten(key, toDescend=toDescend, + yieldDictKeys=yieldDictKeys, + onlyKeysType=onlyKeysType, scalar=scalar): + if onlyKeysType and isinstance(k, onlyKeysType): + yield k + for value in seq.itervalues(): + for v in flatten(value, toDescend=toDescend, + yieldDictKeys=yieldDictKeys, + onlyKeysType=onlyKeysType, scalar=scalar): + yield v + elif not isinstance(seq, (str, unicode, int, float)): + for item in seq: + for i in flatten(item, toDescend=toDescend, + yieldDictKeys=yieldDictKeys, + onlyKeysType=onlyKeysType, scalar=scalar): + yield i + + diff --git a/libs/jinja2/__init__.py b/libs/jinja2/__init__.py new file mode 100644 index 0000000..dec8c27 --- /dev/null +++ b/libs/jinja2/__init__.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +""" + jinja2 + ~~~~~~ + + Jinja2 is a template engine written in pure Python. It provides a + Django inspired non-XML syntax but supports inline expressions and + an optional sandboxed environment. + + Nutshell + -------- + + Here a small example of a Jinja2 template:: + + {% extends 'base.html' %} + {% block title %}Memberlist{% endblock %} + {% block content %} +
    + {% endblock %} + + + :copyright: (c) 2010 by the Jinja Team. + :license: BSD, see LICENSE for more details. +""" +__docformat__ = 'restructuredtext en' +try: + __version__ = __import__('pkg_resources') \ + .get_distribution('Jinja2').version +except Exception: + __version__ = 'unknown' + +# high level interface +from jinja2.environment import Environment, Template + +# loaders +from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \ + DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \ + ModuleLoader + +# bytecode caches +from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \ + MemcachedBytecodeCache + +# undefined types +from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined + +# exceptions +from jinja2.exceptions import TemplateError, UndefinedError, \ + TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \ + TemplateAssertionError + +# decorators and public utilities +from jinja2.filters import environmentfilter, contextfilter, \ + evalcontextfilter +from jinja2.utils import Markup, escape, clear_caches, \ + environmentfunction, evalcontextfunction, contextfunction, \ + is_undefined + +__all__ = [ + 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader', + 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader', + 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache', + 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined', + 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound', + 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError', + 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape', + 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined', + 'evalcontextfilter', 'evalcontextfunction' +] diff --git a/libs/jinja2/_debugsupport.c b/libs/jinja2/_debugsupport.c new file mode 100644 index 0000000..e756d8e --- /dev/null +++ b/libs/jinja2/_debugsupport.c @@ -0,0 +1,78 @@ +/** + * jinja2._debugsupport + * 
~~~~~~~~~~~~~~~~~~~~ + * + * C implementation of `tb_set_next`. + * + * :copyright: (c) 2010 by the Jinja Team. + * :license: BSD. + */ + +#include <Python.h> + + +static PyObject* +tb_set_next(PyObject *self, PyObject *args) +{ + PyTracebackObject *tb, *old; + PyObject *next; + + if (!PyArg_ParseTuple(args, "O!O:tb_set_next", &PyTraceBack_Type, &tb, &next)) + return NULL; + if (next == Py_None) + next = NULL; + else if (!PyTraceBack_Check(next)) { + PyErr_SetString(PyExc_TypeError, + "tb_set_next arg 2 must be traceback or None"); + return NULL; + } + else + Py_INCREF(next); + + old = tb->tb_next; + tb->tb_next = (PyTracebackObject*)next; + Py_XDECREF(old); + + Py_INCREF(Py_None); + return Py_None; +} + +static PyMethodDef module_methods[] = { + {"tb_set_next", (PyCFunction)tb_set_next, METH_VARARGS, + "Set the tb_next member of a traceback object."}, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + + +#if PY_MAJOR_VERSION < 3 + +#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ +#define PyMODINIT_FUNC void +#endif +PyMODINIT_FUNC +init_debugsupport(void) +{ + Py_InitModule3("jinja2._debugsupport", module_methods, ""); +} + +#else /* Python 3.x module initialization */ + +static struct PyModuleDef module_definition = { + PyModuleDef_HEAD_INIT, + "jinja2._debugsupport", + NULL, + -1, + module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC +PyInit__debugsupport(void) +{ + return PyModule_Create(&module_definition); +} + +#endif diff --git a/libs/jinja2/_markupsafe/__init__.py b/libs/jinja2/_markupsafe/__init__.py new file mode 100644 index 0000000..ec7bd57 --- /dev/null +++ b/libs/jinja2/_markupsafe/__init__.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +""" + markupsafe + ~~~~~~~~~~ + + Implements a Markup string. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details.
+""" +import re +from itertools import imap + + +__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent'] + + +_striptags_re = re.compile(r'(|<[^>]*>)') +_entity_re = re.compile(r'&([^;]+);') + + +class Markup(unicode): + r"""Marks a string as being safe for inclusion in HTML/XML output without + needing to be escaped. This implements the `__html__` interface a couple + of frameworks and web applications use. :class:`Markup` is a direct + subclass of `unicode` and provides all the methods of `unicode` just that + it escapes arguments passed and always returns `Markup`. + + The `escape` function returns markup objects so that double escaping can't + happen. + + The constructor of the :class:`Markup` class can be used for three + different things: When passed an unicode object it's assumed to be safe, + when passed an object with an HTML representation (has an `__html__` + method) that representation is used, otherwise the object passed is + converted into a unicode string and then assumed to be safe: + + >>> Markup("Hello World!") + Markup(u'Hello World!') + >>> class Foo(object): + ... def __html__(self): + ... return 'foo' + ... 
+ >>> Markup(Foo()) + Markup(u'foo') + + If you want object passed being always treated as unsafe you can use the + :meth:`escape` classmethod to create a :class:`Markup` object: + + >>> Markup.escape("Hello World!") + Markup(u'Hello <em>World</em>!') + + Operations on a markup string are markup aware which means that all + arguments are passed through the :func:`escape` function: + + >>> em = Markup("%s") + >>> em % "foo & bar" + Markup(u'foo & bar') + >>> strong = Markup("%(text)s") + >>> strong % {'text': 'hacker here'} + Markup(u'<blink>hacker here</blink>') + >>> Markup("Hello ") + "" + Markup(u'Hello <foo>') + """ + __slots__ = () + + def __new__(cls, base=u'', encoding=None, errors='strict'): + if hasattr(base, '__html__'): + base = base.__html__() + if encoding is None: + return unicode.__new__(cls, base) + return unicode.__new__(cls, base, encoding, errors) + + def __html__(self): + return self + + def __add__(self, other): + if hasattr(other, '__html__') or isinstance(other, basestring): + return self.__class__(unicode(self) + unicode(escape(other))) + return NotImplemented + + def __radd__(self, other): + if hasattr(other, '__html__') or isinstance(other, basestring): + return self.__class__(unicode(escape(other)) + unicode(self)) + return NotImplemented + + def __mul__(self, num): + if isinstance(num, (int, long)): + return self.__class__(unicode.__mul__(self, num)) + return NotImplemented + __rmul__ = __mul__ + + def __mod__(self, arg): + if isinstance(arg, tuple): + arg = tuple(imap(_MarkupEscapeHelper, arg)) + else: + arg = _MarkupEscapeHelper(arg) + return self.__class__(unicode.__mod__(self, arg)) + + def __repr__(self): + return '%s(%s)' % ( + self.__class__.__name__, + unicode.__repr__(self) + ) + + def join(self, seq): + return self.__class__(unicode.join(self, imap(escape, seq))) + join.__doc__ = unicode.join.__doc__ + + def split(self, *args, **kwargs): + return map(self.__class__, unicode.split(self, *args, **kwargs)) + split.__doc__ = 
unicode.split.__doc__ + + def rsplit(self, *args, **kwargs): + return map(self.__class__, unicode.rsplit(self, *args, **kwargs)) + rsplit.__doc__ = unicode.rsplit.__doc__ + + def splitlines(self, *args, **kwargs): + return map(self.__class__, unicode.splitlines(self, *args, **kwargs)) + splitlines.__doc__ = unicode.splitlines.__doc__ + + def unescape(self): + r"""Unescape markup again into an unicode string. This also resolves + known HTML4 and XHTML entities: + + >>> Markup("Main » About").unescape() + u'Main \xbb About' + """ + from jinja2._markupsafe._constants import HTML_ENTITIES + def handle_match(m): + name = m.group(1) + if name in HTML_ENTITIES: + return unichr(HTML_ENTITIES[name]) + try: + if name[:2] in ('#x', '#X'): + return unichr(int(name[2:], 16)) + elif name.startswith('#'): + return unichr(int(name[1:])) + except ValueError: + pass + return u'' + return _entity_re.sub(handle_match, unicode(self)) + + def striptags(self): + r"""Unescape markup into an unicode string and strip all tags. This + also resolves known HTML4 and XHTML entities. Whitespace is + normalized to one: + + >>> Markup("Main » About").striptags() + u'Main \xbb About' + """ + stripped = u' '.join(_striptags_re.sub('', self).split()) + return Markup(stripped).unescape() + + @classmethod + def escape(cls, s): + """Escape the string. Works like :func:`escape` with the difference + that for subclasses of :class:`Markup` this function would return the + correct subclass. 
+ """ + rv = escape(s) + if rv.__class__ is not cls: + return cls(rv) + return rv + + def make_wrapper(name): + orig = getattr(unicode, name) + def func(self, *args, **kwargs): + args = _escape_argspec(list(args), enumerate(args)) + _escape_argspec(kwargs, kwargs.iteritems()) + return self.__class__(orig(self, *args, **kwargs)) + func.__name__ = orig.__name__ + func.__doc__ = orig.__doc__ + return func + + for method in '__getitem__', 'capitalize', \ + 'title', 'lower', 'upper', 'replace', 'ljust', \ + 'rjust', 'lstrip', 'rstrip', 'center', 'strip', \ + 'translate', 'expandtabs', 'swapcase', 'zfill': + locals()[method] = make_wrapper(method) + + # new in python 2.5 + if hasattr(unicode, 'partition'): + partition = make_wrapper('partition'), + rpartition = make_wrapper('rpartition') + + # new in python 2.6 + if hasattr(unicode, 'format'): + format = make_wrapper('format') + + # not in python 3 + if hasattr(unicode, '__getslice__'): + __getslice__ = make_wrapper('__getslice__') + + del method, make_wrapper + + +def _escape_argspec(obj, iterable): + """Helper for various string-wrapped functions.""" + for key, value in iterable: + if hasattr(value, '__html__') or isinstance(value, basestring): + obj[key] = escape(value) + return obj + + +class _MarkupEscapeHelper(object): + """Helper for Markup.__mod__""" + + def __init__(self, obj): + self.obj = obj + + __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x]) + __str__ = lambda s: str(escape(s.obj)) + __unicode__ = lambda s: unicode(escape(s.obj)) + __repr__ = lambda s: str(escape(repr(s.obj))) + __int__ = lambda s: int(s.obj) + __float__ = lambda s: float(s.obj) + + +# we have to import it down here as the speedups and native +# modules imports the markup type which is define above. 
+try: + from jinja2._markupsafe._speedups import escape, escape_silent, soft_unicode +except ImportError: + from jinja2._markupsafe._native import escape, escape_silent, soft_unicode diff --git a/libs/jinja2/_markupsafe/_bundle.py b/libs/jinja2/_markupsafe/_bundle.py new file mode 100644 index 0000000..e694faf --- /dev/null +++ b/libs/jinja2/_markupsafe/_bundle.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +""" + jinja2._markupsafe._bundle + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + This script pulls in markupsafe from a source folder and + bundles it with Jinja2. It does not pull in the speedups + module though. + + :copyright: Copyright 2010 by the Jinja team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +import sys +import os +import re + + +def rewrite_imports(lines): + for idx, line in enumerate(lines): + new_line = re.sub(r'(import|from)\s+markupsafe\b', + r'\1 jinja2._markupsafe', line) + if new_line != line: + lines[idx] = new_line + + +def main(): + if len(sys.argv) != 2: + print 'error: only argument is path to markupsafe' + sys.exit(1) + basedir = os.path.dirname(__file__) + markupdir = sys.argv[1] + for filename in os.listdir(markupdir): + if filename.endswith('.py'): + f = open(os.path.join(markupdir, filename)) + try: + lines = list(f) + finally: + f.close() + rewrite_imports(lines) + f = open(os.path.join(basedir, filename), 'w') + try: + for line in lines: + f.write(line) + finally: + f.close() + + +if __name__ == '__main__': + main() diff --git a/libs/jinja2/_markupsafe/_constants.py b/libs/jinja2/_markupsafe/_constants.py new file mode 100644 index 0000000..919bf03 --- /dev/null +++ b/libs/jinja2/_markupsafe/_constants.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- +""" + markupsafe._constants + ~~~~~~~~~~~~~~~~~~~~~ + + Highlevel implementation of the Markup string. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. 
+""" + + +HTML_ENTITIES = { + 'AElig': 198, + 'Aacute': 193, + 'Acirc': 194, + 'Agrave': 192, + 'Alpha': 913, + 'Aring': 197, + 'Atilde': 195, + 'Auml': 196, + 'Beta': 914, + 'Ccedil': 199, + 'Chi': 935, + 'Dagger': 8225, + 'Delta': 916, + 'ETH': 208, + 'Eacute': 201, + 'Ecirc': 202, + 'Egrave': 200, + 'Epsilon': 917, + 'Eta': 919, + 'Euml': 203, + 'Gamma': 915, + 'Iacute': 205, + 'Icirc': 206, + 'Igrave': 204, + 'Iota': 921, + 'Iuml': 207, + 'Kappa': 922, + 'Lambda': 923, + 'Mu': 924, + 'Ntilde': 209, + 'Nu': 925, + 'OElig': 338, + 'Oacute': 211, + 'Ocirc': 212, + 'Ograve': 210, + 'Omega': 937, + 'Omicron': 927, + 'Oslash': 216, + 'Otilde': 213, + 'Ouml': 214, + 'Phi': 934, + 'Pi': 928, + 'Prime': 8243, + 'Psi': 936, + 'Rho': 929, + 'Scaron': 352, + 'Sigma': 931, + 'THORN': 222, + 'Tau': 932, + 'Theta': 920, + 'Uacute': 218, + 'Ucirc': 219, + 'Ugrave': 217, + 'Upsilon': 933, + 'Uuml': 220, + 'Xi': 926, + 'Yacute': 221, + 'Yuml': 376, + 'Zeta': 918, + 'aacute': 225, + 'acirc': 226, + 'acute': 180, + 'aelig': 230, + 'agrave': 224, + 'alefsym': 8501, + 'alpha': 945, + 'amp': 38, + 'and': 8743, + 'ang': 8736, + 'apos': 39, + 'aring': 229, + 'asymp': 8776, + 'atilde': 227, + 'auml': 228, + 'bdquo': 8222, + 'beta': 946, + 'brvbar': 166, + 'bull': 8226, + 'cap': 8745, + 'ccedil': 231, + 'cedil': 184, + 'cent': 162, + 'chi': 967, + 'circ': 710, + 'clubs': 9827, + 'cong': 8773, + 'copy': 169, + 'crarr': 8629, + 'cup': 8746, + 'curren': 164, + 'dArr': 8659, + 'dagger': 8224, + 'darr': 8595, + 'deg': 176, + 'delta': 948, + 'diams': 9830, + 'divide': 247, + 'eacute': 233, + 'ecirc': 234, + 'egrave': 232, + 'empty': 8709, + 'emsp': 8195, + 'ensp': 8194, + 'epsilon': 949, + 'equiv': 8801, + 'eta': 951, + 'eth': 240, + 'euml': 235, + 'euro': 8364, + 'exist': 8707, + 'fnof': 402, + 'forall': 8704, + 'frac12': 189, + 'frac14': 188, + 'frac34': 190, + 'frasl': 8260, + 'gamma': 947, + 'ge': 8805, + 'gt': 62, + 'hArr': 8660, + 'harr': 8596, + 'hearts': 9829, + 'hellip': 8230, + 
'iacute': 237, + 'icirc': 238, + 'iexcl': 161, + 'igrave': 236, + 'image': 8465, + 'infin': 8734, + 'int': 8747, + 'iota': 953, + 'iquest': 191, + 'isin': 8712, + 'iuml': 239, + 'kappa': 954, + 'lArr': 8656, + 'lambda': 955, + 'lang': 9001, + 'laquo': 171, + 'larr': 8592, + 'lceil': 8968, + 'ldquo': 8220, + 'le': 8804, + 'lfloor': 8970, + 'lowast': 8727, + 'loz': 9674, + 'lrm': 8206, + 'lsaquo': 8249, + 'lsquo': 8216, + 'lt': 60, + 'macr': 175, + 'mdash': 8212, + 'micro': 181, + 'middot': 183, + 'minus': 8722, + 'mu': 956, + 'nabla': 8711, + 'nbsp': 160, + 'ndash': 8211, + 'ne': 8800, + 'ni': 8715, + 'not': 172, + 'notin': 8713, + 'nsub': 8836, + 'ntilde': 241, + 'nu': 957, + 'oacute': 243, + 'ocirc': 244, + 'oelig': 339, + 'ograve': 242, + 'oline': 8254, + 'omega': 969, + 'omicron': 959, + 'oplus': 8853, + 'or': 8744, + 'ordf': 170, + 'ordm': 186, + 'oslash': 248, + 'otilde': 245, + 'otimes': 8855, + 'ouml': 246, + 'para': 182, + 'part': 8706, + 'permil': 8240, + 'perp': 8869, + 'phi': 966, + 'pi': 960, + 'piv': 982, + 'plusmn': 177, + 'pound': 163, + 'prime': 8242, + 'prod': 8719, + 'prop': 8733, + 'psi': 968, + 'quot': 34, + 'rArr': 8658, + 'radic': 8730, + 'rang': 9002, + 'raquo': 187, + 'rarr': 8594, + 'rceil': 8969, + 'rdquo': 8221, + 'real': 8476, + 'reg': 174, + 'rfloor': 8971, + 'rho': 961, + 'rlm': 8207, + 'rsaquo': 8250, + 'rsquo': 8217, + 'sbquo': 8218, + 'scaron': 353, + 'sdot': 8901, + 'sect': 167, + 'shy': 173, + 'sigma': 963, + 'sigmaf': 962, + 'sim': 8764, + 'spades': 9824, + 'sub': 8834, + 'sube': 8838, + 'sum': 8721, + 'sup': 8835, + 'sup1': 185, + 'sup2': 178, + 'sup3': 179, + 'supe': 8839, + 'szlig': 223, + 'tau': 964, + 'there4': 8756, + 'theta': 952, + 'thetasym': 977, + 'thinsp': 8201, + 'thorn': 254, + 'tilde': 732, + 'times': 215, + 'trade': 8482, + 'uArr': 8657, + 'uacute': 250, + 'uarr': 8593, + 'ucirc': 251, + 'ugrave': 249, + 'uml': 168, + 'upsih': 978, + 'upsilon': 965, + 'uuml': 252, + 'weierp': 8472, + 'xi': 958, + 'yacute': 253, + 
'yen': 165, + 'yuml': 255, + 'zeta': 950, + 'zwj': 8205, + 'zwnj': 8204 +} diff --git a/libs/jinja2/_markupsafe/_native.py b/libs/jinja2/_markupsafe/_native.py new file mode 100644 index 0000000..7b95828 --- /dev/null +++ b/libs/jinja2/_markupsafe/_native.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +""" + markupsafe._native + ~~~~~~~~~~~~~~~~~~ + + Native Python implementation used when the C module is not compiled. + + :copyright: (c) 2010 by Armin Ronacher. + :license: BSD, see LICENSE for more details. +""" +from jinja2._markupsafe import Markup + + +def escape(s): + """Convert the characters &, <, >, ' and " in string s to HTML-safe + sequences. Use this if you need to display text that might contain + such characters in HTML. Marks return value as markup string. + """ + if hasattr(s, '__html__'): + return s.__html__() + return Markup(unicode(s) + .replace('&', '&amp;') + .replace('>', '&gt;') + .replace('<', '&lt;') + .replace("'", '&#39;') + .replace('"', '&#34;') + ) + + +def escape_silent(s): + """Like :func:`escape` but converts `None` into an empty + markup string. + """ + if s is None: + return Markup() + return escape(s) + + +def soft_unicode(s): + """Make a string unicode if it isn't already. That way a markup + string is not converted back to unicode.
+ """ + if not isinstance(s, unicode): + s = unicode(s) + return s diff --git a/libs/jinja2/_markupsafe/tests.py b/libs/jinja2/_markupsafe/tests.py new file mode 100644 index 0000000..c1ce394 --- /dev/null +++ b/libs/jinja2/_markupsafe/tests.py @@ -0,0 +1,80 @@ +import gc +import unittest +from jinja2._markupsafe import Markup, escape, escape_silent + + +class MarkupTestCase(unittest.TestCase): + + def test_markup_operations(self): + # adding two strings should escape the unsafe one + unsafe = '' + safe = Markup('username') + assert unsafe + safe == unicode(escape(unsafe)) + unicode(safe) + + # string interpolations are safe to use too + assert Markup('%s') % '' == \ + '<bad user>' + assert Markup('%(username)s') % { + 'username': '' + } == '<bad user>' + + # an escaped object is markup too + assert type(Markup('foo') + 'bar') is Markup + + # and it implements __html__ by returning itself + x = Markup("foo") + assert x.__html__() is x + + # it also knows how to treat __html__ objects + class Foo(object): + def __html__(self): + return 'awesome' + def __unicode__(self): + return 'awesome' + assert Markup(Foo()) == 'awesome' + assert Markup('%s') % Foo() == \ + 'awesome' + + # escaping and unescaping + assert escape('"<>&\'') == '"<>&'' + assert Markup("Foo & Bar").striptags() == "Foo & Bar" + assert Markup("<test>").unescape() == "" + + def test_all_set(self): + import jinja2._markupsafe as markup + for item in markup.__all__: + getattr(markup, item) + + def test_escape_silent(self): + assert escape_silent(None) == Markup() + assert escape(None) == Markup(None) + assert escape_silent('') == Markup(u'<foo>') + + +class MarkupLeakTestCase(unittest.TestCase): + + def test_markup_leaks(self): + counts = set() + for count in xrange(20): + for item in xrange(1000): + escape("foo") + escape("") + escape(u"foo") + escape(u"") + counts.add(len(gc.get_objects())) + assert len(counts) == 1, 'ouch, c extension seems to leak objects' + + +def suite(): + suite = 
unittest.TestSuite() + suite.addTest(unittest.makeSuite(MarkupTestCase)) + + # this test only tests the c extension + if not hasattr(escape, 'func_code'): + suite.addTest(unittest.makeSuite(MarkupLeakTestCase)) + + return suite + + +if __name__ == '__main__': + unittest.main(defaultTest='suite') diff --git a/libs/jinja2/_stringdefs.py b/libs/jinja2/_stringdefs.py new file mode 100644 index 0000000..1161b7f --- /dev/null +++ b/libs/jinja2/_stringdefs.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +""" + jinja2._stringdefs + ~~~~~~~~~~~~~~~~~~ + + Strings of all Unicode characters of a certain category. + Used for matching in Unicode-aware languages. Run to regenerate. + + Inspired by chartypes_create.py from the MoinMoin project, original + implementation from Pygments. + + :copyright: Copyright 2006-2009 by the Jinja team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +Cc = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f' + +Cf = u'\xad\u0600\u0601\u0602\u0603\u06dd\u070f\u17b4\u17b5\u200b\u200c\u200d\u200e\u200f\u202a\u202b\u202c\u202d\u202e\u2060\u2061\u2062\u2063\u206a\u206b\u206c\u206d\u206e\u206f\ufeff\ufff9\ufffa\ufffb' + +Cn = 
u'\u0242\u0243\u0244\u0245\u0246\u0247\u0248\u0249\u024a\u024b\u024c\u024d\u024e\u024f\u0370\u0371\u0372\u0373\u0376\u0377\u0378\u0379\u037b\u037c\u037d\u037f\u0380\u0381\u0382\u0383\u038b\u038d\u03a2\u03cf\u0487\u04cf\u04fa\u04fb\u04fc\u04fd\u04fe\u04ff\u0510\u0511\u0512\u0513\u0514\u0515\u0516\u0517\u0518\u0519\u051a\u051b\u051c\u051d\u051e\u051f\u0520\u0521\u0522\u0523\u0524\u0525\u0526\u0527\u0528\u0529\u052a\u052b\u052c\u052d\u052e\u052f\u0530\u0557\u0558\u0560\u0588\u058b\u058c\u058d\u058e\u058f\u0590\u05ba\u05c8\u05c9\u05ca\u05cb\u05cc\u05cd\u05ce\u05cf\u05eb\u05ec\u05ed\u05ee\u05ef\u05f5\u05f6\u05f7\u05f8\u05f9\u05fa\u05fb\u05fc\u05fd\u05fe\u05ff\u0604\u0605\u0606\u0607\u0608\u0609\u060a\u0616\u0617\u0618\u0619\u061a\u061c\u061d\u0620\u063b\u063c\u063d\u063e\u063f\u065f\u070e\u074b\u074c\u076e\u076f\u0770\u0771\u0772\u0773\u0774\u0775\u0776\u0777\u0778\u0779\u077a\u077b\u077c\u077d\u077e\u077f\u07b2\u07b3\u07b4\u07b5\u07b6\u07b7\u07b8\u07b9\u07ba\u07bb\u07bc\u07bd\u07be\u07bf\u07c0\u07c1\u07c2\u07c3\u07c4\u07c5\u07c6\u07c7\u07c8\u07c9\u07ca\u07cb\u07cc\u07cd\u07ce\u07cf\u07d0\u07d1\u07d2\u07d3\u07d4\u07d5\u07d6\u07d7\u07d8\u07d9\u07da\u07db\u07dc\u07dd\u07de\u07df\u07e0\u07e1\u07e2\u07e3\u07e4\u07e5\u07e6\u07e7\u07e8\u07e9\u07ea\u07eb\u07ec\u07ed\u07ee\u07ef\u07f0\u07f1\u07f2\u07f3\u07f4\u07f5\u07f6\u07f7\u07f8\u07f9\u07fa\u07fb\u07fc\u07fd\u07fe\u07ff\u0800\u0801\u0802\u0803\u0804\u0805\u0806\u0807\u0808\u0809\u080a\u080b\u080c\u080d\u080e\u080f\u0810\u0811\u0812\u0813\u0814\u0815\u0816\u0817\u0818\u0819\u081a\u081b\u081c\u081d\u081e\u081f\u0820\u0821\u0822\u0823\u0824\u0825\u0826\u0827\u0828\u0829\u082a\u082b\u082c\u082d\u082e\u082f\u0830\u0831\u0832\u0833\u0834\u0835\u0836\u0837\u0838\u0839\u083a\u083b\u083c\u083d\u083e\u083f\u0840\u0841\u0842\u0843\u0844\u0845\u0846\u0847\u0848\u0849\u084a\u084b\u084c\u084d\u084e\u084f\u0850\u0851\u0852\u0853\u0854\u0855\u0856\u0857\u0858\u0859\u085a\u085b\u085c\u085d\u085e\u085f\u0860\u0861\u0862\u0863\u0864\u0865\u0866
\u0867\u0868\u0869\u086a\u086b\u086c\u086d\u086e\u086f\u0870\u0871\u0872\u0873\u0874\u0875\u0876\u0877\u0878\u0879\u087a\u087b\u087c\u087d\u087e\u087f\u0880\u0881\u0882\u0883\u0884\u0885\u0886\u0887\u0888\u0889\u088a\u088b\u088c\u088d\u088e\u088f\u0890\u0891\u0892\u0893\u0894\u0895\u0896\u0897\u0898\u0899\u089a\u089b\u089c\u089d\u089e\u089f\u08a0\u08a1\u08a2\u08a3\u08a4\u08a5\u08a6\u08a7\u08a8\u08a9\u08aa\u08ab\u08ac\u08ad\u08ae\u08af\u08b0\u08b1\u08b2\u08b3\u08b4\u08b5\u08b6\u08b7\u08b8\u08b9\u08ba\u08bb\u08bc\u08bd\u08be\u08bf\u08c0\u08c1\u08c2\u08c3\u08c4\u08c5\u08c6\u08c7\u08c8\u08c9\u08ca\u08cb\u08cc\u08cd\u08ce\u08cf\u08d0\u08d1\u08d2\u08d3\u08d4\u08d5\u08d6\u08d7\u08d8\u08d9\u08da\u08db\u08dc\u08dd\u08de\u08df\u08e0\u08e1\u08e2\u08e3\u08e4\u08e5\u08e6\u08e7\u08e8\u08e9\u08ea\u08eb\u08ec\u08ed\u08ee\u08ef\u08f0\u08f1\u08f2\u08f3\u08f4\u08f5\u08f6\u08f7\u08f8\u08f9\u08fa\u08fb\u08fc\u08fd\u08fe\u08ff\u0900\u093a\u093b\u094e\u094f\u0955\u0956\u0957\u0971\u0972\u0973\u0974\u0975\u0976\u0977\u0978\u0979\u097a\u097b\u097c\u097e\u097f\u0980\u0984\u098d\u098e\u0991\u0992\u09a9\u09b1\u09b3\u09b4\u09b5\u09ba\u09bb\u09c5\u09c6\u09c9\u09ca\u09cf\u09d0\u09d1\u09d2\u09d3\u09d4\u09d5\u09d6\u09d8\u09d9\u09da\u09db\u09de\u09e4\u09e5\u09fb\u09fc\u09fd\u09fe\u09ff\u0a00\u0a04\u0a0b\u0a0c\u0a0d\u0a0e\u0a11\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a\u0a3b\u0a3d\u0a43\u0a44\u0a45\u0a46\u0a49\u0a4a\u0a4e\u0a4f\u0a50\u0a51\u0a52\u0a53\u0a54\u0a55\u0a56\u0a57\u0a58\u0a5d\u0a5f\u0a60\u0a61\u0a62\u0a63\u0a64\u0a65\u0a75\u0a76\u0a77\u0a78\u0a79\u0a7a\u0a7b\u0a7c\u0a7d\u0a7e\u0a7f\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba\u0abb\u0ac6\u0aca\u0ace\u0acf\u0ad1\u0ad2\u0ad3\u0ad4\u0ad5\u0ad6\u0ad7\u0ad8\u0ad9\u0ada\u0adb\u0adc\u0add\u0ade\u0adf\u0ae4\u0ae5\u0af0\u0af2\u0af3\u0af4\u0af5\u0af6\u0af7\u0af8\u0af9\u0afa\u0afb\u0afc\u0afd\u0afe\u0aff\u0b00\u0b04\u0b0d\u0b0e\u0b11\u0b12\u0b29\u0b31\u0b34\u0b3a\u0b3b\u0b44\u0b45\u0b46\u0b49\u0b4a\u0b4e\u0b4f\u0b50\u0b51\u0b52\u0b53\u0b54\u0b55\u0b58\u
0b59\u0b5a\u0b5b\u0b5e\u0b62\u0b63\u0b64\u0b65\u0b72\u0b73\u0b74\u0b75\u0b76\u0b77\u0b78\u0b79\u0b7a\u0b7b\u0b7c\u0b7d\u0b7e\u0b7f\u0b80\u0b81\u0b84\u0b8b\u0b8c\u0b8d\u0b91\u0b96\u0b97\u0b98\u0b9b\u0b9d\u0ba0\u0ba1\u0ba2\u0ba5\u0ba6\u0ba7\u0bab\u0bac\u0bad\u0bba\u0bbb\u0bbc\u0bbd\u0bc3\u0bc4\u0bc5\u0bc9\u0bce\u0bcf\u0bd0\u0bd1\u0bd2\u0bd3\u0bd4\u0bd5\u0bd6\u0bd8\u0bd9\u0bda\u0bdb\u0bdc\u0bdd\u0bde\u0bdf\u0be0\u0be1\u0be2\u0be3\u0be4\u0be5\u0bfb\u0bfc\u0bfd\u0bfe\u0bff\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a\u0c3b\u0c3c\u0c3d\u0c45\u0c49\u0c4e\u0c4f\u0c50\u0c51\u0c52\u0c53\u0c54\u0c57\u0c58\u0c59\u0c5a\u0c5b\u0c5c\u0c5d\u0c5e\u0c5f\u0c62\u0c63\u0c64\u0c65\u0c70\u0c71\u0c72\u0c73\u0c74\u0c75\u0c76\u0c77\u0c78\u0c79\u0c7a\u0c7b\u0c7c\u0c7d\u0c7e\u0c7f\u0c80\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba\u0cbb\u0cc5\u0cc9\u0cce\u0ccf\u0cd0\u0cd1\u0cd2\u0cd3\u0cd4\u0cd7\u0cd8\u0cd9\u0cda\u0cdb\u0cdc\u0cdd\u0cdf\u0ce2\u0ce3\u0ce4\u0ce5\u0cf0\u0cf1\u0cf2\u0cf3\u0cf4\u0cf5\u0cf6\u0cf7\u0cf8\u0cf9\u0cfa\u0cfb\u0cfc\u0cfd\u0cfe\u0cff\u0d00\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a\u0d3b\u0d3c\u0d3d\u0d44\u0d45\u0d49\u0d4e\u0d4f\u0d50\u0d51\u0d52\u0d53\u0d54\u0d55\u0d56\u0d58\u0d59\u0d5a\u0d5b\u0d5c\u0d5d\u0d5e\u0d5f\u0d62\u0d63\u0d64\u0d65\u0d70\u0d71\u0d72\u0d73\u0d74\u0d75\u0d76\u0d77\u0d78\u0d79\u0d7a\u0d7b\u0d7c\u0d7d\u0d7e\u0d7f\u0d80\u0d81\u0d84\u0d97\u0d98\u0d99\u0db2\u0dbc\u0dbe\u0dbf\u0dc7\u0dc8\u0dc9\u0dcb\u0dcc\u0dcd\u0dce\u0dd5\u0dd7\u0de0\u0de1\u0de2\u0de3\u0de4\u0de5\u0de6\u0de7\u0de8\u0de9\u0dea\u0deb\u0dec\u0ded\u0dee\u0def\u0df0\u0df1\u0df5\u0df6\u0df7\u0df8\u0df9\u0dfa\u0dfb\u0dfc\u0dfd\u0dfe\u0dff\u0e00\u0e3b\u0e3c\u0e3d\u0e3e\u0e5c\u0e5d\u0e5e\u0e5f\u0e60\u0e61\u0e62\u0e63\u0e64\u0e65\u0e66\u0e67\u0e68\u0e69\u0e6a\u0e6b\u0e6c\u0e6d\u0e6e\u0e6f\u0e70\u0e71\u0e72\u0e73\u0e74\u0e75\u0e76\u0e77\u0e78\u0e79\u0e7a\u0e7b\u0e7c\u0e7d\u0e7e\u0e7f\u0e80\u0e83\u0e85\u0e86\u0e89\u0e8b\u0e8c\u0e8e\u0e8f\u0e90\u0e91\u0e92\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8\u0ea9\u0eac\u0eba\u0e
be\u0ebf\u0ec5\u0ec7\u0ece\u0ecf\u0eda\u0edb\u0ede\u0edf\u0ee0\u0ee1\u0ee2\u0ee3\u0ee4\u0ee5\u0ee6\u0ee7\u0ee8\u0ee9\u0eea\u0eeb\u0eec\u0eed\u0eee\u0eef\u0ef0\u0ef1\u0ef2\u0ef3\u0ef4\u0ef5\u0ef6\u0ef7\u0ef8\u0ef9\u0efa\u0efb\u0efc\u0efd\u0efe\u0eff\u0f48\u0f6b\u0f6c\u0f6d\u0f6e\u0f6f\u0f70\u0f8c\u0f8d\u0f8e\u0f8f\u0f98\u0fbd\u0fcd\u0fce\u0fd2\u0fd3\u0fd4\u0fd5\u0fd6\u0fd7\u0fd8\u0fd9\u0fda\u0fdb\u0fdc\u0fdd\u0fde\u0fdf\u0fe0\u0fe1\u0fe2\u0fe3\u0fe4\u0fe5\u0fe6\u0fe7\u0fe8\u0fe9\u0fea\u0feb\u0fec\u0fed\u0fee\u0fef\u0ff0\u0ff1\u0ff2\u0ff3\u0ff4\u0ff5\u0ff6\u0ff7\u0ff8\u0ff9\u0ffa\u0ffb\u0ffc\u0ffd\u0ffe\u0fff\u1022\u1028\u102b\u1033\u1034\u1035\u103a\u103b\u103c\u103d\u103e\u103f\u105a\u105b\u105c\u105d\u105e\u105f\u1060\u1061\u1062\u1063\u1064\u1065\u1066\u1067\u1068\u1069\u106a\u106b\u106c\u106d\u106e\u106f\u1070\u1071\u1072\u1073\u1074\u1075\u1076\u1077\u1078\u1079\u107a\u107b\u107c\u107d\u107e\u107f\u1080\u1081\u1082\u1083\u1084\u1085\u1086\u1087\u1088\u1089\u108a\u108b\u108c\u108d\u108e\u108f\u1090\u1091\u1092\u1093\u1094\u1095\u1096\u1097\u1098\u1099\u109a\u109b\u109c\u109d\u109e\u109f\u10c6\u10c7\u10c8\u10c9\u10ca\u10cb\u10cc\u10cd\u10ce\u10cf\u10fd\u10fe\u10ff\u115a\u115b\u115c\u115d\u115e\u11a3\u11a4\u11a5\u11a6\u11a7\u11fa\u11fb\u11fc\u11fd\u11fe\u11ff\u1249\u124e\u124f\u1257\u1259\u125e\u125f\u1289\u128e\u128f\u12b1\u12b6\u12b7\u12bf\u12c1\u12c6\u12c7\u12d7\u1311\u1316\u1317\u135b\u135c\u135d\u135e\u137d\u137e\u137f\u139a\u139b\u139c\u139d\u139e\u139f\u13f5\u13f6\u13f7\u13f8\u13f9\u13fa\u13fb\u13fc\u13fd\u13fe\u13ff\u1400\u1677\u1678\u1679\u167a\u167b\u167c\u167d\u167e\u167f\u169d\u169e\u169f\u16f1\u16f2\u16f3\u16f4\u16f5\u16f6\u16f7\u16f8\u16f9\u16fa\u16fb\u16fc\u16fd\u16fe\u16ff\u170d\u1715\u1716\u1717\u1718\u1719\u171a\u171b\u171c\u171d\u171e\u171f\u1737\u1738\u1739\u173a\u173b\u173c\u173d\u173e\u173f\u1754\u1755\u1756\u1757\u1758\u1759\u175a\u175b\u175c\u175d\u175e\u175f\u176d\u1771\u1774\u1775\u1776\u1777\u1778\u1779\u177a\u177b\u177c\u177d\u177e\u177f
\u17de\u17df\u17ea\u17eb\u17ec\u17ed\u17ee\u17ef\u17fa\u17fb\u17fc\u17fd\u17fe\u17ff\u180f\u181a\u181b\u181c\u181d\u181e\u181f\u1878\u1879\u187a\u187b\u187c\u187d\u187e\u187f\u18aa\u18ab\u18ac\u18ad\u18ae\u18af\u18b0\u18b1\u18b2\u18b3\u18b4\u18b5\u18b6\u18b7\u18b8\u18b9\u18ba\u18bb\u18bc\u18bd\u18be\u18bf\u18c0\u18c1\u18c2\u18c3\u18c4\u18c5\u18c6\u18c7\u18c8\u18c9\u18ca\u18cb\u18cc\u18cd\u18ce\u18cf\u18d0\u18d1\u18d2\u18d3\u18d4\u18d5\u18d6\u18d7\u18d8\u18d9\u18da\u18db\u18dc\u18dd\u18de\u18df\u18e0\u18e1\u18e2\u18e3\u18e4\u18e5\u18e6\u18e7\u18e8\u18e9\u18ea\u18eb\u18ec\u18ed\u18ee\u18ef\u18f0\u18f1\u18f2\u18f3\u18f4\u18f5\u18f6\u18f7\u18f8\u18f9\u18fa\u18fb\u18fc\u18fd\u18fe\u18ff\u191d\u191e\u191f\u192c\u192d\u192e\u192f\u193c\u193d\u193e\u193f\u1941\u1942\u1943\u196e\u196f\u1975\u1976\u1977\u1978\u1979\u197a\u197b\u197c\u197d\u197e\u197f\u19aa\u19ab\u19ac\u19ad\u19ae\u19af\u19ca\u19cb\u19cc\u19cd\u19ce\u19cf\u19da\u19db\u19dc\u19dd\u1a1c\u1a1d\u1a20\u1a21\u1a22\u1a23\u1a24\u1a25\u1a26\u1a27\u1a28\u1a29\u1a2a\u1a2b\u1a2c\u1a2d\u1a2e\u1a2f\u1a30\u1a31\u1a32\u1a33\u1a34\u1a35\u1a36\u1a37\u1a38\u1a39\u1a3a\u1a3b\u1a3c\u1a3d\u1a3e\u1a3f\u1a40\u1a41\u1a42\u1a43\u1a44\u1a45\u1a46\u1a47\u1a48\u1a49\u1a4a\u1a4b\u1a4c\u1a4d\u1a4e\u1a4f\u1a50\u1a51\u1a52\u1a53\u1a54\u1a55\u1a56\u1a57\u1a58\u1a59\u1a5a\u1a5b\u1a5c\u1a5d\u1a5e\u1a5f\u1a60\u1a61\u1a62\u1a63\u1a64\u1a65\u1a66\u1a67\u1a68\u1a69\u1a6a\u1a6b\u1a6c\u1a6d\u1a6e\u1a6f\u1a70\u1a71\u1a72\u1a73\u1a74\u1a75\u1a76\u1a77\u1a78\u1a79\u1a7a\u1a7b\u1a7c\u1a7d\u1a7e\u1a7f\u1a80\u1a81\u1a82\u1a83\u1a84\u1a85\u1a86\u1a87\u1a88\u1a89\u1a8a\u1a8b\u1a8c\u1a8d\u1a8e\u1a8f\u1a90\u1a91\u1a92\u1a93\u1a94\u1a95\u1a96\u1a97\u1a98\u1a99\u1a9a\u1a9b\u1a9c\u1a9d\u1a9e\u1a9f\u1aa0\u1aa1\u1aa2\u1aa3\u1aa4\u1aa5\u1aa6\u1aa7\u1aa8\u1aa9\u1aaa\u1aab\u1aac\u1aad\u1aae\u1aaf\u1ab0\u1ab1\u1ab2\u1ab3\u1ab4\u1ab5\u1ab6\u1ab7\u1ab8\u1ab9\u1aba\u1abb\u1abc\u1abd\u1abe\u1abf\u1ac0\u1ac1\u1ac2\u1ac3\u1ac4\u1ac5\u1ac6\u1ac7\u1ac8\u1ac9\u1aca\u1acb\u1acc\u
1acd\u1ace\u1acf\u1ad0\u1ad1\u1ad2\u1ad3\u1ad4\u1ad5\u1ad6\u1ad7\u1ad8\u1ad9\u1ada\u1adb\u1adc\u1add\u1ade\u1adf\u1ae0\u1ae1\u1ae2\u1ae3\u1ae4\u1ae5\u1ae6\u1ae7\u1ae8\u1ae9\u1aea\u1aeb\u1aec\u1aed\u1aee\u1aef\u1af0\u1af1\u1af2\u1af3\u1af4\u1af5\u1af6\u1af7\u1af8\u1af9\u1afa\u1afb\u1afc\u1afd\u1afe\u1aff\u1b00\u1b01\u1b02\u1b03\u1b04\u1b05\u1b06\u1b07\u1b08\u1b09\u1b0a\u1b0b\u1b0c\u1b0d\u1b0e\u1b0f\u1b10\u1b11\u1b12\u1b13\u1b14\u1b15\u1b16\u1b17\u1b18\u1b19\u1b1a\u1b1b\u1b1c\u1b1d\u1b1e\u1b1f\u1b20\u1b21\u1b22\u1b23\u1b24\u1b25\u1b26\u1b27\u1b28\u1b29\u1b2a\u1b2b\u1b2c\u1b2d\u1b2e\u1b2f\u1b30\u1b31\u1b32\u1b33\u1b34\u1b35\u1b36\u1b37\u1b38\u1b39\u1b3a\u1b3b\u1b3c\u1b3d\u1b3e\u1b3f\u1b40\u1b41\u1b42\u1b43\u1b44\u1b45\u1b46\u1b47\u1b48\u1b49\u1b4a\u1b4b\u1b4c\u1b4d\u1b4e\u1b4f\u1b50\u1b51\u1b52\u1b53\u1b54\u1b55\u1b56\u1b57\u1b58\u1b59\u1b5a\u1b5b\u1b5c\u1b5d\u1b5e\u1b5f\u1b60\u1b61\u1b62\u1b63\u1b64\u1b65\u1b66\u1b67\u1b68\u1b69\u1b6a\u1b6b\u1b6c\u1b6d\u1b6e\u1b6f\u1b70\u1b71\u1b72\u1b73\u1b74\u1b75\u1b76\u1b77\u1b78\u1b79\u1b7a\u1b7b\u1b7c\u1b7d\u1b7e\u1b7f\u1b80\u1b81\u1b82\u1b83\u1b84\u1b85\u1b86\u1b87\u1b88\u1b89\u1b8a\u1b8b\u1b8c\u1b8d\u1b8e\u1b8f\u1b90\u1b91\u1b92\u1b93\u1b94\u1b95\u1b96\u1b97\u1b98\u1b99\u1b9a\u1b9b\u1b9c\u1b9d\u1b9e\u1b9f\u1ba0\u1ba1\u1ba2\u1ba3\u1ba4\u1ba5\u1ba6\u1ba7\u1ba8\u1ba9\u1baa\u1bab\u1bac\u1bad\u1bae\u1baf\u1bb0\u1bb1\u1bb2\u1bb3\u1bb4\u1bb5\u1bb6\u1bb7\u1bb8\u1bb9\u1bba\u1bbb\u1bbc\u1bbd\u1bbe\u1bbf\u1bc0\u1bc1\u1bc2\u1bc3\u1bc4\u1bc5\u1bc6\u1bc7\u1bc8\u1bc9\u1bca\u1bcb\u1bcc\u1bcd\u1bce\u1bcf\u1bd0\u1bd1\u1bd2\u1bd3\u1bd4\u1bd5\u1bd6\u1bd7\u1bd8\u1bd9\u1bda\u1bdb\u1bdc\u1bdd\u1bde\u1bdf\u1be0\u1be1\u1be2\u1be3\u1be4\u1be5\u1be6\u1be7\u1be8\u1be9\u1bea\u1beb\u1bec\u1bed\u1bee\u1bef\u1bf0\u1bf1\u1bf2\u1bf3\u1bf4\u1bf5\u1bf6\u1bf7\u1bf8\u1bf9\u1bfa\u1bfb\u1bfc\u1bfd\u1bfe\u1bff\u1c00\u1c01\u1c02\u1c03\u1c04\u1c05\u1c06\u1c07\u1c08\u1c09\u1c0a\u1c0b\u1c0c\u1c0d\u1c0e\u1c0f\u1c10\u1c11\u1c12\u1c13\u1c14\u1c15\u1c16\u1c17\u1c18\u1c19\u1c
1a\u1c1b\u1c1c\u1c1d\u1c1e\u1c1f\u1c20\u1c21\u1c22\u1c23\u1c24\u1c25\u1c26\u1c27\u1c28\u1c29\u1c2a\u1c2b\u1c2c\u1c2d\u1c2e\u1c2f\u1c30\u1c31\u1c32\u1c33\u1c34\u1c35\u1c36\u1c37\u1c38\u1c39\u1c3a\u1c3b\u1c3c\u1c3d\u1c3e\u1c3f\u1c40\u1c41\u1c42\u1c43\u1c44\u1c45\u1c46\u1c47\u1c48\u1c49\u1c4a\u1c4b\u1c4c\u1c4d\u1c4e\u1c4f\u1c50\u1c51\u1c52\u1c53\u1c54\u1c55\u1c56\u1c57\u1c58\u1c59\u1c5a\u1c5b\u1c5c\u1c5d\u1c5e\u1c5f\u1c60\u1c61\u1c62\u1c63\u1c64\u1c65\u1c66\u1c67\u1c68\u1c69\u1c6a\u1c6b\u1c6c\u1c6d\u1c6e\u1c6f\u1c70\u1c71\u1c72\u1c73\u1c74\u1c75\u1c76\u1c77\u1c78\u1c79\u1c7a\u1c7b\u1c7c\u1c7d\u1c7e\u1c7f\u1c80\u1c81\u1c82\u1c83\u1c84\u1c85\u1c86\u1c87\u1c88\u1c89\u1c8a\u1c8b\u1c8c\u1c8d\u1c8e\u1c8f\u1c90\u1c91\u1c92\u1c93\u1c94\u1c95\u1c96\u1c97\u1c98\u1c99\u1c9a\u1c9b\u1c9c\u1c9d\u1c9e\u1c9f\u1ca0\u1ca1\u1ca2\u1ca3\u1ca4\u1ca5\u1ca6\u1ca7\u1ca8\u1ca9\u1caa\u1cab\u1cac\u1cad\u1cae\u1caf\u1cb0\u1cb1\u1cb2\u1cb3\u1cb4\u1cb5\u1cb6\u1cb7\u1cb8\u1cb9\u1cba\u1cbb\u1cbc\u1cbd\u1cbe\u1cbf\u1cc0\u1cc1\u1cc2\u1cc3\u1cc4\u1cc5\u1cc6\u1cc7\u1cc8\u1cc9\u1cca\u1ccb\u1ccc\u1ccd\u1cce\u1ccf\u1cd0\u1cd1\u1cd2\u1cd3\u1cd4\u1cd5\u1cd6\u1cd7\u1cd8\u1cd9\u1cda\u1cdb\u1cdc\u1cdd\u1cde\u1cdf\u1ce0\u1ce1\u1ce2\u1ce3\u1ce4\u1ce5\u1ce6\u1ce7\u1ce8\u1ce9\u1cea\u1ceb\u1cec\u1ced\u1cee\u1cef\u1cf0\u1cf1\u1cf2\u1cf3\u1cf4\u1cf5\u1cf6\u1cf7\u1cf8\u1cf9\u1cfa\u1cfb\u1cfc\u1cfd\u1cfe\u1cff\u1dc4\u1dc5\u1dc6\u1dc7\u1dc8\u1dc9\u1dca\u1dcb\u1dcc\u1dcd\u1dce\u1dcf\u1dd0\u1dd1\u1dd2\u1dd3\u1dd4\u1dd5\u1dd6\u1dd7\u1dd8\u1dd9\u1dda\u1ddb\u1ddc\u1ddd\u1dde\u1ddf\u1de0\u1de1\u1de2\u1de3\u1de4\u1de5\u1de6\u1de7\u1de8\u1de9\u1dea\u1deb\u1dec\u1ded\u1dee\u1def\u1df0\u1df1\u1df2\u1df3\u1df4\u1df5\u1df6\u1df7\u1df8\u1df9\u1dfa\u1dfb\u1dfc\u1dfd\u1dfe\u1dff\u1e9c\u1e9d\u1e9e\u1e9f\u1efa\u1efb\u1efc\u1efd\u1efe\u1eff\u1f16\u1f17\u1f1e\u1f1f\u1f46\u1f47\u1f4e\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e\u1f7f\u1fb5\u1fc5\u1fd4\u1fd5\u1fdc\u1ff0\u1ff1\u1ff5\u1fff\u2064\u2065\u2066\u2067\u2068\u2069\u2072\u2073\u208f\u2095\u2096
\u2097\u2098\u2099\u209a\u209b\u209c\u209d\u209e\u209f\u20b6\u20b7\u20b8\u20b9\u20ba\u20bb\u20bc\u20bd\u20be\u20bf\u20c0\u20c1\u20c2\u20c3\u20c4\u20c5\u20c6\u20c7\u20c8\u20c9\u20ca\u20cb\u20cc\u20cd\u20ce\u20cf\u20ec\u20ed\u20ee\u20ef\u20f0\u20f1\u20f2\u20f3\u20f4\u20f5\u20f6\u20f7\u20f8\u20f9\u20fa\u20fb\u20fc\u20fd\u20fe\u20ff\u214d\u214e\u214f\u2150\u2151\u2152\u2184\u2185\u2186\u2187\u2188\u2189\u218a\u218b\u218c\u218d\u218e\u218f\u23dc\u23dd\u23de\u23df\u23e0\u23e1\u23e2\u23e3\u23e4\u23e5\u23e6\u23e7\u23e8\u23e9\u23ea\u23eb\u23ec\u23ed\u23ee\u23ef\u23f0\u23f1\u23f2\u23f3\u23f4\u23f5\u23f6\u23f7\u23f8\u23f9\u23fa\u23fb\u23fc\u23fd\u23fe\u23ff\u2427\u2428\u2429\u242a\u242b\u242c\u242d\u242e\u242f\u2430\u2431\u2432\u2433\u2434\u2435\u2436\u2437\u2438\u2439\u243a\u243b\u243c\u243d\u243e\u243f\u244b\u244c\u244d\u244e\u244f\u2450\u2451\u2452\u2453\u2454\u2455\u2456\u2457\u2458\u2459\u245a\u245b\u245c\u245d\u245e\u245f\u269d\u269e\u269f\u26b2\u26b3\u26b4\u26b5\u26b6\u26b7\u26b8\u26b9\u26ba\u26bb\u26bc\u26bd\u26be\u26bf\u26c0\u26c1\u26c2\u26c3\u26c4\u26c5\u26c6\u26c7\u26c8\u26c9\u26ca\u26cb\u26cc\u26cd\u26ce\u26cf\u26d0\u26d1\u26d2\u26d3\u26d4\u26d5\u26d6\u26d7\u26d8\u26d9\u26da\u26db\u26dc\u26dd\u26de\u26df\u26e0\u26e1\u26e2\u26e3\u26e4\u26e5\u26e6\u26e7\u26e8\u26e9\u26ea\u26eb\u26ec\u26ed\u26ee\u26ef\u26f0\u26f1\u26f2\u26f3\u26f4\u26f5\u26f6\u26f7\u26f8\u26f9\u26fa\u26fb\u26fc\u26fd\u26fe\u26ff\u2700\u2705\u270a\u270b\u2728\u274c\u274e\u2753\u2754\u2755\u2757\u275f\u2760\u2795\u2796\u2797\u27b0\u27bf\u27c7\u27c8\u27c9\u27ca\u27cb\u27cc\u27cd\u27ce\u27cf\u27ec\u27ed\u27ee\u27ef\u2b14\u2b15\u2b16\u2b17\u2b18\u2b19\u2b1a\u2b1b\u2b1c\u2b1d\u2b1e\u2b1f\u2b20\u2b21\u2b22\u2b23\u2b24\u2b25\u2b26\u2b27\u2b28\u2b29\u2b2a\u2b2b\u2b2c\u2b2d\u2b2e\u2b2f\u2b30\u2b31\u2b32\u2b33\u2b34\u2b35\u2b36\u2b37\u2b38\u2b39\u2b3a\u2b3b\u2b3c\u2b3d\u2b3e\u2b3f\u2b40\u2b41\u2b42\u2b43\u2b44\u2b45\u2b46\u2b47\u2b48\u2b49\u2b4a\u2b4b\u2b4c\u2b4d\u2b4e\u2b4f\u2b50\u2b51\u2b52\u2b53\u2b54\u2b55\u
2b56\u2b57\u2b58\u2b59\u2b5a\u2b5b\u2b5c\u2b5d\u2b5e\u2b5f\u2b60\u2b61\u2b62\u2b63\u2b64\u2b65\u2b66\u2b67\u2b68\u2b69\u2b6a\u2b6b\u2b6c\u2b6d\u2b6e\u2b6f\u2b70\u2b71\u2b72\u2b73\u2b74\u2b75\u2b76\u2b77\u2b78\u2b79\u2b7a\u2b7b\u2b7c\u2b7d\u2b7e\u2b7f\u2b80\u2b81\u2b82\u2b83\u2b84\u2b85\u2b86\u2b87\u2b88\u2b89\u2b8a\u2b8b\u2b8c\u2b8d\u2b8e\u2b8f\u2b90\u2b91\u2b92\u2b93\u2b94\u2b95\u2b96\u2b97\u2b98\u2b99\u2b9a\u2b9b\u2b9c\u2b9d\u2b9e\u2b9f\u2ba0\u2ba1\u2ba2\u2ba3\u2ba4\u2ba5\u2ba6\u2ba7\u2ba8\u2ba9\u2baa\u2bab\u2bac\u2bad\u2bae\u2baf\u2bb0\u2bb1\u2bb2\u2bb3\u2bb4\u2bb5\u2bb6\u2bb7\u2bb8\u2bb9\u2bba\u2bbb\u2bbc\u2bbd\u2bbe\u2bbf\u2bc0\u2bc1\u2bc2\u2bc3\u2bc4\u2bc5\u2bc6\u2bc7\u2bc8\u2bc9\u2bca\u2bcb\u2bcc\u2bcd\u2bce\u2bcf\u2bd0\u2bd1\u2bd2\u2bd3\u2bd4\u2bd5\u2bd6\u2bd7\u2bd8\u2bd9\u2bda\u2bdb\u2bdc\u2bdd\u2bde\u2bdf\u2be0\u2be1\u2be2\u2be3\u2be4\u2be5\u2be6\u2be7\u2be8\u2be9\u2bea\u2beb\u2bec\u2bed\u2bee\u2bef\u2bf0\u2bf1\u2bf2\u2bf3\u2bf4\u2bf5\u2bf6\u2bf7\u2bf8\u2bf9\u2bfa\u2bfb\u2bfc\u2bfd\u2bfe\u2bff\u2c2f\u2c5f\u2c60\u2c61\u2c62\u2c63\u2c64\u2c65\u2c66\u2c67\u2c68\u2c69\u2c6a\u2c6b\u2c6c\u2c6d\u2c6e\u2c6f\u2c70\u2c71\u2c72\u2c73\u2c74\u2c75\u2c76\u2c77\u2c78\u2c79\u2c7a\u2c7b\u2c7c\u2c7d\u2c7e\u2c7f\u2ceb\u2cec\u2ced\u2cee\u2cef\u2cf0\u2cf1\u2cf2\u2cf3\u2cf4\u2cf5\u2cf6\u2cf7\u2cf8\u2d26\u2d27\u2d28\u2d29\u2d2a\u2d2b\u2d2c\u2d2d\u2d2e\u2d2f\u2d66\u2d67\u2d68\u2d69\u2d6a\u2d6b\u2d6c\u2d6d\u2d6e\u2d70\u2d71\u2d72\u2d73\u2d74\u2d75\u2d76\u2d77\u2d78\u2d79\u2d7a\u2d7b\u2d7c\u2d7d\u2d7e\u2d7f\u2d97\u2d98\u2d99\u2d9a\u2d9b\u2d9c\u2d9d\u2d9e\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2de0\u2de1\u2de2\u2de3\u2de4\u2de5\u2de6\u2de7\u2de8\u2de9\u2dea\u2deb\u2dec\u2ded\u2dee\u2def\u2df0\u2df1\u2df2\u2df3\u2df4\u2df5\u2df6\u2df7\u2df8\u2df9\u2dfa\u2dfb\u2dfc\u2dfd\u2dfe\u2dff\u2e18\u2e19\u2e1a\u2e1b\u2e1e\u2e1f\u2e20\u2e21\u2e22\u2e23\u2e24\u2e25\u2e26\u2e27\u2e28\u2e29\u2e2a\u2e2b\u2e2c\u2e2d\u2e2e\u2e2f\u2e30\u2e31\u2e32\u2e33\u2e34\u2e35\u2e36\u2e37\u2e38\u2e
39\u2e3a\u2e3b\u2e3c\u2e3d\u2e3e\u2e3f\u2e40\u2e41\u2e42\u2e43\u2e44\u2e45\u2e46\u2e47\u2e48\u2e49\u2e4a\u2e4b\u2e4c\u2e4d\u2e4e\u2e4f\u2e50\u2e51\u2e52\u2e53\u2e54\u2e55\u2e56\u2e57\u2e58\u2e59\u2e5a\u2e5b\u2e5c\u2e5d\u2e5e\u2e5f\u2e60\u2e61\u2e62\u2e63\u2e64\u2e65\u2e66\u2e67\u2e68\u2e69\u2e6a\u2e6b\u2e6c\u2e6d\u2e6e\u2e6f\u2e70\u2e71\u2e72\u2e73\u2e74\u2e75\u2e76\u2e77\u2e78\u2e79\u2e7a\u2e7b\u2e7c\u2e7d\u2e7e\u2e7f\u2e9a\u2ef4\u2ef5\u2ef6\u2ef7\u2ef8\u2ef9\u2efa\u2efb\u2efc\u2efd\u2efe\u2eff\u2fd6\u2fd7\u2fd8\u2fd9\u2fda\u2fdb\u2fdc\u2fdd\u2fde\u2fdf\u2fe0\u2fe1\u2fe2\u2fe3\u2fe4\u2fe5\u2fe6\u2fe7\u2fe8\u2fe9\u2fea\u2feb\u2fec\u2fed\u2fee\u2fef\u2ffc\u2ffd\u2ffe\u2fff\u3040\u3097\u3098\u3100\u3101\u3102\u3103\u3104\u312d\u312e\u312f\u3130\u318f\u31b8\u31b9\u31ba\u31bb\u31bc\u31bd\u31be\u31bf\u31d0\u31d1\u31d2\u31d3\u31d4\u31d5\u31d6\u31d7\u31d8\u31d9\u31da\u31db\u31dc\u31dd\u31de\u31df\u31e0\u31e1\u31e2\u31e3\u31e4\u31e5\u31e6\u31e7\u31e8\u31e9\u31ea\u31eb\u31ec\u31ed\u31ee\u31ef\u321f\u3244\u3245\u3246\u3247\u3248\u3249\u324a\u324b\u324c\u324d\u324e\u324f\u32ff\u4db6\u4db7\u4db8\u4db9\u4dba\u4dbb\u4dbc\u4dbd\u4dbe\u4dbf\u9fbc\u9fbd\u9fbe\u9fbf\u9fc0\u9fc1\u9fc2\u9fc3\u9fc4\u9fc5\u9fc6\u9fc7\u9fc8\u9fc9\u9fca\u9fcb\u9fcc\u9fcd\u9fce\u9fcf\u9fd0\u9fd1\u9fd2\u9fd3\u9fd4\u9fd5\u9fd6\u9fd7\u9fd8\u9fd9\u9fda\u9fdb\u9fdc\u9fdd\u9fde\u9fdf\u9fe0\u9fe1\u9fe2\u9fe3\u9fe4\u9fe5\u9fe6\u9fe7\u9fe8\u9fe9\u9fea\u9feb\u9fec\u9fed\u9fee\u9fef\u9ff0\u9ff1\u9ff2\u9ff3\u9ff4\u9ff5\u9ff6\u9ff7\u9ff8\u9ff9\u9ffa\u9ffb\u9ffc\u9ffd\u9ffe\u9fff\ua48d\ua48e\ua48f\ua4c7\ua4c8\ua4c9\ua4ca\ua4cb\ua4cc\ua4cd\ua4ce\ua4cf\ua4d0\ua4d1\ua4d2\ua4d3\ua4d4\ua4d5\ua4d6\ua4d7\ua4d8\ua4d9\ua4da\ua4db\ua4dc\ua4dd\ua4de\ua4df\ua4e0\ua4e1\ua4e2\ua4e3\ua4e4\ua4e5\ua4e6\ua4e7\ua4e8\ua4e9\ua4ea\ua4eb\ua4ec\ua4ed\ua4ee\ua4ef\ua4f0\ua4f1\ua4f2\ua4f3\ua4f4\ua4f5\ua4f6\ua4f7\ua4f8\ua4f9\ua4fa\ua4fb\ua4fc\ua4fd\ua4fe\ua4ff\ua500\ua501\ua502\ua503\ua504\ua505\ua506\ua507\ua508\ua509\ua50a\ua50b\ua50c\ua50d\ua50e
\ua50f\ua510\ua511\ua512\ua513\ua514\ua515\ua516\ua517\ua518\ua519\ua51a\ua51b\ua51c\ua51d\ua51e\ua51f\ua520\ua521\ua522\ua523\ua524\ua525\ua526\ua527\ua528\ua529\ua52a\ua52b\ua52c\ua52d\ua52e\ua52f\ua530\ua531\ua532\ua533\ua534\ua535\ua536\ua537\ua538\ua539\ua53a\ua53b\ua53c\ua53d\ua53e\ua53f\ua540\ua541\ua542\ua543\ua544\ua545\ua546\ua547\ua548\ua549\ua54a\ua54b\ua54c\ua54d\ua54e\ua54f\ua550\ua551\ua552\ua553\ua554\ua555\ua556\ua557\ua558\ua559\ua55a\ua55b\ua55c\ua55d\ua55e\ua55f\ua560\ua561\ua562\ua563\ua564\ua565\ua566\ua567\ua568\ua569\ua56a\ua56b\ua56c\ua56d\ua56e\ua56f\ua570\ua571\ua572\ua573\ua574\ua575\ua576\ua577\ua578\ua579\ua57a\ua57b\ua57c\ua57d\ua57e\ua57f\ua580\ua581\ua582\ua583\ua584\ua585\ua586\ua587\ua588\ua589\ua58a\ua58b\ua58c\ua58d\ua58e\ua58f\ua590\ua591\ua592\ua593\ua594\ua595\ua596\ua597\ua598\ua599\ua59a\ua59b\ua59c\ua59d\ua59e\ua59f\ua5a0\ua5a1\ua5a2\ua5a3\ua5a4\ua5a5\ua5a6\ua5a7\ua5a8\ua5a9\ua5aa\ua5ab\ua5ac\ua5ad\ua5ae\ua5af\ua5b0\ua5b1\ua5b2\ua5b3\ua5b4\ua5b5\ua5b6\ua5b7\ua5b8\ua5b9\ua5ba\ua5bb\ua5bc\ua5bd\ua5be\ua5bf\ua5c0\ua5c1\ua5c2\ua5c3\ua5c4\ua5c5\ua5c6\ua5c7\ua5c8\ua5c9\ua5ca\ua5cb\ua5cc\ua5cd\ua5ce\ua5cf\ua5d0\ua5d1\ua5d2\ua5d3\ua5d4\ua5d5\ua5d6\ua5d7\ua5d8\ua5d9\ua5da\ua5db\ua5dc\ua5dd\ua5de\ua5df\ua5e0\ua5e1\ua5e2\ua5e3\ua5e4\ua5e5\ua5e6\ua5e7\ua5e8\ua5e9\ua5ea\ua5eb\ua5ec\ua5ed\ua5ee\ua5ef\ua5f0\ua5f1\ua5f2\ua5f3\ua5f4\ua5f5\ua5f6\ua5f7\ua5f8\ua5f9\ua5fa\ua5fb\ua5fc\ua5fd\ua5fe\ua5ff\ua600\ua601\ua602\ua603\ua604\ua605\ua606\ua607\ua608\ua609\ua60a\ua60b\ua60c\ua60d\ua60e\ua60f\ua610\ua611\ua612\ua613\ua614\ua615\ua616\ua617\ua618\ua619\ua61a\ua61b\ua61c\ua61d\ua61e\ua61f\ua620\ua621\ua622\ua623\ua624\ua625\ua626\ua627\ua628\ua629\ua62a\ua62b\ua62c\ua62d\ua62e\ua62f\ua630\ua631\ua632\ua633\ua634\ua635\ua636\ua637\ua638\ua639\ua63a\ua63b\ua63c\ua63d\ua63e\ua63f\ua640\ua641\ua642\ua643\ua644\ua645\ua646\ua647\ua648\ua649\ua64a\ua64b\ua64c\ua64d\ua64e\ua64f\ua650\ua651\ua652\ua653\ua654\ua655\ua656\ua657\ua658\ua659\ua65a\ua65b\u
a65c\ua65d\ua65e\ua65f\ua660\ua661\ua662\ua663\ua664\ua665\ua666\ua667\ua668\ua669\ua66a\ua66b\ua66c\ua66d\ua66e\ua66f\ua670\ua671\ua672\ua673\ua674\ua675\ua676\ua677\ua678\ua679\ua67a\ua67b\ua67c\ua67d\ua67e\ua67f\ua680\ua681\ua682\ua683\ua684\ua685\ua686\ua687\ua688\ua689\ua68a\ua68b\ua68c\ua68d\ua68e\ua68f\ua690\ua691\ua692\ua693\ua694\ua695\ua696\ua697\ua698\ua699\ua69a\ua69b\ua69c\ua69d\ua69e\ua69f\ua6a0\ua6a1\ua6a2\ua6a3\ua6a4\ua6a5\ua6a6\ua6a7\ua6a8\ua6a9\ua6aa\ua6ab\ua6ac\ua6ad\ua6ae\ua6af\ua6b0\ua6b1\ua6b2\ua6b3\ua6b4\ua6b5\ua6b6\ua6b7\ua6b8\ua6b9\ua6ba\ua6bb\ua6bc\ua6bd\ua6be\ua6bf\ua6c0\ua6c1\ua6c2\ua6c3\ua6c4\ua6c5\ua6c6\ua6c7\ua6c8\ua6c9\ua6ca\ua6cb\ua6cc\ua6cd\ua6ce\ua6cf\ua6d0\ua6d1\ua6d2\ua6d3\ua6d4\ua6d5\ua6d6\ua6d7\ua6d8\ua6d9\ua6da\ua6db\ua6dc\ua6dd\ua6de\ua6df\ua6e0\ua6e1\ua6e2\ua6e3\ua6e4\ua6e5\ua6e6\ua6e7\ua6e8\ua6e9\ua6ea\ua6eb\ua6ec\ua6ed\ua6ee\ua6ef\ua6f0\ua6f1\ua6f2\ua6f3\ua6f4\ua6f5\ua6f6\ua6f7\ua6f8\ua6f9\ua6fa\ua6fb\ua6fc\ua6fd\ua6fe\ua6ff\ua717\ua718\ua719\ua71a\ua71b\ua71c\ua71d\ua71e\ua71f\ua720\ua721\ua722\ua723\ua724\ua725\ua726\ua727\ua728\ua729\ua72a\ua72b\ua72c\ua72d\ua72e\ua72f\ua730\ua731\ua732\ua733\ua734\ua735\ua736\ua737\ua738\ua739\ua73a\ua73b\ua73c\ua73d\ua73e\ua73f\ua740\ua741\ua742\ua743\ua744\ua745\ua746\ua747\ua748\ua749\ua74a\ua74b\ua74c\ua74d\ua74e\ua74f\ua750\ua751\ua752\ua753\ua754\ua755\ua756\ua757\ua758\ua759\ua75a\ua75b\ua75c\ua75d\ua75e\ua75f\ua760\ua761\ua762\ua763\ua764\ua765\ua766\ua767\ua768\ua769\ua76a\ua76b\ua76c\ua76d\ua76e\ua76f\ua770\ua771\ua772\ua773\ua774\ua775\ua776\ua777\ua778\ua779\ua77a\ua77b\ua77c\ua77d\ua77e\ua77f\ua780\ua781\ua782\ua783\ua784\ua785\ua786\ua787\ua788\ua789\ua78a\ua78b\ua78c\ua78d\ua78e\ua78f\ua790\ua791\ua792\ua793\ua794\ua795\ua796\ua797\ua798\ua799\ua79a\ua79b\ua79c\ua79d\ua79e\ua79f\ua7a0\ua7a1\ua7a2\ua7a3\ua7a4\ua7a5\ua7a6\ua7a7\ua7a8\ua7a9\ua7aa\ua7ab\ua7ac\ua7ad\ua7ae\ua7af\ua7b0\ua7b1\ua7b2\ua7b3\ua7b4\ua7b5\ua7b6\ua7b7\ua7b8\ua7b9\ua7ba\ua7bb\ua7bc\ua7bd\ua7be\ua7bf\ua7
c0\ua7c1\ua7c2\ua7c3\ua7c4\ua7c5\ua7c6\ua7c7\ua7c8\ua7c9\ua7ca\ua7cb\ua7cc\ua7cd\ua7ce\ua7cf\ua7d0\ua7d1\ua7d2\ua7d3\ua7d4\ua7d5\ua7d6\ua7d7\ua7d8\ua7d9\ua7da\ua7db\ua7dc\ua7dd\ua7de\ua7df\ua7e0\ua7e1\ua7e2\ua7e3\ua7e4\ua7e5\ua7e6\ua7e7\ua7e8\ua7e9\ua7ea\ua7eb\ua7ec\ua7ed\ua7ee\ua7ef\ua7f0\ua7f1\ua7f2\ua7f3\ua7f4\ua7f5\ua7f6\ua7f7\ua7f8\ua7f9\ua7fa\ua7fb\ua7fc\ua7fd\ua7fe\ua7ff\ua82c\ua82d\ua82e\ua82f\ua830\ua831\ua832\ua833\ua834\ua835\ua836\ua837\ua838\ua839\ua83a\ua83b\ua83c\ua83d\ua83e\ua83f\ua840\ua841\ua842\ua843\ua844\ua845\ua846\ua847\ua848\ua849\ua84a\ua84b\ua84c\ua84d\ua84e\ua84f\ua850\ua851\ua852\ua853\ua854\ua855\ua856\ua857\ua858\ua859\ua85a\ua85b\ua85c\ua85d\ua85e\ua85f\ua860\ua861\ua862\ua863\ua864\ua865\ua866\ua867\ua868\ua869\ua86a\ua86b\ua86c\ua86d\ua86e\ua86f\ua870\ua871\ua872\ua873\ua874\ua875\ua876\ua877\ua878\ua879\ua87a\ua87b\ua87c\ua87d\ua87e\ua87f\ua880\ua881\ua882\ua883\ua884\ua885\ua886\ua887\ua888\ua889\ua88a\ua88b\ua88c\ua88d\ua88e\ua88f\ua890\ua891\ua892\ua893\ua894\ua895\ua896\ua897\ua898\ua899\ua89a\ua89b\ua89c\ua89d\ua89e\ua89f\ua8a0\ua8a1\ua8a2\ua8a3\ua8a4\ua8a5\ua8a6\ua8a7\ua8a8\ua8a9\ua8aa\ua8ab\ua8ac\ua8ad\ua8ae\ua8af\ua8b0\ua8b1\ua8b2\ua8b3\ua8b4\ua8b5\ua8b6\ua8b7\ua8b8\ua8b9\ua8ba\ua8bb\ua8bc\ua8bd\ua8be\ua8bf\ua8c0\ua8c1\ua8c2\ua8c3\ua8c4\ua8c5\ua8c6\ua8c7\ua8c8\ua8c9\ua8ca\ua8cb\ua8cc\ua8cd\ua8ce\ua8cf\ua8d0\ua8d1\ua8d2\ua8d3\ua8d4\ua8d5\ua8d6\ua8d7\ua8d8\ua8d9\ua8da\ua8db\ua8dc\ua8dd\ua8de\ua8df\ua8e0\ua8e1\ua8e2\ua8e3\ua8e4\ua8e5\ua8e6\ua8e7\ua8e8\ua8e9\ua8ea\ua8eb\ua8ec\ua8ed\ua8ee\ua8ef\ua8f0\ua8f1\ua8f2\ua8f3\ua8f4\ua8f5\ua8f6\ua8f7\ua8f8\ua8f9\ua8fa\ua8fb\ua8fc\ua8fd\ua8fe\ua8ff\ua900\ua901\ua902\ua903\ua904\ua905\ua906\ua907\ua908\ua909\ua90a\ua90b\ua90c\ua90d\ua90e\ua90f\ua910\ua911\ua912\ua913\ua914\ua915\ua916\ua917\ua918\ua919\ua91a\ua91b\ua91c\ua91d\ua91e\ua91f\ua920\ua921\ua922\ua923\ua924\ua925\ua926\ua927\ua928\ua929\ua92a\ua92b\ua92c\ua92d\ua92e\ua92f\ua930\ua931\ua932\ua933\ua934\ua935\ua936\ua937\ua938\ua939
\ua93a\ua93b\ua93c\ua93d\ua93e\ua93f\ua940\ua941\ua942\ua943\ua944\ua945\ua946\ua947\ua948\ua949\ua94a\ua94b\ua94c\ua94d\ua94e\ua94f\ua950\ua951\ua952\ua953\ua954\ua955\ua956\ua957\ua958\ua959\ua95a\ua95b\ua95c\ua95d\ua95e\ua95f\ua960\ua961\ua962\ua963\ua964\ua965\ua966\ua967\ua968\ua969\ua96a\ua96b\ua96c\ua96d\ua96e\ua96f\ua970\ua971\ua972\ua973\ua974\ua975\ua976\ua977\ua978\ua979\ua97a\ua97b\ua97c\ua97d\ua97e\ua97f\ua980\ua981\ua982\ua983\ua984\ua985\ua986\ua987\ua988\ua989\ua98a\ua98b\ua98c\ua98d\ua98e\ua98f\ua990\ua991\ua992\ua993\ua994\ua995\ua996\ua997\ua998\ua999\ua99a\ua99b\ua99c\ua99d\ua99e\ua99f\ua9a0\ua9a1\ua9a2\ua9a3\ua9a4\ua9a5\ua9a6\ua9a7\ua9a8\ua9a9\ua9aa\ua9ab\ua9ac\ua9ad\ua9ae\ua9af\ua9b0\ua9b1\ua9b2\ua9b3\ua9b4\ua9b5\ua9b6\ua9b7\ua9b8\ua9b9\ua9ba\ua9bb\ua9bc\ua9bd\ua9be\ua9bf\ua9c0\ua9c1\ua9c2\ua9c3\ua9c4\ua9c5\ua9c6\ua9c7\ua9c8\ua9c9\ua9ca\ua9cb\ua9cc\ua9cd\ua9ce\ua9cf\ua9d0\ua9d1\ua9d2\ua9d3\ua9d4\ua9d5\ua9d6\ua9d7\ua9d8\ua9d9\ua9da\ua9db\ua9dc\ua9dd\ua9de\ua9df\ua9e0\ua9e1\ua9e2\ua9e3\ua9e4\ua9e5\ua9e6\ua9e7\ua9e8\ua9e9\ua9ea\ua9eb\ua9ec\ua9ed\ua9ee\ua9ef\ua9f0\ua9f1\ua9f2\ua9f3\ua9f4\ua9f5\ua9f6\ua9f7\ua9f8\ua9f9\ua9fa\ua9fb\ua9fc\ua9fd\ua9fe\ua9ff\uaa00\uaa01\uaa02\uaa03\uaa04\uaa05\uaa06\uaa07\uaa08\uaa09\uaa0a\uaa0b\uaa0c\uaa0d\uaa0e\uaa0f\uaa10\uaa11\uaa12\uaa13\uaa14\uaa15\uaa16\uaa17\uaa18\uaa19\uaa1a\uaa1b\uaa1c\uaa1d\uaa1e\uaa1f\uaa20\uaa21\uaa22\uaa23\uaa24\uaa25\uaa26\uaa27\uaa28\uaa29\uaa2a\uaa2b\uaa2c\uaa2d\uaa2e\uaa2f\uaa30\uaa31\uaa32\uaa33\uaa34\uaa35\uaa36\uaa37\uaa38\uaa39\uaa3a\uaa3b\uaa3c\uaa3d\uaa3e\uaa3f\uaa40\uaa41\uaa42\uaa43\uaa44\uaa45\uaa46\uaa47\uaa48\uaa49\uaa4a\uaa4b\uaa4c\uaa4d\uaa4e\uaa4f\uaa50\uaa51\uaa52\uaa53\uaa54\uaa55\uaa56\uaa57\uaa58\uaa59\uaa5a\uaa5b\uaa5c\uaa5d\uaa5e\uaa5f\uaa60\uaa61\uaa62\uaa63\uaa64\uaa65\uaa66\uaa67\uaa68\uaa69\uaa6a\uaa6b\uaa6c\uaa6d\uaa6e\uaa6f\uaa70\uaa71\uaa72\uaa73\uaa74\uaa75\uaa76\uaa77\uaa78\uaa79\uaa7a\uaa7b\uaa7c\uaa7d\uaa7e\uaa7f\uaa80\uaa81\uaa82\uaa83\uaa84\uaa85\uaa86\u
aa87\uaa88\uaa89\uaa8a\uaa8b\uaa8c\uaa8d\uaa8e\uaa8f\uaa90\uaa91\uaa92\uaa93\uaa94\uaa95\uaa96\uaa97\uaa98\uaa99\uaa9a\uaa9b\uaa9c\uaa9d\uaa9e\uaa9f\uaaa0\uaaa1\uaaa2\uaaa3\uaaa4\uaaa5\uaaa6\uaaa7\uaaa8\uaaa9\uaaaa\uaaab\uaaac\uaaad\uaaae\uaaaf\uaab0\uaab1\uaab2\uaab3\uaab4\uaab5\uaab6\uaab7\uaab8\uaab9\uaaba\uaabb\uaabc\uaabd\uaabe\uaabf\uaac0\uaac1\uaac2\uaac3\uaac4\uaac5\uaac6\uaac7\uaac8\uaac9\uaaca\uaacb\uaacc\uaacd\uaace\uaacf\uaad0\uaad1\uaad2\uaad3\uaad4\uaad5\uaad6\uaad7\uaad8\uaad9\uaada\uaadb\uaadc\uaadd\uaade\uaadf\uaae0\uaae1\uaae2\uaae3\uaae4\uaae5\uaae6\uaae7\uaae8\uaae9\uaaea\uaaeb\uaaec\uaaed\uaaee\uaaef\uaaf0\uaaf1\uaaf2\uaaf3\uaaf4\uaaf5\uaaf6\uaaf7\uaaf8\uaaf9\uaafa\uaafb\uaafc\uaafd\uaafe\uaaff\uab00\uab01\uab02\uab03\uab04\uab05\uab06\uab07\uab08\uab09\uab0a\uab0b\uab0c\uab0d\uab0e\uab0f\uab10\uab11\uab12\uab13\uab14\uab15\uab16\uab17\uab18\uab19\uab1a\uab1b\uab1c\uab1d\uab1e\uab1f\uab20\uab21\uab22\uab23\uab24\uab25\uab26\uab27\uab28\uab29\uab2a\uab2b\uab2c\uab2d\uab2e\uab2f\uab30\uab31\uab32\uab33\uab34\uab35\uab36\uab37\uab38\uab39\uab3a\uab3b\uab3c\uab3d\uab3e\uab3f\uab40\uab41\uab42\uab43\uab44\uab45\uab46\uab47\uab48\uab49\uab4a\uab4b\uab4c\uab4d\uab4e\uab4f\uab50\uab51\uab52\uab53\uab54\uab55\uab56\uab57\uab58\uab59\uab5a\uab5b\uab5c\uab5d\uab5e\uab5f\uab60\uab61\uab62\uab63\uab64\uab65\uab66\uab67\uab68\uab69\uab6a\uab6b\uab6c\uab6d\uab6e\uab6f\uab70\uab71\uab72\uab73\uab74\uab75\uab76\uab77\uab78\uab79\uab7a\uab7b\uab7c\uab7d\uab7e\uab7f\uab80\uab81\uab82\uab83\uab84\uab85\uab86\uab87\uab88\uab89\uab8a\uab8b\uab8c\uab8d\uab8e\uab8f\uab90\uab91\uab92\uab93\uab94\uab95\uab96\uab97\uab98\uab99\uab9a\uab9b\uab9c\uab9d\uab9e\uab9f\uaba0\uaba1\uaba2\uaba3\uaba4\uaba5\uaba6\uaba7\uaba8\uaba9\uabaa\uabab\uabac\uabad\uabae\uabaf\uabb0\uabb1\uabb2\uabb3\uabb4\uabb5\uabb6\uabb7\uabb8\uabb9\uabba\uabbb\uabbc\uabbd\uabbe\uabbf\uabc0\uabc1\uabc2\uabc3\uabc4\uabc5\uabc6\uabc7\uabc8\uabc9\uabca\uabcb\uabcc\uabcd\uabce\uabcf\uabd0\uabd1\uabd2\uabd3\uab
d4\uabd5\uabd6\uabd7\uabd8\uabd9\uabda\uabdb\uabdc\uabdd\uabde\uabdf\uabe0\uabe1\uabe2\uabe3\uabe4\uabe5\uabe6\uabe7\uabe8\uabe9\uabea\uabeb\uabec\uabed\uabee\uabef\uabf0\uabf1\uabf2\uabf3\uabf4\uabf5\uabf6\uabf7\uabf8\uabf9\uabfa\uabfb\uabfc\uabfd\uabfe\uabff\ud7a4\ud7a5\ud7a6\ud7a7\ud7a8\ud7a9\ud7aa\ud7ab\ud7ac\ud7ad\ud7ae\ud7af\ud7b0\ud7b1\ud7b2\ud7b3\ud7b4\ud7b5\ud7b6\ud7b7\ud7b8\ud7b9\ud7ba\ud7bb\ud7bc\ud7bd\ud7be\ud7bf\ud7c0\ud7c1\ud7c2\ud7c3\ud7c4\ud7c5\ud7c6\ud7c7\ud7c8\ud7c9\ud7ca\ud7cb\ud7cc\ud7cd\ud7ce\ud7cf\ud7d0\ud7d1\ud7d2\ud7d3\ud7d4\ud7d5\ud7d6\ud7d7\ud7d8\ud7d9\ud7da\ud7db\ud7dc\ud7dd\ud7de\ud7df\ud7e0\ud7e1\ud7e2\ud7e3\ud7e4\ud7e5\ud7e6\ud7e7\ud7e8\ud7e9\ud7ea\ud7eb\ud7ec\ud7ed\ud7ee\ud7ef\ud7f0\ud7f1\ud7f2\ud7f3\ud7f4\ud7f5\ud7f6\ud7f7\ud7f8\ud7f9\ud7fa\ud7fb\ud7fc\ud7fd\ud7fe\ud7ff\ufa2e\ufa2f\ufa6b\ufa6c\ufa6d\ufa6e\ufa6f\ufada\ufadb\ufadc\ufadd\ufade\ufadf\ufae0\ufae1\ufae2\ufae3\ufae4\ufae5\ufae6\ufae7\ufae8\ufae9\ufaea\ufaeb\ufaec\ufaed\ufaee\ufaef\ufaf0\ufaf1\ufaf2\ufaf3\ufaf4\ufaf5\ufaf6\ufaf7\ufaf8\ufaf9\ufafa\ufafb\ufafc\ufafd\ufafe\ufaff\ufb07\ufb08\ufb09\ufb0a\ufb0b\ufb0c\ufb0d\ufb0e\ufb0f\ufb10\ufb11\ufb12\ufb18\ufb19\ufb1a\ufb1b\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbb2\ufbb3\ufbb4\ufbb5\ufbb6\ufbb7\ufbb8\ufbb9\ufbba\ufbbb\ufbbc\ufbbd\ufbbe\ufbbf\ufbc0\ufbc1\ufbc2\ufbc3\ufbc4\ufbc5\ufbc6\ufbc7\ufbc8\ufbc9\ufbca\ufbcb\ufbcc\ufbcd\ufbce\ufbcf\ufbd0\ufbd1\ufbd2\ufd40\ufd41\ufd42\ufd43\ufd44\ufd45\ufd46\ufd47\ufd48\ufd49\ufd4a\ufd4b\ufd4c\ufd4d\ufd4e\ufd4f\ufd90\ufd91\ufdc8\ufdc9\ufdca\ufdcb\ufdcc\ufdcd\ufdce\ufdcf\ufdd0\ufdd1\ufdd2\ufdd3\ufdd4\ufdd5\ufdd6\ufdd7\ufdd8\ufdd9\ufdda\ufddb\ufddc\ufddd\ufdde\ufddf\ufde0\ufde1\ufde2\ufde3\ufde4\ufde5\ufde6\ufde7\ufde8\ufde9\ufdea\ufdeb\ufdec\ufded\ufdee\ufdef\ufdfe\ufdff\ufe1a\ufe1b\ufe1c\ufe1d\ufe1e\ufe1f\ufe24\ufe25\ufe26\ufe27\ufe28\ufe29\ufe2a\ufe2b\ufe2c\ufe2d\ufe2e\ufe2f\ufe53\ufe67\ufe6c\ufe6d\ufe6e\ufe6f\ufe75\ufefd\ufefe\uff00\uffbf\uffc0\uffc1\uffc8\uffc9\uffd0\uffd1\uffd8\uffd9\uffdd
\uffde\uffdf\uffe7\uffef\ufff0\ufff1\ufff2\ufff3\ufff4\ufff5\ufff6\ufff7\ufff8\ufffe' + +Co = u'\ue000\ue001\ue002\ue003\ue004\ue005\ue006\ue007\ue008\ue009\ue00a\ue00b\ue00c\ue00d\ue00e\ue00f\ue010\ue011\ue012\ue013\ue014\ue015\ue016\ue017\ue018\ue019\ue01a\ue01b\ue01c\ue01d\ue01e\ue01f\ue020\ue021\ue022\ue023\ue024\ue025\ue026\ue027\ue028\ue029\ue02a\ue02b\ue02c\ue02d\ue02e\ue02f\ue030\ue031\ue032\ue033\ue034\ue035\ue036\ue037\ue038\ue039\ue03a\ue03b\ue03c\ue03d\ue03e\ue03f\ue040\ue041\ue042\ue043\ue044\ue045\ue046\ue047\ue048\ue049\ue04a\ue04b\ue04c\ue04d\ue04e\ue04f\ue050\ue051\ue052\ue053\ue054\ue055\ue056\ue057\ue058\ue059\ue05a\ue05b\ue05c\ue05d\ue05e\ue05f\ue060\ue061\ue062\ue063\ue064\ue065\ue066\ue067\ue068\ue069\ue06a\ue06b\ue06c\ue06d\ue06e\ue06f\ue070\ue071\ue072\ue073\ue074\ue075\ue076\ue077\ue078\ue079\ue07a\ue07b\ue07c\ue07d\ue07e\ue07f\ue080\ue081\ue082\ue083\ue084\ue085\ue086\ue087\ue088\ue089\ue08a\ue08b\ue08c\ue08d\ue08e\ue08f\ue090\ue091\ue092\ue093\ue094\ue095\ue096\ue097\ue098\ue099\ue09a\ue09b\ue09c\ue09d\ue09e\ue09f\ue0a0\ue0a1\ue0a2\ue0a3\ue0a4\ue0a5\ue0a6\ue0a7\ue0a8\ue0a9\ue0aa\ue0ab\ue0ac\ue0ad\ue0ae\ue0af\ue0b0\ue0b1\ue0b2\ue0b3\ue0b4\ue0b5\ue0b6\ue0b7\ue0b8\ue0b9\ue0ba\ue0bb\ue0bc\ue0bd\ue0be\ue0bf\ue0c0\ue0c1\ue0c2\ue0c3\ue0c4\ue0c5\ue0c6\ue0c7\ue0c8\ue0c9\ue0ca\ue0cb\ue0cc\ue0cd\ue0ce\ue0cf\ue0d0\ue0d1\ue0d2\ue0d3\ue0d4\ue0d5\ue0d6\ue0d7\ue0d8\ue0d9\ue0da\ue0db\ue0dc\ue0dd\ue0de\ue0df\ue0e0\ue0e1\ue0e2\ue0e3\ue0e4\ue0e5\ue0e6\ue0e7\ue0e8\ue0e9\ue0ea\ue0eb\ue0ec\ue0ed\ue0ee\ue0ef\ue0f0\ue0f1\ue0f2\ue0f3\ue0f4\ue0f5\ue0f6\ue0f7\ue0f8\ue0f9\ue0fa\ue0fb\ue0fc\ue0fd\ue0fe\ue0ff\ue100\ue101\ue102\ue103\ue104\ue105\ue106\ue107\ue108\ue109\ue10a\ue10b\ue10c\ue10d\ue10e\ue10f\ue110\ue111\ue112\ue113\ue114\ue115\ue116\ue117\ue118\ue119\ue11a\ue11b\ue11c\ue11d\ue11e\ue11f\ue120\ue121\ue122\ue123\ue124\ue125\ue126\ue127\ue128\ue129\ue12a\ue12b\ue12c\ue12d\ue12e\ue12f\ue130\ue131\ue132\ue133\ue134\ue135\ue136\ue137\ue138\ue139\ue13a\ue13b\ue13c\u
e13d\ue13e\ue13f\ue140\ue141\ue142\ue143\ue144\ue145\ue146\ue147\ue148\ue149\ue14a\ue14b\ue14c\ue14d\ue14e\ue14f\ue150\ue151\ue152\ue153\ue154\ue155\ue156\ue157\ue158\ue159\ue15a\ue15b\ue15c\ue15d\ue15e\ue15f\ue160\ue161\ue162\ue163\ue164\ue165\ue166\ue167\ue168\ue169\ue16a\ue16b\ue16c\ue16d\ue16e\ue16f\ue170\ue171\ue172\ue173\ue174\ue175\ue176\ue177\ue178\ue179\ue17a\ue17b\ue17c\ue17d\ue17e\ue17f\ue180\ue181\ue182\ue183\ue184\ue185\ue186\ue187\ue188\ue189\ue18a\ue18b\ue18c\ue18d\ue18e\ue18f\ue190\ue191\ue192\ue193\ue194\ue195\ue196\ue197\ue198\ue199\ue19a\ue19b\ue19c\ue19d\ue19e\ue19f\ue1a0\ue1a1\ue1a2\ue1a3\ue1a4\ue1a5\ue1a6\ue1a7\ue1a8\ue1a9\ue1aa\ue1ab\ue1ac\ue1ad\ue1ae\ue1af\ue1b0\ue1b1\ue1b2\ue1b3\ue1b4\ue1b5\ue1b6\ue1b7\ue1b8\ue1b9\ue1ba\ue1bb\ue1bc\ue1bd\ue1be\ue1bf\ue1c0\ue1c1\ue1c2\ue1c3\ue1c4\ue1c5\ue1c6\ue1c7\ue1c8\ue1c9\ue1ca\ue1cb\ue1cc\ue1cd\ue1ce\ue1cf\ue1d0\ue1d1\ue1d2\ue1d3\ue1d4\ue1d5\ue1d6\ue1d7\ue1d8\ue1d9\ue1da\ue1db\ue1dc\ue1dd\ue1de\ue1df\ue1e0\ue1e1\ue1e2\ue1e3\ue1e4\ue1e5\ue1e6\ue1e7\ue1e8\ue1e9\ue1ea\ue1eb\ue1ec\ue1ed\ue1ee\ue1ef\ue1f0\ue1f1\ue1f2\ue1f3\ue1f4\ue1f5\ue1f6\ue1f7\ue1f8\ue1f9\ue1fa\ue1fb\ue1fc\ue1fd\ue1fe\ue1ff\ue200\ue201\ue202\ue203\ue204\ue205\ue206\ue207\ue208\ue209\ue20a\ue20b\ue20c\ue20d\ue20e\ue20f\ue210\ue211\ue212\ue213\ue214\ue215\ue216\ue217\ue218\ue219\ue21a\ue21b\ue21c\ue21d\ue21e\ue21f\ue220\ue221\ue222\ue223\ue224\ue225\ue226\ue227\ue228\ue229\ue22a\ue22b\ue22c\ue22d\ue22e\ue22f\ue230\ue231\ue232\ue233\ue234\ue235\ue236\ue237\ue238\ue239\ue23a\ue23b\ue23c\ue23d\ue23e\ue23f\ue240\ue241\ue242\ue243\ue244\ue245\ue246\ue247\ue248\ue249\ue24a\ue24b\ue24c\ue24d\ue24e\ue24f\ue250\ue251\ue252\ue253\ue254\ue255\ue256\ue257\ue258\ue259\ue25a\ue25b\ue25c\ue25d\ue25e\ue25f\ue260\ue261\ue262\ue263\ue264\ue265\ue266\ue267\ue268\ue269\ue26a\ue26b\ue26c\ue26d\ue26e\ue26f\ue270\ue271\ue272\ue273\ue274\ue275\ue276\ue277\ue278\ue279\ue27a\ue27b\ue27c\ue27d\ue27e\ue27f\ue280\ue281\ue282\ue283\ue284\ue285\ue286\ue287\ue288\ue289\ue2
8a\ue28b\ue28c\ue28d\ue28e\ue28f\ue290\ue291\ue292\ue293\ue294\ue295\ue296\ue297\ue298\ue299\ue29a\ue29b\ue29c\ue29d\ue29e\ue29f\ue2a0\ue2a1\ue2a2\ue2a3\ue2a4\ue2a5\ue2a6\ue2a7\ue2a8\ue2a9\ue2aa\ue2ab\ue2ac\ue2ad\ue2ae\ue2af\ue2b0\ue2b1\ue2b2\ue2b3\ue2b4\ue2b5\ue2b6\ue2b7\ue2b8\ue2b9\ue2ba\ue2bb\ue2bc\ue2bd\ue2be\ue2bf\ue2c0\ue2c1\ue2c2\ue2c3\ue2c4\ue2c5\ue2c6\ue2c7\ue2c8\ue2c9\ue2ca\ue2cb\ue2cc\ue2cd\ue2ce\ue2cf\ue2d0\ue2d1\ue2d2\ue2d3\ue2d4\ue2d5\ue2d6\ue2d7\ue2d8\ue2d9\ue2da\ue2db\ue2dc\ue2dd\ue2de\ue2df\ue2e0\ue2e1\ue2e2\ue2e3\ue2e4\ue2e5\ue2e6\ue2e7\ue2e8\ue2e9\ue2ea\ue2eb\ue2ec\ue2ed\ue2ee\ue2ef\ue2f0\ue2f1\ue2f2\ue2f3\ue2f4\ue2f5\ue2f6\ue2f7\ue2f8\ue2f9\ue2fa\ue2fb\ue2fc\ue2fd\ue2fe\ue2ff\ue300\ue301\ue302\ue303\ue304\ue305\ue306\ue307\ue308\ue309\ue30a\ue30b\ue30c\ue30d\ue30e\ue30f\ue310\ue311\ue312\ue313\ue314\ue315\ue316\ue317\ue318\ue319\ue31a\ue31b\ue31c\ue31d\ue31e\ue31f\ue320\ue321\ue322\ue323\ue324\ue325\ue326\ue327\ue328\ue329\ue32a\ue32b\ue32c\ue32d\ue32e\ue32f\ue330\ue331\ue332\ue333\ue334\ue335\ue336\ue337\ue338\ue339\ue33a\ue33b\ue33c\ue33d\ue33e\ue33f\ue340\ue341\ue342\ue343\ue344\ue345\ue346\ue347\ue348\ue349\ue34a\ue34b\ue34c\ue34d\ue34e\ue34f\ue350\ue351\ue352\ue353\ue354\ue355\ue356\ue357\ue358\ue359\ue35a\ue35b\ue35c\ue35d\ue35e\ue35f\ue360\ue361\ue362\ue363\ue364\ue365\ue366\ue367\ue368\ue369\ue36a\ue36b\ue36c\ue36d\ue36e\ue36f\ue370\ue371\ue372\ue373\ue374\ue375\ue376\ue377\ue378\ue379\ue37a\ue37b\ue37c\ue37d\ue37e\ue37f\ue380\ue381\ue382\ue383\ue384\ue385\ue386\ue387\ue388\ue389\ue38a\ue38b\ue38c\ue38d\ue38e\ue38f\ue390\ue391\ue392\ue393\ue394\ue395\ue396\ue397\ue398\ue399\ue39a\ue39b\ue39c\ue39d\ue39e\ue39f\ue3a0\ue3a1\ue3a2\ue3a3\ue3a4\ue3a5\ue3a6\ue3a7\ue3a8\ue3a9\ue3aa\ue3ab\ue3ac\ue3ad\ue3ae\ue3af\ue3b0\ue3b1\ue3b2\ue3b3\ue3b4\ue3b5\ue3b6\ue3b7\ue3b8\ue3b9\ue3ba\ue3bb\ue3bc\ue3bd\ue3be\ue3bf\ue3c0\ue3c1\ue3c2\ue3c3\ue3c4\ue3c5\ue3c6\ue3c7\ue3c8\ue3c9\ue3ca\ue3cb\ue3cc\ue3cd\ue3ce\ue3cf\ue3d0\ue3d1\ue3d2\ue3d3\ue3d4\ue3d5\ue3d6\ue3d7
\ue3d8\ue3d9\ue3da\ue3db\ue3dc\ue3dd\ue3de\ue3df\ue3e0\ue3e1\ue3e2\ue3e3\ue3e4\ue3e5\ue3e6\ue3e7\ue3e8\ue3e9\ue3ea\ue3eb\ue3ec\ue3ed\ue3ee\ue3ef\ue3f0\ue3f1\ue3f2\ue3f3\ue3f4\ue3f5\ue3f6\ue3f7\ue3f8\ue3f9\ue3fa\ue3fb\ue3fc\ue3fd\ue3fe\ue3ff\ue400\ue401\ue402\ue403\ue404\ue405\ue406\ue407\ue408\ue409\ue40a\ue40b\ue40c\ue40d\ue40e\ue40f\ue410\ue411\ue412\ue413\ue414\ue415\ue416\ue417\ue418\ue419\ue41a\ue41b\ue41c\ue41d\ue41e\ue41f\ue420\ue421\ue422\ue423\ue424\ue425\ue426\ue427\ue428\ue429\ue42a\ue42b\ue42c\ue42d\ue42e\ue42f\ue430\ue431\ue432\ue433\ue434\ue435\ue436\ue437\ue438\ue439\ue43a\ue43b\ue43c\ue43d\ue43e\ue43f\ue440\ue441\ue442\ue443\ue444\ue445\ue446\ue447\ue448\ue449\ue44a\ue44b\ue44c\ue44d\ue44e\ue44f\ue450\ue451\ue452\ue453\ue454\ue455\ue456\ue457\ue458\ue459\ue45a\ue45b\ue45c\ue45d\ue45e\ue45f\ue460\ue461\ue462\ue463\ue464\ue465\ue466\ue467\ue468\ue469\ue46a\ue46b\ue46c\ue46d\ue46e\ue46f\ue470\ue471\ue472\ue473\ue474\ue475\ue476\ue477\ue478\ue479\ue47a\ue47b\ue47c\ue47d\ue47e\ue47f\ue480\ue481\ue482\ue483\ue484\ue485\ue486\ue487\ue488\ue489\ue48a\ue48b\ue48c\ue48d\ue48e\ue48f\ue490\ue491\ue492\ue493\ue494\ue495\ue496\ue497\ue498\ue499\ue49a\ue49b\ue49c\ue49d\ue49e\ue49f\ue4a0\ue4a1\ue4a2\ue4a3\ue4a4\ue4a5\ue4a6\ue4a7\ue4a8\ue4a9\ue4aa\ue4ab\ue4ac\ue4ad\ue4ae\ue4af\ue4b0\ue4b1\ue4b2\ue4b3\ue4b4\ue4b5\ue4b6\ue4b7\ue4b8\ue4b9\ue4ba\ue4bb\ue4bc\ue4bd\ue4be\ue4bf\ue4c0\ue4c1\ue4c2\ue4c3\ue4c4\ue4c5\ue4c6\ue4c7\ue4c8\ue4c9\ue4ca\ue4cb\ue4cc\ue4cd\ue4ce\ue4cf\ue4d0\ue4d1\ue4d2\ue4d3\ue4d4\ue4d5\ue4d6\ue4d7\ue4d8\ue4d9\ue4da\ue4db\ue4dc\ue4dd\ue4de\ue4df\ue4e0\ue4e1\ue4e2\ue4e3\ue4e4\ue4e5\ue4e6\ue4e7\ue4e8\ue4e9\ue4ea\ue4eb\ue4ec\ue4ed\ue4ee\ue4ef\ue4f0\ue4f1\ue4f2\ue4f3\ue4f4\ue4f5\ue4f6\ue4f7\ue4f8\ue4f9\ue4fa\ue4fb\ue4fc\ue4fd\ue4fe\ue4ff\ue500\ue501\ue502\ue503\ue504\ue505\ue506\ue507\ue508\ue509\ue50a\ue50b\ue50c\ue50d\ue50e\ue50f\ue510\ue511\ue512\ue513\ue514\ue515\ue516\ue517\ue518\ue519\ue51a\ue51b\ue51c\ue51d\ue51e\ue51f\ue520\ue521\ue522\ue523\ue524\u
e525\ue526\ue527\ue528\ue529\ue52a\ue52b\ue52c\ue52d\ue52e\ue52f\ue530\ue531\ue532\ue533\ue534\ue535\ue536\ue537\ue538\ue539\ue53a\ue53b\ue53c\ue53d\ue53e\ue53f\ue540\ue541\ue542\ue543\ue544\ue545\ue546\ue547\ue548\ue549\ue54a\ue54b\ue54c\ue54d\ue54e\ue54f\ue550\ue551\ue552\ue553\ue554\ue555\ue556\ue557\ue558\ue559\ue55a\ue55b\ue55c\ue55d\ue55e\ue55f\ue560\ue561\ue562\ue563\ue564\ue565\ue566\ue567\ue568\ue569\ue56a\ue56b\ue56c\ue56d\ue56e\ue56f\ue570\ue571\ue572\ue573\ue574\ue575\ue576\ue577\ue578\ue579\ue57a\ue57b\ue57c\ue57d\ue57e\ue57f\ue580\ue581\ue582\ue583\ue584\ue585\ue586\ue587\ue588\ue589\ue58a\ue58b\ue58c\ue58d\ue58e\ue58f\ue590\ue591\ue592\ue593\ue594\ue595\ue596\ue597\ue598\ue599\ue59a\ue59b\ue59c\ue59d\ue59e\ue59f\ue5a0\ue5a1\ue5a2\ue5a3\ue5a4\ue5a5\ue5a6\ue5a7\ue5a8\ue5a9\ue5aa\ue5ab\ue5ac\ue5ad\ue5ae\ue5af\ue5b0\ue5b1\ue5b2\ue5b3\ue5b4\ue5b5\ue5b6\ue5b7\ue5b8\ue5b9\ue5ba\ue5bb\ue5bc\ue5bd\ue5be\ue5bf\ue5c0\ue5c1\ue5c2\ue5c3\ue5c4\ue5c5\ue5c6\ue5c7\ue5c8\ue5c9\ue5ca\ue5cb\ue5cc\ue5cd\ue5ce\ue5cf\ue5d0\ue5d1\ue5d2\ue5d3\ue5d4\ue5d5\ue5d6\ue5d7\ue5d8\ue5d9\ue5da\ue5db\ue5dc\ue5dd\ue5de\ue5df\ue5e0\ue5e1\ue5e2\ue5e3\ue5e4\ue5e5\ue5e6\ue5e7\ue5e8\ue5e9\ue5ea\ue5eb\ue5ec\ue5ed\ue5ee\ue5ef\ue5f0\ue5f1\ue5f2\ue5f3\ue5f4\ue5f5\ue5f6\ue5f7\ue5f8\ue5f9\ue5fa\ue5fb\ue5fc\ue5fd\ue5fe\ue5ff\ue600\ue601\ue602\ue603\ue604\ue605\ue606\ue607\ue608\ue609\ue60a\ue60b\ue60c\ue60d\ue60e\ue60f\ue610\ue611\ue612\ue613\ue614\ue615\ue616\ue617\ue618\ue619\ue61a\ue61b\ue61c\ue61d\ue61e\ue61f\ue620\ue621\ue622\ue623\ue624\ue625\ue626\ue627\ue628\ue629\ue62a\ue62b\ue62c\ue62d\ue62e\ue62f\ue630\ue631\ue632\ue633\ue634\ue635\ue636\ue637\ue638\ue639\ue63a\ue63b\ue63c\ue63d\ue63e\ue63f\ue640\ue641\ue642\ue643\ue644\ue645\ue646\ue647\ue648\ue649\ue64a\ue64b\ue64c\ue64d\ue64e\ue64f\ue650\ue651\ue652\ue653\ue654\ue655\ue656\ue657\ue658\ue659\ue65a\ue65b\ue65c\ue65d\ue65e\ue65f\ue660\ue661\ue662\ue663\ue664\ue665\ue666\ue667\ue668\ue669\ue66a\ue66b\ue66c\ue66d\ue66e\ue66f\ue670\ue671\ue6
72\ue673\ue674\ue675\ue676\ue677\ue678\ue679\ue67a\ue67b\ue67c\ue67d\ue67e\ue67f\ue680\ue681\ue682\ue683\ue684\ue685\ue686\ue687\ue688\ue689\ue68a\ue68b\ue68c\ue68d\ue68e\ue68f\ue690\ue691\ue692\ue693\ue694\ue695\ue696\ue697\ue698\ue699\ue69a\ue69b\ue69c\ue69d\ue69e\ue69f\ue6a0\ue6a1\ue6a2\ue6a3\ue6a4\ue6a5\ue6a6\ue6a7\ue6a8\ue6a9\ue6aa\ue6ab\ue6ac\ue6ad\ue6ae\ue6af\ue6b0\ue6b1\ue6b2\ue6b3\ue6b4\ue6b5\ue6b6\ue6b7\ue6b8\ue6b9\ue6ba\ue6bb\ue6bc\ue6bd\ue6be\ue6bf\ue6c0\ue6c1\ue6c2\ue6c3\ue6c4\ue6c5\ue6c6\ue6c7\ue6c8\ue6c9\ue6ca\ue6cb\ue6cc\ue6cd\ue6ce\ue6cf\ue6d0\ue6d1\ue6d2\ue6d3\ue6d4\ue6d5\ue6d6\ue6d7\ue6d8\ue6d9\ue6da\ue6db\ue6dc\ue6dd\ue6de\ue6df\ue6e0\ue6e1\ue6e2\ue6e3\ue6e4\ue6e5\ue6e6\ue6e7\ue6e8\ue6e9\ue6ea\ue6eb\ue6ec\ue6ed\ue6ee\ue6ef\ue6f0\ue6f1\ue6f2\ue6f3\ue6f4\ue6f5\ue6f6\ue6f7\ue6f8\ue6f9\ue6fa\ue6fb\ue6fc\ue6fd\ue6fe\ue6ff\ue700\ue701\ue702\ue703\ue704\ue705\ue706\ue707\ue708\ue709\ue70a\ue70b\ue70c\ue70d\ue70e\ue70f\ue710\ue711\ue712\ue713\ue714\ue715\ue716\ue717\ue718\ue719\ue71a\ue71b\ue71c\ue71d\ue71e\ue71f\ue720\ue721\ue722\ue723\ue724\ue725\ue726\ue727\ue728\ue729\ue72a\ue72b\ue72c\ue72d\ue72e\ue72f\ue730\ue731\ue732\ue733\ue734\ue735\ue736\ue737\ue738\ue739\ue73a\ue73b\ue73c\ue73d\ue73e\ue73f\ue740\ue741\ue742\ue743\ue744\ue745\ue746\ue747\ue748\ue749\ue74a\ue74b\ue74c\ue74d\ue74e\ue74f\ue750\ue751\ue752\ue753\ue754\ue755\ue756\ue757\ue758\ue759\ue75a\ue75b\ue75c\ue75d\ue75e\ue75f\ue760\ue761\ue762\ue763\ue764\ue765\ue766\ue767\ue768\ue769\ue76a\ue76b\ue76c\ue76d\ue76e\ue76f\ue770\ue771\ue772\ue773\ue774\ue775\ue776\ue777\ue778\ue779\ue77a\ue77b\ue77c\ue77d\ue77e\ue77f\ue780\ue781\ue782\ue783\ue784\ue785\ue786\ue787\ue788\ue789\ue78a\ue78b\ue78c\ue78d\ue78e\ue78f\ue790\ue791\ue792\ue793\ue794\ue795\ue796\ue797\ue798\ue799\ue79a\ue79b\ue79c\ue79d\ue79e\ue79f\ue7a0\ue7a1\ue7a2\ue7a3\ue7a4\ue7a5\ue7a6\ue7a7\ue7a8\ue7a9\ue7aa\ue7ab\ue7ac\ue7ad\ue7ae\ue7af\ue7b0\ue7b1\ue7b2\ue7b3\ue7b4\ue7b5\ue7b6\ue7b7\ue7b8\ue7b9\ue7ba\ue7bb\ue7bc\ue7bd\ue7be\ue7bf
\ue7c0\ue7c1\ue7c2\ue7c3\ue7c4\ue7c5\ue7c6\ue7c7\ue7c8\ue7c9\ue7ca\ue7cb\ue7cc\ue7cd\ue7ce\ue7cf\ue7d0\ue7d1\ue7d2\ue7d3\ue7d4\ue7d5\ue7d6\ue7d7\ue7d8\ue7d9\ue7da\ue7db\ue7dc\ue7dd\ue7de\ue7df\ue7e0\ue7e1\ue7e2\ue7e3\ue7e4\ue7e5\ue7e6\ue7e7\ue7e8\ue7e9\ue7ea\ue7eb\ue7ec\ue7ed\ue7ee\ue7ef\ue7f0\ue7f1\ue7f2\ue7f3\ue7f4\ue7f5\ue7f6\ue7f7\ue7f8\ue7f9\ue7fa\ue7fb\ue7fc\ue7fd\ue7fe\ue7ff\ue800\ue801\ue802\ue803\ue804\ue805\ue806\ue807\ue808\ue809\ue80a\ue80b\ue80c\ue80d\ue80e\ue80f\ue810\ue811\ue812\ue813\ue814\ue815\ue816\ue817\ue818\ue819\ue81a\ue81b\ue81c\ue81d\ue81e\ue81f\ue820\ue821\ue822\ue823\ue824\ue825\ue826\ue827\ue828\ue829\ue82a\ue82b\ue82c\ue82d\ue82e\ue82f\ue830\ue831\ue832\ue833\ue834\ue835\ue836\ue837\ue838\ue839\ue83a\ue83b\ue83c\ue83d\ue83e\ue83f\ue840\ue841\ue842\ue843\ue844\ue845\ue846\ue847\ue848\ue849\ue84a\ue84b\ue84c\ue84d\ue84e\ue84f\ue850\ue851\ue852\ue853\ue854\ue855\ue856\ue857\ue858\ue859\ue85a\ue85b\ue85c\ue85d\ue85e\ue85f\ue860\ue861\ue862\ue863\ue864\ue865\ue866\ue867\ue868\ue869\ue86a\ue86b\ue86c\ue86d\ue86e\ue86f\ue870\ue871\ue872\ue873\ue874\ue875\ue876\ue877\ue878\ue879\ue87a\ue87b\ue87c\ue87d\ue87e\ue87f\ue880\ue881\ue882\ue883\ue884\ue885\ue886\ue887\ue888\ue889\ue88a\ue88b\ue88c\ue88d\ue88e\ue88f\ue890\ue891\ue892\ue893\ue894\ue895\ue896\ue897\ue898\ue899\ue89a\ue89b\ue89c\ue89d\ue89e\ue89f\ue8a0\ue8a1\ue8a2\ue8a3\ue8a4\ue8a5\ue8a6\ue8a7\ue8a8\ue8a9\ue8aa\ue8ab\ue8ac\ue8ad\ue8ae\ue8af\ue8b0\ue8b1\ue8b2\ue8b3\ue8b4\ue8b5\ue8b6\ue8b7\ue8b8\ue8b9\ue8ba\ue8bb\ue8bc\ue8bd\ue8be\ue8bf\ue8c0\ue8c1\ue8c2\ue8c3\ue8c4\ue8c5\ue8c6\ue8c7\ue8c8\ue8c9\ue8ca\ue8cb\ue8cc\ue8cd\ue8ce\ue8cf\ue8d0\ue8d1\ue8d2\ue8d3\ue8d4\ue8d5\ue8d6\ue8d7\ue8d8\ue8d9\ue8da\ue8db\ue8dc\ue8dd\ue8de\ue8df\ue8e0\ue8e1\ue8e2\ue8e3\ue8e4\ue8e5\ue8e6\ue8e7\ue8e8\ue8e9\ue8ea\ue8eb\ue8ec\ue8ed\ue8ee\ue8ef\ue8f0\ue8f1\ue8f2\ue8f3\ue8f4\ue8f5\ue8f6\ue8f7\ue8f8\ue8f9\ue8fa\ue8fb\ue8fc\ue8fd\ue8fe\ue8ff\ue900\ue901\ue902\ue903\ue904\ue905\ue906\ue907\ue908\ue909\ue90a\ue90b\ue90c\u
e90d\ue90e\ue90f\ue910\ue911\ue912\ue913\ue914\ue915\ue916\ue917\ue918\ue919\ue91a\ue91b\ue91c\ue91d\ue91e\ue91f\ue920\ue921\ue922\ue923\ue924\ue925\ue926\ue927\ue928\ue929\ue92a\ue92b\ue92c\ue92d\ue92e\ue92f\ue930\ue931\ue932\ue933\ue934\ue935\ue936\ue937\ue938\ue939\ue93a\ue93b\ue93c\ue93d\ue93e\ue93f\ue940\ue941\ue942\ue943\ue944\ue945\ue946\ue947\ue948\ue949\ue94a\ue94b\ue94c\ue94d\ue94e\ue94f\ue950\ue951\ue952\ue953\ue954\ue955\ue956\ue957\ue958\ue959\ue95a\ue95b\ue95c\ue95d\ue95e\ue95f\ue960\ue961\ue962\ue963\ue964\ue965\ue966\ue967\ue968\ue969\ue96a\ue96b\ue96c\ue96d\ue96e\ue96f\ue970\ue971\ue972\ue973\ue974\ue975\ue976\ue977\ue978\ue979\ue97a\ue97b\ue97c\ue97d\ue97e\ue97f\ue980\ue981\ue982\ue983\ue984\ue985\ue986\ue987\ue988\ue989\ue98a\ue98b\ue98c\ue98d\ue98e\ue98f\ue990\ue991\ue992\ue993\ue994\ue995\ue996\ue997\ue998\ue999\ue99a\ue99b\ue99c\ue99d\ue99e\ue99f\ue9a0\ue9a1\ue9a2\ue9a3\ue9a4\ue9a5\ue9a6\ue9a7\ue9a8\ue9a9\ue9aa\ue9ab\ue9ac\ue9ad\ue9ae\ue9af\ue9b0\ue9b1\ue9b2\ue9b3\ue9b4\ue9b5\ue9b6\ue9b7\ue9b8\ue9b9\ue9ba\ue9bb\ue9bc\ue9bd\ue9be\ue9bf\ue9c0\ue9c1\ue9c2\ue9c3\ue9c4\ue9c5\ue9c6\ue9c7\ue9c8\ue9c9\ue9ca\ue9cb\ue9cc\ue9cd\ue9ce\ue9cf\ue9d0\ue9d1\ue9d2\ue9d3\ue9d4\ue9d5\ue9d6\ue9d7\ue9d8\ue9d9\ue9da\ue9db\ue9dc\ue9dd\ue9de\ue9df\ue9e0\ue9e1\ue9e2\ue9e3\ue9e4\ue9e5\ue9e6\ue9e7\ue9e8\ue9e9\ue9ea\ue9eb\ue9ec\ue9ed\ue9ee\ue9ef\ue9f0\ue9f1\ue9f2\ue9f3\ue9f4\ue9f5\ue9f6\ue9f7\ue9f8\ue9f9\ue9fa\ue9fb\ue9fc\ue9fd\ue9fe\ue9ff\uea00\uea01\uea02\uea03\uea04\uea05\uea06\uea07\uea08\uea09\uea0a\uea0b\uea0c\uea0d\uea0e\uea0f\uea10\uea11\uea12\uea13\uea14\uea15\uea16\uea17\uea18\uea19\uea1a\uea1b\uea1c\uea1d\uea1e\uea1f\uea20\uea21\uea22\uea23\uea24\uea25\uea26\uea27\uea28\uea29\uea2a\uea2b\uea2c\uea2d\uea2e\uea2f\uea30\uea31\uea32\uea33\uea34\uea35\uea36\uea37\uea38\uea39\uea3a\uea3b\uea3c\uea3d\uea3e\uea3f\uea40\uea41\uea42\uea43\uea44\uea45\uea46\uea47\uea48\uea49\uea4a\uea4b\uea4c\uea4d\uea4e\uea4f\uea50\uea51\uea52\uea53\uea54\uea55\uea56\uea57\uea58\uea59\uea
5a\uea5b\uea5c\uea5d\uea5e\uea5f\uea60\uea61\uea62\uea63\uea64\uea65\uea66\uea67\uea68\uea69\uea6a\uea6b\uea6c\uea6d\uea6e\uea6f\uea70\uea71\uea72\uea73\uea74\uea75\uea76\uea77\uea78\uea79\uea7a\uea7b\uea7c\uea7d\uea7e\uea7f\uea80\uea81\uea82\uea83\uea84\uea85\uea86\uea87\uea88\uea89\uea8a\uea8b\uea8c\uea8d\uea8e\uea8f\uea90\uea91\uea92\uea93\uea94\uea95\uea96\uea97\uea98\uea99\uea9a\uea9b\uea9c\uea9d\uea9e\uea9f\ueaa0\ueaa1\ueaa2\ueaa3\ueaa4\ueaa5\ueaa6\ueaa7\ueaa8\ueaa9\ueaaa\ueaab\ueaac\ueaad\ueaae\ueaaf\ueab0\ueab1\ueab2\ueab3\ueab4\ueab5\ueab6\ueab7\ueab8\ueab9\ueaba\ueabb\ueabc\ueabd\ueabe\ueabf\ueac0\ueac1\ueac2\ueac3\ueac4\ueac5\ueac6\ueac7\ueac8\ueac9\ueaca\ueacb\ueacc\ueacd\ueace\ueacf\uead0\uead1\uead2\uead3\uead4\uead5\uead6\uead7\uead8\uead9\ueada\ueadb\ueadc\ueadd\ueade\ueadf\ueae0\ueae1\ueae2\ueae3\ueae4\ueae5\ueae6\ueae7\ueae8\ueae9\ueaea\ueaeb\ueaec\ueaed\ueaee\ueaef\ueaf0\ueaf1\ueaf2\ueaf3\ueaf4\ueaf5\ueaf6\ueaf7\ueaf8\ueaf9\ueafa\ueafb\ueafc\ueafd\ueafe\ueaff\ueb00\ueb01\ueb02\ueb03\ueb04\ueb05\ueb06\ueb07\ueb08\ueb09\ueb0a\ueb0b\ueb0c\ueb0d\ueb0e\ueb0f\ueb10\ueb11\ueb12\ueb13\ueb14\ueb15\ueb16\ueb17\ueb18\ueb19\ueb1a\ueb1b\ueb1c\ueb1d\ueb1e\ueb1f\ueb20\ueb21\ueb22\ueb23\ueb24\ueb25\ueb26\ueb27\ueb28\ueb29\ueb2a\ueb2b\ueb2c\ueb2d\ueb2e\ueb2f\ueb30\ueb31\ueb32\ueb33\ueb34\ueb35\ueb36\ueb37\ueb38\ueb39\ueb3a\ueb3b\ueb3c\ueb3d\ueb3e\ueb3f\ueb40\ueb41\ueb42\ueb43\ueb44\ueb45\ueb46\ueb47\ueb48\ueb49\ueb4a\ueb4b\ueb4c\ueb4d\ueb4e\ueb4f\ueb50\ueb51\ueb52\ueb53\ueb54\ueb55\ueb56\ueb57\ueb58\ueb59\ueb5a\ueb5b\ueb5c\ueb5d\ueb5e\ueb5f\ueb60\ueb61\ueb62\ueb63\ueb64\ueb65\ueb66\ueb67\ueb68\ueb69\ueb6a\ueb6b\ueb6c\ueb6d\ueb6e\ueb6f\ueb70\ueb71\ueb72\ueb73\ueb74\ueb75\ueb76\ueb77\ueb78\ueb79\ueb7a\ueb7b\ueb7c\ueb7d\ueb7e\ueb7f\ueb80\ueb81\ueb82\ueb83\ueb84\ueb85\ueb86\ueb87\ueb88\ueb89\ueb8a\ueb8b\ueb8c\ueb8d\ueb8e\ueb8f\ueb90\ueb91\ueb92\ueb93\ueb94\ueb95\ueb96\ueb97\ueb98\ueb99\ueb9a\ueb9b\ueb9c\ueb9d\ueb9e\ueb9f\ueba0\ueba1\ueba2\ueba3\ueba4\ueba5\ueba6\ueba7
\ueba8\ueba9\uebaa\uebab\uebac\uebad\uebae\uebaf\uebb0\uebb1\uebb2\uebb3\uebb4\uebb5\uebb6\uebb7\uebb8\uebb9\uebba\uebbb\uebbc\uebbd\uebbe\uebbf\uebc0\uebc1\uebc2\uebc3\uebc4\uebc5\uebc6\uebc7\uebc8\uebc9\uebca\uebcb\uebcc\uebcd\uebce\uebcf\uebd0\uebd1\uebd2\uebd3\uebd4\uebd5\uebd6\uebd7\uebd8\uebd9\uebda\uebdb\uebdc\uebdd\uebde\uebdf\uebe0\uebe1\uebe2\uebe3\uebe4\uebe5\uebe6\uebe7\uebe8\uebe9\uebea\uebeb\uebec\uebed\uebee\uebef\uebf0\uebf1\uebf2\uebf3\uebf4\uebf5\uebf6\uebf7\uebf8\uebf9\uebfa\uebfb\uebfc\uebfd\uebfe\uebff\uec00\uec01\uec02\uec03\uec04\uec05\uec06\uec07\uec08\uec09\uec0a\uec0b\uec0c\uec0d\uec0e\uec0f\uec10\uec11\uec12\uec13\uec14\uec15\uec16\uec17\uec18\uec19\uec1a\uec1b\uec1c\uec1d\uec1e\uec1f\uec20\uec21\uec22\uec23\uec24\uec25\uec26\uec27\uec28\uec29\uec2a\uec2b\uec2c\uec2d\uec2e\uec2f\uec30\uec31\uec32\uec33\uec34\uec35\uec36\uec37\uec38\uec39\uec3a\uec3b\uec3c\uec3d\uec3e\uec3f\uec40\uec41\uec42\uec43\uec44\uec45\uec46\uec47\uec48\uec49\uec4a\uec4b\uec4c\uec4d\uec4e\uec4f\uec50\uec51\uec52\uec53\uec54\uec55\uec56\uec57\uec58\uec59\uec5a\uec5b\uec5c\uec5d\uec5e\uec5f\uec60\uec61\uec62\uec63\uec64\uec65\uec66\uec67\uec68\uec69\uec6a\uec6b\uec6c\uec6d\uec6e\uec6f\uec70\uec71\uec72\uec73\uec74\uec75\uec76\uec77\uec78\uec79\uec7a\uec7b\uec7c\uec7d\uec7e\uec7f\uec80\uec81\uec82\uec83\uec84\uec85\uec86\uec87\uec88\uec89\uec8a\uec8b\uec8c\uec8d\uec8e\uec8f\uec90\uec91\uec92\uec93\uec94\uec95\uec96\uec97\uec98\uec99\uec9a\uec9b\uec9c\uec9d\uec9e\uec9f\ueca0\ueca1\ueca2\ueca3\ueca4\ueca5\ueca6\ueca7\ueca8\ueca9\uecaa\uecab\uecac\uecad\uecae\uecaf\uecb0\uecb1\uecb2\uecb3\uecb4\uecb5\uecb6\uecb7\uecb8\uecb9\uecba\uecbb\uecbc\uecbd\uecbe\uecbf\uecc0\uecc1\uecc2\uecc3\uecc4\uecc5\uecc6\uecc7\uecc8\uecc9\uecca\ueccb\ueccc\ueccd\uecce\ueccf\uecd0\uecd1\uecd2\uecd3\uecd4\uecd5\uecd6\uecd7\uecd8\uecd9\uecda\uecdb\uecdc\uecdd\uecde\uecdf\uece0\uece1\uece2\uece3\uece4\uece5\uece6\uece7\uece8\uece9\uecea\ueceb\uecec\ueced\uecee\uecef\uecf0\uecf1\uecf2\uecf3\uecf4\u
ecf5\uecf6\uecf7\uecf8\uecf9\uecfa\uecfb\uecfc\uecfd\uecfe\uecff\ued00\ued01\ued02\ued03\ued04\ued05\ued06\ued07\ued08\ued09\ued0a\ued0b\ued0c\ued0d\ued0e\ued0f\ued10\ued11\ued12\ued13\ued14\ued15\ued16\ued17\ued18\ued19\ued1a\ued1b\ued1c\ued1d\ued1e\ued1f\ued20\ued21\ued22\ued23\ued24\ued25\ued26\ued27\ued28\ued29\ued2a\ued2b\ued2c\ued2d\ued2e\ued2f\ued30\ued31\ued32\ued33\ued34\ued35\ued36\ued37\ued38\ued39\ued3a\ued3b\ued3c\ued3d\ued3e\ued3f\ued40\ued41\ued42\ued43\ued44\ued45\ued46\ued47\ued48\ued49\ued4a\ued4b\ued4c\ued4d\ued4e\ued4f\ued50\ued51\ued52\ued53\ued54\ued55\ued56\ued57\ued58\ued59\ued5a\ued5b\ued5c\ued5d\ued5e\ued5f\ued60\ued61\ued62\ued63\ued64\ued65\ued66\ued67\ued68\ued69\ued6a\ued6b\ued6c\ued6d\ued6e\ued6f\ued70\ued71\ued72\ued73\ued74\ued75\ued76\ued77\ued78\ued79\ued7a\ued7b\ued7c\ued7d\ued7e\ued7f\ued80\ued81\ued82\ued83\ued84\ued85\ued86\ued87\ued88\ued89\ued8a\ued8b\ued8c\ued8d\ued8e\ued8f\ued90\ued91\ued92\ued93\ued94\ued95\ued96\ued97\ued98\ued99\ued9a\ued9b\ued9c\ued9d\ued9e\ued9f\ueda0\ueda1\ueda2\ueda3\ueda4\ueda5\ueda6\ueda7\ueda8\ueda9\uedaa\uedab\uedac\uedad\uedae\uedaf\uedb0\uedb1\uedb2\uedb3\uedb4\uedb5\uedb6\uedb7\uedb8\uedb9\uedba\uedbb\uedbc\uedbd\uedbe\uedbf\uedc0\uedc1\uedc2\uedc3\uedc4\uedc5\uedc6\uedc7\uedc8\uedc9\uedca\uedcb\uedcc\uedcd\uedce\uedcf\uedd0\uedd1\uedd2\uedd3\uedd4\uedd5\uedd6\uedd7\uedd8\uedd9\uedda\ueddb\ueddc\ueddd\uedde\ueddf\uede0\uede1\uede2\uede3\uede4\uede5\uede6\uede7\uede8\uede9\uedea\uedeb\uedec\ueded\uedee\uedef\uedf0\uedf1\uedf2\uedf3\uedf4\uedf5\uedf6\uedf7\uedf8\uedf9\uedfa\uedfb\uedfc\uedfd\uedfe\uedff\uee00\uee01\uee02\uee03\uee04\uee05\uee06\uee07\uee08\uee09\uee0a\uee0b\uee0c\uee0d\uee0e\uee0f\uee10\uee11\uee12\uee13\uee14\uee15\uee16\uee17\uee18\uee19\uee1a\uee1b\uee1c\uee1d\uee1e\uee1f\uee20\uee21\uee22\uee23\uee24\uee25\uee26\uee27\uee28\uee29\uee2a\uee2b\uee2c\uee2d\uee2e\uee2f\uee30\uee31\uee32\uee33\uee34\uee35\uee36\uee37\uee38\uee39\uee3a\uee3b\uee3c\uee3d\uee3e\uee3f\uee40\uee41\uee
42\uee43\uee44\uee45\uee46\uee47\uee48\uee49\uee4a\uee4b\uee4c\uee4d\uee4e\uee4f\uee50\uee51\uee52\uee53\uee54\uee55\uee56\uee57\uee58\uee59\uee5a\uee5b\uee5c\uee5d\uee5e\uee5f\uee60\uee61\uee62\uee63\uee64\uee65\uee66\uee67\uee68\uee69\uee6a\uee6b\uee6c\uee6d\uee6e\uee6f\uee70\uee71\uee72\uee73\uee74\uee75\uee76\uee77\uee78\uee79\uee7a\uee7b\uee7c\uee7d\uee7e\uee7f\uee80\uee81\uee82\uee83\uee84\uee85\uee86\uee87\uee88\uee89\uee8a\uee8b\uee8c\uee8d\uee8e\uee8f\uee90\uee91\uee92\uee93\uee94\uee95\uee96\uee97\uee98\uee99\uee9a\uee9b\uee9c\uee9d\uee9e\uee9f\ueea0\ueea1\ueea2\ueea3\ueea4\ueea5\ueea6\ueea7\ueea8\ueea9\ueeaa\ueeab\ueeac\ueead\ueeae\ueeaf\ueeb0\ueeb1\ueeb2\ueeb3\ueeb4\ueeb5\ueeb6\ueeb7\ueeb8\ueeb9\ueeba\ueebb\ueebc\ueebd\ueebe\ueebf\ueec0\ueec1\ueec2\ueec3\ueec4\ueec5\ueec6\ueec7\ueec8\ueec9\ueeca\ueecb\ueecc\ueecd\ueece\ueecf\ueed0\ueed1\ueed2\ueed3\ueed4\ueed5\ueed6\ueed7\ueed8\ueed9\ueeda\ueedb\ueedc\ueedd\ueede\ueedf\ueee0\ueee1\ueee2\ueee3\ueee4\ueee5\ueee6\ueee7\ueee8\ueee9\ueeea\ueeeb\ueeec\ueeed\ueeee\ueeef\ueef0\ueef1\ueef2\ueef3\ueef4\ueef5\ueef6\ueef7\ueef8\ueef9\ueefa\ueefb\ueefc\ueefd\ueefe\ueeff\uef00\uef01\uef02\uef03\uef04\uef05\uef06\uef07\uef08\uef09\uef0a\uef0b\uef0c\uef0d\uef0e\uef0f\uef10\uef11\uef12\uef13\uef14\uef15\uef16\uef17\uef18\uef19\uef1a\uef1b\uef1c\uef1d\uef1e\uef1f\uef20\uef21\uef22\uef23\uef24\uef25\uef26\uef27\uef28\uef29\uef2a\uef2b\uef2c\uef2d\uef2e\uef2f\uef30\uef31\uef32\uef33\uef34\uef35\uef36\uef37\uef38\uef39\uef3a\uef3b\uef3c\uef3d\uef3e\uef3f\uef40\uef41\uef42\uef43\uef44\uef45\uef46\uef47\uef48\uef49\uef4a\uef4b\uef4c\uef4d\uef4e\uef4f\uef50\uef51\uef52\uef53\uef54\uef55\uef56\uef57\uef58\uef59\uef5a\uef5b\uef5c\uef5d\uef5e\uef5f\uef60\uef61\uef62\uef63\uef64\uef65\uef66\uef67\uef68\uef69\uef6a\uef6b\uef6c\uef6d\uef6e\uef6f\uef70\uef71\uef72\uef73\uef74\uef75\uef76\uef77\uef78\uef79\uef7a\uef7b\uef7c\uef7d\uef7e\uef7f\uef80\uef81\uef82\uef83\uef84\uef85\uef86\uef87\uef88\uef89\uef8a\uef8b\uef8c\uef8d\uef8e\uef8f
\uef90\uef91\uef92\uef93\uef94\uef95\uef96\uef97\uef98\uef99\uef9a\uef9b\uef9c\uef9d\uef9e\uef9f\uefa0\uefa1\uefa2\uefa3\uefa4\uefa5\uefa6\uefa7\uefa8\uefa9\uefaa\uefab\uefac\uefad\uefae\uefaf\uefb0\uefb1\uefb2\uefb3\uefb4\uefb5\uefb6\uefb7\uefb8\uefb9\uefba\uefbb\uefbc\uefbd\uefbe\uefbf\uefc0\uefc1\uefc2\uefc3\uefc4\uefc5\uefc6\uefc7\uefc8\uefc9\uefca\uefcb\uefcc\uefcd\uefce\uefcf\uefd0\uefd1\uefd2\uefd3\uefd4\uefd5\uefd6\uefd7\uefd8\uefd9\uefda\uefdb\uefdc\uefdd\uefde\uefdf\uefe0\uefe1\uefe2\uefe3\uefe4\uefe5\uefe6\uefe7\uefe8\uefe9\uefea\uefeb\uefec\uefed\uefee\uefef\ueff0\ueff1\ueff2\ueff3\ueff4\ueff5\ueff6\ueff7\ueff8\ueff9\ueffa\ueffb\ueffc\ueffd\ueffe\uefff\uf000\uf001\uf002\uf003\uf004\uf005\uf006\uf007\uf008\uf009\uf00a\uf00b\uf00c\uf00d\uf00e\uf00f\uf010\uf011\uf012\uf013\uf014\uf015\uf016\uf017\uf018\uf019\uf01a\uf01b\uf01c\uf01d\uf01e\uf01f\uf020\uf021\uf022\uf023\uf024\uf025\uf026\uf027\uf028\uf029\uf02a\uf02b\uf02c\uf02d\uf02e\uf02f\uf030\uf031\uf032\uf033\uf034\uf035\uf036\uf037\uf038\uf039\uf03a\uf03b\uf03c\uf03d\uf03e\uf03f\uf040\uf041\uf042\uf043\uf044\uf045\uf046\uf047\uf048\uf049\uf04a\uf04b\uf04c\uf04d\uf04e\uf04f\uf050\uf051\uf052\uf053\uf054\uf055\uf056\uf057\uf058\uf059\uf05a\uf05b\uf05c\uf05d\uf05e\uf05f\uf060\uf061\uf062\uf063\uf064\uf065\uf066\uf067\uf068\uf069\uf06a\uf06b\uf06c\uf06d\uf06e\uf06f\uf070\uf071\uf072\uf073\uf074\uf075\uf076\uf077\uf078\uf079\uf07a\uf07b\uf07c\uf07d\uf07e\uf07f\uf080\uf081\uf082\uf083\uf084\uf085\uf086\uf087\uf088\uf089\uf08a\uf08b\uf08c\uf08d\uf08e\uf08f\uf090\uf091\uf092\uf093\uf094\uf095\uf096\uf097\uf098\uf099\uf09a\uf09b\uf09c\uf09d\uf09e\uf09f\uf0a0\uf0a1\uf0a2\uf0a3\uf0a4\uf0a5\uf0a6\uf0a7\uf0a8\uf0a9\uf0aa\uf0ab\uf0ac\uf0ad\uf0ae\uf0af\uf0b0\uf0b1\uf0b2\uf0b3\uf0b4\uf0b5\uf0b6\uf0b7\uf0b8\uf0b9\uf0ba\uf0bb\uf0bc\uf0bd\uf0be\uf0bf\uf0c0\uf0c1\uf0c2\uf0c3\uf0c4\uf0c5\uf0c6\uf0c7\uf0c8\uf0c9\uf0ca\uf0cb\uf0cc\uf0cd\uf0ce\uf0cf\uf0d0\uf0d1\uf0d2\uf0d3\uf0d4\uf0d5\uf0d6\uf0d7\uf0d8\uf0d9\uf0da\uf0db\uf0dc\u
f0dd\uf0de\uf0df\uf0e0\uf0e1\uf0e2\uf0e3\uf0e4\uf0e5\uf0e6\uf0e7\uf0e8\uf0e9\uf0ea\uf0eb\uf0ec\uf0ed\uf0ee\uf0ef\uf0f0\uf0f1\uf0f2\uf0f3\uf0f4\uf0f5\uf0f6\uf0f7\uf0f8\uf0f9\uf0fa\uf0fb\uf0fc\uf0fd\uf0fe\uf0ff\uf100\uf101\uf102\uf103\uf104\uf105\uf106\uf107\uf108\uf109\uf10a\uf10b\uf10c\uf10d\uf10e\uf10f\uf110\uf111\uf112\uf113\uf114\uf115\uf116\uf117\uf118\uf119\uf11a\uf11b\uf11c\uf11d\uf11e\uf11f\uf120\uf121\uf122\uf123\uf124\uf125\uf126\uf127\uf128\uf129\uf12a\uf12b\uf12c\uf12d\uf12e\uf12f\uf130\uf131\uf132\uf133\uf134\uf135\uf136\uf137\uf138\uf139\uf13a\uf13b\uf13c\uf13d\uf13e\uf13f\uf140\uf141\uf142\uf143\uf144\uf145\uf146\uf147\uf148\uf149\uf14a\uf14b\uf14c\uf14d\uf14e\uf14f\uf150\uf151\uf152\uf153\uf154\uf155\uf156\uf157\uf158\uf159\uf15a\uf15b\uf15c\uf15d\uf15e\uf15f\uf160\uf161\uf162\uf163\uf164\uf165\uf166\uf167\uf168\uf169\uf16a\uf16b\uf16c\uf16d\uf16e\uf16f\uf170\uf171\uf172\uf173\uf174\uf175\uf176\uf177\uf178\uf179\uf17a\uf17b\uf17c\uf17d\uf17e\uf17f\uf180\uf181\uf182\uf183\uf184\uf185\uf186\uf187\uf188\uf189\uf18a\uf18b\uf18c\uf18d\uf18e\uf18f\uf190\uf191\uf192\uf193\uf194\uf195\uf196\uf197\uf198\uf199\uf19a\uf19b\uf19c\uf19d\uf19e\uf19f\uf1a0\uf1a1\uf1a2\uf1a3\uf1a4\uf1a5\uf1a6\uf1a7\uf1a8\uf1a9\uf1aa\uf1ab\uf1ac\uf1ad\uf1ae\uf1af\uf1b0\uf1b1\uf1b2\uf1b3\uf1b4\uf1b5\uf1b6\uf1b7\uf1b8\uf1b9\uf1ba\uf1bb\uf1bc\uf1bd\uf1be\uf1bf\uf1c0\uf1c1\uf1c2\uf1c3\uf1c4\uf1c5\uf1c6\uf1c7\uf1c8\uf1c9\uf1ca\uf1cb\uf1cc\uf1cd\uf1ce\uf1cf\uf1d0\uf1d1\uf1d2\uf1d3\uf1d4\uf1d5\uf1d6\uf1d7\uf1d8\uf1d9\uf1da\uf1db\uf1dc\uf1dd\uf1de\uf1df\uf1e0\uf1e1\uf1e2\uf1e3\uf1e4\uf1e5\uf1e6\uf1e7\uf1e8\uf1e9\uf1ea\uf1eb\uf1ec\uf1ed\uf1ee\uf1ef\uf1f0\uf1f1\uf1f2\uf1f3\uf1f4\uf1f5\uf1f6\uf1f7\uf1f8\uf1f9\uf1fa\uf1fb\uf1fc\uf1fd\uf1fe\uf1ff\uf200\uf201\uf202\uf203\uf204\uf205\uf206\uf207\uf208\uf209\uf20a\uf20b\uf20c\uf20d\uf20e\uf20f\uf210\uf211\uf212\uf213\uf214\uf215\uf216\uf217\uf218\uf219\uf21a\uf21b\uf21c\uf21d\uf21e\uf21f\uf220\uf221\uf222\uf223\uf224\uf225\uf226\uf227\uf228\uf229\uf2
2a\uf22b\uf22c\uf22d\uf22e\uf22f\uf230\uf231\uf232\uf233\uf234\uf235\uf236\uf237\uf238\uf239\uf23a\uf23b\uf23c\uf23d\uf23e\uf23f\uf240\uf241\uf242\uf243\uf244\uf245\uf246\uf247\uf248\uf249\uf24a\uf24b\uf24c\uf24d\uf24e\uf24f\uf250\uf251\uf252\uf253\uf254\uf255\uf256\uf257\uf258\uf259\uf25a\uf25b\uf25c\uf25d\uf25e\uf25f\uf260\uf261\uf262\uf263\uf264\uf265\uf266\uf267\uf268\uf269\uf26a\uf26b\uf26c\uf26d\uf26e\uf26f\uf270\uf271\uf272\uf273\uf274\uf275\uf276\uf277\uf278\uf279\uf27a\uf27b\uf27c\uf27d\uf27e\uf27f\uf280\uf281\uf282\uf283\uf284\uf285\uf286\uf287\uf288\uf289\uf28a\uf28b\uf28c\uf28d\uf28e\uf28f\uf290\uf291\uf292\uf293\uf294\uf295\uf296\uf297\uf298\uf299\uf29a\uf29b\uf29c\uf29d\uf29e\uf29f\uf2a0\uf2a1\uf2a2\uf2a3\uf2a4\uf2a5\uf2a6\uf2a7\uf2a8\uf2a9\uf2aa\uf2ab\uf2ac\uf2ad\uf2ae\uf2af\uf2b0\uf2b1\uf2b2\uf2b3\uf2b4\uf2b5\uf2b6\uf2b7\uf2b8\uf2b9\uf2ba\uf2bb\uf2bc\uf2bd\uf2be\uf2bf\uf2c0\uf2c1\uf2c2\uf2c3\uf2c4\uf2c5\uf2c6\uf2c7\uf2c8\uf2c9\uf2ca\uf2cb\uf2cc\uf2cd\uf2ce\uf2cf\uf2d0\uf2d1\uf2d2\uf2d3\uf2d4\uf2d5\uf2d6\uf2d7\uf2d8\uf2d9\uf2da\uf2db\uf2dc\uf2dd\uf2de\uf2df\uf2e0\uf2e1\uf2e2\uf2e3\uf2e4\uf2e5\uf2e6\uf2e7\uf2e8\uf2e9\uf2ea\uf2eb\uf2ec\uf2ed\uf2ee\uf2ef\uf2f0\uf2f1\uf2f2\uf2f3\uf2f4\uf2f5\uf2f6\uf2f7\uf2f8\uf2f9\uf2fa\uf2fb\uf2fc\uf2fd\uf2fe\uf2ff\uf300\uf301\uf302\uf303\uf304\uf305\uf306\uf307\uf308\uf309\uf30a\uf30b\uf30c\uf30d\uf30e\uf30f\uf310\uf311\uf312\uf313\uf314\uf315\uf316\uf317\uf318\uf319\uf31a\uf31b\uf31c\uf31d\uf31e\uf31f\uf320\uf321\uf322\uf323\uf324\uf325\uf326\uf327\uf328\uf329\uf32a\uf32b\uf32c\uf32d\uf32e\uf32f\uf330\uf331\uf332\uf333\uf334\uf335\uf336\uf337\uf338\uf339\uf33a\uf33b\uf33c\uf33d\uf33e\uf33f\uf340\uf341\uf342\uf343\uf344\uf345\uf346\uf347\uf348\uf349\uf34a\uf34b\uf34c\uf34d\uf34e\uf34f\uf350\uf351\uf352\uf353\uf354\uf355\uf356\uf357\uf358\uf359\uf35a\uf35b\uf35c\uf35d\uf35e\uf35f\uf360\uf361\uf362\uf363\uf364\uf365\uf366\uf367\uf368\uf369\uf36a\uf36b\uf36c\uf36d\uf36e\uf36f\uf370\uf371\uf372\uf373\uf374\uf375\uf376\uf377
\uf378\uf379\uf37a\uf37b\uf37c\uf37d\uf37e\uf37f\uf380\uf381\uf382\uf383\uf384\uf385\uf386\uf387\uf388\uf389\uf38a\uf38b\uf38c\uf38d\uf38e\uf38f\uf390\uf391\uf392\uf393\uf394\uf395\uf396\uf397\uf398\uf399\uf39a\uf39b\uf39c\uf39d\uf39e\uf39f\uf3a0\uf3a1\uf3a2\uf3a3\uf3a4\uf3a5\uf3a6\uf3a7\uf3a8\uf3a9\uf3aa\uf3ab\uf3ac\uf3ad\uf3ae\uf3af\uf3b0\uf3b1\uf3b2\uf3b3\uf3b4\uf3b5\uf3b6\uf3b7\uf3b8\uf3b9\uf3ba\uf3bb\uf3bc\uf3bd\uf3be\uf3bf\uf3c0\uf3c1\uf3c2\uf3c3\uf3c4\uf3c5\uf3c6\uf3c7\uf3c8\uf3c9\uf3ca\uf3cb\uf3cc\uf3cd\uf3ce\uf3cf\uf3d0\uf3d1\uf3d2\uf3d3\uf3d4\uf3d5\uf3d6\uf3d7\uf3d8\uf3d9\uf3da\uf3db\uf3dc\uf3dd\uf3de\uf3df\uf3e0\uf3e1\uf3e2\uf3e3\uf3e4\uf3e5\uf3e6\uf3e7\uf3e8\uf3e9\uf3ea\uf3eb\uf3ec\uf3ed\uf3ee\uf3ef\uf3f0\uf3f1\uf3f2\uf3f3\uf3f4\uf3f5\uf3f6\uf3f7\uf3f8\uf3f9\uf3fa\uf3fb\uf3fc\uf3fd\uf3fe\uf3ff\uf400\uf401\uf402\uf403\uf404\uf405\uf406\uf407\uf408\uf409\uf40a\uf40b\uf40c\uf40d\uf40e\uf40f\uf410\uf411\uf412\uf413\uf414\uf415\uf416\uf417\uf418\uf419\uf41a\uf41b\uf41c\uf41d\uf41e\uf41f\uf420\uf421\uf422\uf423\uf424\uf425\uf426\uf427\uf428\uf429\uf42a\uf42b\uf42c\uf42d\uf42e\uf42f\uf430\uf431\uf432\uf433\uf434\uf435\uf436\uf437\uf438\uf439\uf43a\uf43b\uf43c\uf43d\uf43e\uf43f\uf440\uf441\uf442\uf443\uf444\uf445\uf446\uf447\uf448\uf449\uf44a\uf44b\uf44c\uf44d\uf44e\uf44f\uf450\uf451\uf452\uf453\uf454\uf455\uf456\uf457\uf458\uf459\uf45a\uf45b\uf45c\uf45d\uf45e\uf45f\uf460\uf461\uf462\uf463\uf464\uf465\uf466\uf467\uf468\uf469\uf46a\uf46b\uf46c\uf46d\uf46e\uf46f\uf470\uf471\uf472\uf473\uf474\uf475\uf476\uf477\uf478\uf479\uf47a\uf47b\uf47c\uf47d\uf47e\uf47f\uf480\uf481\uf482\uf483\uf484\uf485\uf486\uf487\uf488\uf489\uf48a\uf48b\uf48c\uf48d\uf48e\uf48f\uf490\uf491\uf492\uf493\uf494\uf495\uf496\uf497\uf498\uf499\uf49a\uf49b\uf49c\uf49d\uf49e\uf49f\uf4a0\uf4a1\uf4a2\uf4a3\uf4a4\uf4a5\uf4a6\uf4a7\uf4a8\uf4a9\uf4aa\uf4ab\uf4ac\uf4ad\uf4ae\uf4af\uf4b0\uf4b1\uf4b2\uf4b3\uf4b4\uf4b5\uf4b6\uf4b7\uf4b8\uf4b9\uf4ba\uf4bb\uf4bc\uf4bd\uf4be\uf4bf\uf4c0\uf4c1\uf4c2\uf4c3\uf4c4\u
f4c5\uf4c6\uf4c7\uf4c8\uf4c9\uf4ca\uf4cb\uf4cc\uf4cd\uf4ce\uf4cf\uf4d0\uf4d1\uf4d2\uf4d3\uf4d4\uf4d5\uf4d6\uf4d7\uf4d8\uf4d9\uf4da\uf4db\uf4dc\uf4dd\uf4de\uf4df\uf4e0\uf4e1\uf4e2\uf4e3\uf4e4\uf4e5\uf4e6\uf4e7\uf4e8\uf4e9\uf4ea\uf4eb\uf4ec\uf4ed\uf4ee\uf4ef\uf4f0\uf4f1\uf4f2\uf4f3\uf4f4\uf4f5\uf4f6\uf4f7\uf4f8\uf4f9\uf4fa\uf4fb\uf4fc\uf4fd\uf4fe\uf4ff\uf500\uf501\uf502\uf503\uf504\uf505\uf506\uf507\uf508\uf509\uf50a\uf50b\uf50c\uf50d\uf50e\uf50f\uf510\uf511\uf512\uf513\uf514\uf515\uf516\uf517\uf518\uf519\uf51a\uf51b\uf51c\uf51d\uf51e\uf51f\uf520\uf521\uf522\uf523\uf524\uf525\uf526\uf527\uf528\uf529\uf52a\uf52b\uf52c\uf52d\uf52e\uf52f\uf530\uf531\uf532\uf533\uf534\uf535\uf536\uf537\uf538\uf539\uf53a\uf53b\uf53c\uf53d\uf53e\uf53f\uf540\uf541\uf542\uf543\uf544\uf545\uf546\uf547\uf548\uf549\uf54a\uf54b\uf54c\uf54d\uf54e\uf54f\uf550\uf551\uf552\uf553\uf554\uf555\uf556\uf557\uf558\uf559\uf55a\uf55b\uf55c\uf55d\uf55e\uf55f\uf560\uf561\uf562\uf563\uf564\uf565\uf566\uf567\uf568\uf569\uf56a\uf56b\uf56c\uf56d\uf56e\uf56f\uf570\uf571\uf572\uf573\uf574\uf575\uf576\uf577\uf578\uf579\uf57a\uf57b\uf57c\uf57d\uf57e\uf57f\uf580\uf581\uf582\uf583\uf584\uf585\uf586\uf587\uf588\uf589\uf58a\uf58b\uf58c\uf58d\uf58e\uf58f\uf590\uf591\uf592\uf593\uf594\uf595\uf596\uf597\uf598\uf599\uf59a\uf59b\uf59c\uf59d\uf59e\uf59f\uf5a0\uf5a1\uf5a2\uf5a3\uf5a4\uf5a5\uf5a6\uf5a7\uf5a8\uf5a9\uf5aa\uf5ab\uf5ac\uf5ad\uf5ae\uf5af\uf5b0\uf5b1\uf5b2\uf5b3\uf5b4\uf5b5\uf5b6\uf5b7\uf5b8\uf5b9\uf5ba\uf5bb\uf5bc\uf5bd\uf5be\uf5bf\uf5c0\uf5c1\uf5c2\uf5c3\uf5c4\uf5c5\uf5c6\uf5c7\uf5c8\uf5c9\uf5ca\uf5cb\uf5cc\uf5cd\uf5ce\uf5cf\uf5d0\uf5d1\uf5d2\uf5d3\uf5d4\uf5d5\uf5d6\uf5d7\uf5d8\uf5d9\uf5da\uf5db\uf5dc\uf5dd\uf5de\uf5df\uf5e0\uf5e1\uf5e2\uf5e3\uf5e4\uf5e5\uf5e6\uf5e7\uf5e8\uf5e9\uf5ea\uf5eb\uf5ec\uf5ed\uf5ee\uf5ef\uf5f0\uf5f1\uf5f2\uf5f3\uf5f4\uf5f5\uf5f6\uf5f7\uf5f8\uf5f9\uf5fa\uf5fb\uf5fc\uf5fd\uf5fe\uf5ff\uf600\uf601\uf602\uf603\uf604\uf605\uf606\uf607\uf608\uf609\uf60a\uf60b\uf60c\uf60d\uf60e\uf60f\uf610\uf611\uf6
12\uf613\uf614\uf615\uf616\uf617\uf618\uf619\uf61a\uf61b\uf61c\uf61d\uf61e\uf61f\uf620\uf621\uf622\uf623\uf624\uf625\uf626\uf627\uf628\uf629\uf62a\uf62b\uf62c\uf62d\uf62e\uf62f\uf630\uf631\uf632\uf633\uf634\uf635\uf636\uf637\uf638\uf639\uf63a\uf63b\uf63c\uf63d\uf63e\uf63f\uf640\uf641\uf642\uf643\uf644\uf645\uf646\uf647\uf648\uf649\uf64a\uf64b\uf64c\uf64d\uf64e\uf64f\uf650\uf651\uf652\uf653\uf654\uf655\uf656\uf657\uf658\uf659\uf65a\uf65b\uf65c\uf65d\uf65e\uf65f\uf660\uf661\uf662\uf663\uf664\uf665\uf666\uf667\uf668\uf669\uf66a\uf66b\uf66c\uf66d\uf66e\uf66f\uf670\uf671\uf672\uf673\uf674\uf675\uf676\uf677\uf678\uf679\uf67a\uf67b\uf67c\uf67d\uf67e\uf67f\uf680\uf681\uf682\uf683\uf684\uf685\uf686\uf687\uf688\uf689\uf68a\uf68b\uf68c\uf68d\uf68e\uf68f\uf690\uf691\uf692\uf693\uf694\uf695\uf696\uf697\uf698\uf699\uf69a\uf69b\uf69c\uf69d\uf69e\uf69f\uf6a0\uf6a1\uf6a2\uf6a3\uf6a4\uf6a5\uf6a6\uf6a7\uf6a8\uf6a9\uf6aa\uf6ab\uf6ac\uf6ad\uf6ae\uf6af\uf6b0\uf6b1\uf6b2\uf6b3\uf6b4\uf6b5\uf6b6\uf6b7\uf6b8\uf6b9\uf6ba\uf6bb\uf6bc\uf6bd\uf6be\uf6bf\uf6c0\uf6c1\uf6c2\uf6c3\uf6c4\uf6c5\uf6c6\uf6c7\uf6c8\uf6c9\uf6ca\uf6cb\uf6cc\uf6cd\uf6ce\uf6cf\uf6d0\uf6d1\uf6d2\uf6d3\uf6d4\uf6d5\uf6d6\uf6d7\uf6d8\uf6d9\uf6da\uf6db\uf6dc\uf6dd\uf6de\uf6df\uf6e0\uf6e1\uf6e2\uf6e3\uf6e4\uf6e5\uf6e6\uf6e7\uf6e8\uf6e9\uf6ea\uf6eb\uf6ec\uf6ed\uf6ee\uf6ef\uf6f0\uf6f1\uf6f2\uf6f3\uf6f4\uf6f5\uf6f6\uf6f7\uf6f8\uf6f9\uf6fa\uf6fb\uf6fc\uf6fd\uf6fe\uf6ff\uf700\uf701\uf702\uf703\uf704\uf705\uf706\uf707\uf708\uf709\uf70a\uf70b\uf70c\uf70d\uf70e\uf70f\uf710\uf711\uf712\uf713\uf714\uf715\uf716\uf717\uf718\uf719\uf71a\uf71b\uf71c\uf71d\uf71e\uf71f\uf720\uf721\uf722\uf723\uf724\uf725\uf726\uf727\uf728\uf729\uf72a\uf72b\uf72c\uf72d\uf72e\uf72f\uf730\uf731\uf732\uf733\uf734\uf735\uf736\uf737\uf738\uf739\uf73a\uf73b\uf73c\uf73d\uf73e\uf73f\uf740\uf741\uf742\uf743\uf744\uf745\uf746\uf747\uf748\uf749\uf74a\uf74b\uf74c\uf74d\uf74e\uf74f\uf750\uf751\uf752\uf753\uf754\uf755\uf756\uf757\uf758\uf759\uf75a\uf75b\uf75c\uf75d\uf75e\uf75f
\uf760\uf761\uf762\uf763\uf764\uf765\uf766\uf767\uf768\uf769\uf76a\uf76b\uf76c\uf76d\uf76e\uf76f\uf770\uf771\uf772\uf773\uf774\uf775\uf776\uf777\uf778\uf779\uf77a\uf77b\uf77c\uf77d\uf77e\uf77f\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff\uf800\uf801\uf802\uf803\uf804\uf805\uf806\uf807\uf808\uf809\uf80a\uf80b\uf80c\uf80d\uf80e\uf80f\uf810\uf811\uf812\uf813\uf814\uf815\uf816\uf817\uf818\uf819\uf81a\uf81b\uf81c\uf81d\uf81e\uf81f\uf820\uf821\uf822\uf823\uf824\uf825\uf826\uf827\uf828\uf829\uf82a\uf82b\uf82c\uf82d\uf82e\uf82f\uf830\uf831\uf832\uf833\uf834\uf835\uf836\uf837\uf838\uf839\uf83a\uf83b\uf83c\uf83d\uf83e\uf83f\uf840\uf841\uf842\uf843\uf844\uf845\uf846\uf847\uf848\uf849\uf84a\uf84b\uf84c\uf84d\uf84e\uf84f\uf850\uf851\uf852\uf853\uf854\uf855\uf856\uf857\uf858\uf859\uf85a\uf85b\uf85c\uf85d\uf85e\uf85f\uf860\uf861\uf862\uf863\uf864\uf865\uf866\uf867\uf868\uf869\uf86a\uf86b\uf86c\uf86d\uf86e\uf86f\uf870\uf871\uf872\uf873\uf874\uf875\uf876\uf877\uf878\uf879\uf87a\uf87b\uf87c\uf87d\uf87e\uf87f\uf880\uf881\uf882\uf883\uf884\uf885\uf886\uf887\uf888\uf889\uf88a\uf88b\uf88c\uf88d\uf88e\uf88f\uf890\uf891\uf892\uf893\uf894\uf895\uf896\uf897\uf898\uf899\uf89a\uf89b\uf89c\uf89d\uf89e\uf89f\uf8a0\uf8a1\uf8a2\uf8a3\uf8a4\uf8a5\uf8a6\uf8a7\uf8a8\uf8a9\uf8aa\uf8ab\uf8ac\u
f8ad\uf8ae\uf8af\uf8b0\uf8b1\uf8b2\uf8b3\uf8b4\uf8b5\uf8b6\uf8b7\uf8b8\uf8b9\uf8ba\uf8bb\uf8bc\uf8bd\uf8be\uf8bf\uf8c0\uf8c1\uf8c2\uf8c3\uf8c4\uf8c5\uf8c6\uf8c7\uf8c8\uf8c9\uf8ca\uf8cb\uf8cc\uf8cd\uf8ce\uf8cf\uf8d0\uf8d1\uf8d2\uf8d3\uf8d4\uf8d5\uf8d6\uf8d7\uf8d8\uf8d9\uf8da\uf8db\uf8dc\uf8dd\uf8de\uf8df\uf8e0\uf8e1\uf8e2\uf8e3\uf8e4\uf8e5\uf8e6\uf8e7\uf8e8\uf8e9\uf8ea\uf8eb\uf8ec\uf8ed\uf8ee\uf8ef\uf8f0\uf8f1\uf8f2\uf8f3\uf8f4\uf8f5\uf8f6\uf8f7\uf8f8\uf8f9\uf8fa\uf8fb\uf8fc\uf8fd\uf8fe\uf8ff' + +try: + Cs = eval(r"'\ud800\ud801\ud802\ud803\ud804\ud805\ud806\ud807\ud808\ud809\ud80a\ud80b\ud80c\ud80d\ud80e\ud80f\ud810\ud811\ud812\ud813\ud814\ud815\ud816\ud817\ud818\ud819\ud81a\ud81b\ud81c\ud81d\ud81e\ud81f\ud820\ud821\ud822\ud823\ud824\ud825\ud826\ud827\ud828\ud829\ud82a\ud82b\ud82c\ud82d\ud82e\ud82f\ud830\ud831\ud832\ud833\ud834\ud835\ud836\ud837\ud838\ud839\ud83a\ud83b\ud83c\ud83d\ud83e\ud83f\ud840\ud841\ud842\ud843\ud844\ud845\ud846\ud847\ud848\ud849\ud84a\ud84b\ud84c\ud84d\ud84e\ud84f\ud850\ud851\ud852\ud853\ud854\ud855\ud856\ud857\ud858\ud859\ud85a\ud85b\ud85c\ud85d\ud85e\ud85f\ud860\ud861\ud862\ud863\ud864\ud865\ud866\ud867\ud868\ud869\ud86a\ud86b\ud86c\ud86d\ud86e\ud86f\ud870\ud871\ud872\ud873\ud874\ud875\ud876\ud877\ud878\ud879\ud87a\ud87b\ud87c\ud87d\ud87e\ud87f\ud880\ud881\ud882\ud883\ud884\ud885\ud886\ud887\ud888\ud889\ud88a\ud88b\ud88c\ud88d\ud88e\ud88f\ud890\ud891\ud892\ud893\ud894\ud895\ud896\ud897\ud898\ud899\ud89a\ud89b\ud89c\ud89d\ud89e\ud89f\ud8a0\ud8a1\ud8a2\ud8a3\ud8a4\ud8a5\ud8a6\ud8a7\ud8a8\ud8a9\ud8aa\ud8ab\ud8ac\ud8ad\ud8ae\ud8af\ud8b0\ud8b1\ud8b2\ud8b3\ud8b4\ud8b5\ud8b6\ud8b7\ud8b8\ud8b9\ud8ba\ud8bb\ud8bc\ud8bd\ud8be\ud8bf\ud8c0\ud8c1\ud8c2\ud8c3\ud8c4\ud8c5\ud8c6\ud8c7\ud8c8\ud8c9\ud8ca\ud8cb\ud8cc\ud8cd\ud8ce\ud8cf\ud8d0\ud8d1\ud8d2\ud8d3\ud8d4\ud8d5\ud8d6\ud8d7\ud8d8\ud8d9\ud8da\ud8db\ud8dc\ud8dd\ud8de\ud8df\ud8e0\ud8e1\ud8e2\ud8e3\ud8e4\ud8e5\ud8e6\ud8e7\ud8e8\ud8e9\ud8ea\ud8eb\ud8ec\ud8ed\ud8ee\ud8ef\ud8f0\ud8f1\ud8f2\ud8f3\ud8f4\ud8f5\ud
8f6\ud8f7\ud8f8\ud8f9\ud8fa\ud8fb\ud8fc\ud8fd\ud8fe\ud8ff\ud900\ud901\ud902\ud903\ud904\ud905\ud906\ud907\ud908\ud909\ud90a\ud90b\ud90c\ud90d\ud90e\ud90f\ud910\ud911\ud912\ud913\ud914\ud915\ud916\ud917\ud918\ud919\ud91a\ud91b\ud91c\ud91d\ud91e\ud91f\ud920\ud921\ud922\ud923\ud924\ud925\ud926\ud927\ud928\ud929\ud92a\ud92b\ud92c\ud92d\ud92e\ud92f\ud930\ud931\ud932\ud933\ud934\ud935\ud936\ud937\ud938\ud939\ud93a\ud93b\ud93c\ud93d\ud93e\ud93f\ud940\ud941\ud942\ud943\ud944\ud945\ud946\ud947\ud948\ud949\ud94a\ud94b\ud94c\ud94d\ud94e\ud94f\ud950\ud951\ud952\ud953\ud954\ud955\ud956\ud957\ud958\ud959\ud95a\ud95b\ud95c\ud95d\ud95e\ud95f\ud960\ud961\ud962\ud963\ud964\ud965\ud966\ud967\ud968\ud969\ud96a\ud96b\ud96c\ud96d\ud96e\ud96f\ud970\ud971\ud972\ud973\ud974\ud975\ud976\ud977\ud978\ud979\ud97a\ud97b\ud97c\ud97d\ud97e\ud97f\ud980\ud981\ud982\ud983\ud984\ud985\ud986\ud987\ud988\ud989\ud98a\ud98b\ud98c\ud98d\ud98e\ud98f\ud990\ud991\ud992\ud993\ud994\ud995\ud996\ud997\ud998\ud999\ud99a\ud99b\ud99c\ud99d\ud99e\ud99f\ud9a0\ud9a1\ud9a2\ud9a3\ud9a4\ud9a5\ud9a6\ud9a7\ud9a8\ud9a9\ud9aa\ud9ab\ud9ac\ud9ad\ud9ae\ud9af\ud9b0\ud9b1\ud9b2\ud9b3\ud9b4\ud9b5\ud9b6\ud9b7\ud9b8\ud9b9\ud9ba\ud9bb\ud9bc\ud9bd\ud9be\ud9bf\ud9c0\ud9c1\ud9c2\ud9c3\ud9c4\ud9c5\ud9c6\ud9c7\ud9c8\ud9c9\ud9ca\ud9cb\ud9cc\ud9cd\ud9ce\ud9cf\ud9d0\ud9d1\ud9d2\ud9d3\ud9d4\ud9d5\ud9d6\ud9d7\ud9d8\ud9d9\ud9da\ud9db\ud9dc\ud9dd\ud9de\ud9df\ud9e0\ud9e1\ud9e2\ud9e3\ud9e4\ud9e5\ud9e6\ud9e7\ud9e8\ud9e9\ud9ea\ud9eb\ud9ec\ud9ed\ud9ee\ud9ef\ud9f0\ud9f1\ud9f2\ud9f3\ud9f4\ud9f5\ud9f6\ud9f7\ud9f8\ud9f9\ud9fa\ud9fb\ud9fc\ud9fd\ud9fe\ud9ff\uda00\uda01\uda02\uda03\uda04\uda05\uda06\uda07\uda08\uda09\uda0a\uda0b\uda0c\uda0d\uda0e\uda0f\uda10\uda11\uda12\uda13\uda14\uda15\uda16\uda17\uda18\uda19\uda1a\uda1b\uda1c\uda1d\uda1e\uda1f\uda20\uda21\uda22\uda23\uda24\uda25\uda26\uda27\uda28\uda29\uda2a\uda2b\uda2c\uda2d\uda2e\uda2f\uda30\uda31\uda32\uda33\uda34\uda35\uda36\uda37\uda38\uda39\uda3a\uda3b\uda3c\uda3d\uda3e\uda3f\uda40\uda41\uda42\uda4
3\uda44\uda45\uda46\uda47\uda48\uda49\uda4a\uda4b\uda4c\uda4d\uda4e\uda4f\uda50\uda51\uda52\uda53\uda54\uda55\uda56\uda57\uda58\uda59\uda5a\uda5b\uda5c\uda5d\uda5e\uda5f\uda60\uda61\uda62\uda63\uda64\uda65\uda66\uda67\uda68\uda69\uda6a\uda6b\uda6c\uda6d\uda6e\uda6f\uda70\uda71\uda72\uda73\uda74\uda75\uda76\uda77\uda78\uda79\uda7a\uda7b\uda7c\uda7d\uda7e\uda7f\uda80\uda81\uda82\uda83\uda84\uda85\uda86\uda87\uda88\uda89\uda8a\uda8b\uda8c\uda8d\uda8e\uda8f\uda90\uda91\uda92\uda93\uda94\uda95\uda96\uda97\uda98\uda99\uda9a\uda9b\uda9c\uda9d\uda9e\uda9f\udaa0\udaa1\udaa2\udaa3\udaa4\udaa5\udaa6\udaa7\udaa8\udaa9\udaaa\udaab\udaac\udaad\udaae\udaaf\udab0\udab1\udab2\udab3\udab4\udab5\udab6\udab7\udab8\udab9\udaba\udabb\udabc\udabd\udabe\udabf\udac0\udac1\udac2\udac3\udac4\udac5\udac6\udac7\udac8\udac9\udaca\udacb\udacc\udacd\udace\udacf\udad0\udad1\udad2\udad3\udad4\udad5\udad6\udad7\udad8\udad9\udada\udadb\udadc\udadd\udade\udadf\udae0\udae1\udae2\udae3\udae4\udae5\udae6\udae7\udae8\udae9\udaea\udaeb\udaec\udaed\udaee\udaef\udaf0\udaf1\udaf2\udaf3\udaf4\udaf5\udaf6\udaf7\udaf8\udaf9\udafa\udafb\udafc\udafd\udafe\udaff\udb00\udb01\udb02\udb03\udb04\udb05\udb06\udb07\udb08\udb09\udb0a\udb0b\udb0c\udb0d\udb0e\udb0f\udb10\udb11\udb12\udb13\udb14\udb15\udb16\udb17\udb18\udb19\udb1a\udb1b\udb1c\udb1d\udb1e\udb1f\udb20\udb21\udb22\udb23\udb24\udb25\udb26\udb27\udb28\udb29\udb2a\udb2b\udb2c\udb2d\udb2e\udb2f\udb30\udb31\udb32\udb33\udb34\udb35\udb36\udb37\udb38\udb39\udb3a\udb3b\udb3c\udb3d\udb3e\udb3f\udb40\udb41\udb42\udb43\udb44\udb45\udb46\udb47\udb48\udb49\udb4a\udb4b\udb4c\udb4d\udb4e\udb4f\udb50\udb51\udb52\udb53\udb54\udb55\udb56\udb57\udb58\udb59\udb5a\udb5b\udb5c\udb5d\udb5e\udb5f\udb60\udb61\udb62\udb63\udb64\udb65\udb66\udb67\udb68\udb69\udb6a\udb6b\udb6c\udb6d\udb6e\udb6f\udb70\udb71\udb72\udb73\udb74\udb75\udb76\udb77\udb78\udb79\udb7a\udb7b\udb7c\udb7d\udb7e\udb7f\udb80\udb81\udb82\udb83\udb84\udb85\udb86\udb87\udb88\udb89\udb8a\udb8b\udb8c\udb8d\udb8e\udb8f\udb90\
udb91\udb92\udb93\udb94\udb95\udb96\udb97\udb98\udb99\udb9a\udb9b\udb9c\udb9d\udb9e\udb9f\udba0\udba1\udba2\udba3\udba4\udba5\udba6\udba7\udba8\udba9\udbaa\udbab\udbac\udbad\udbae\udbaf\udbb0\udbb1\udbb2\udbb3\udbb4\udbb5\udbb6\udbb7\udbb8\udbb9\udbba\udbbb\udbbc\udbbd\udbbe\udbbf\udbc0\udbc1\udbc2\udbc3\udbc4\udbc5\udbc6\udbc7\udbc8\udbc9\udbca\udbcb\udbcc\udbcd\udbce\udbcf\udbd0\udbd1\udbd2\udbd3\udbd4\udbd5\udbd6\udbd7\udbd8\udbd9\udbda\udbdb\udbdc\udbdd\udbde\udbdf\udbe0\udbe1\udbe2\udbe3\udbe4\udbe5\udbe6\udbe7\udbe8\udbe9\udbea\udbeb\udbec\udbed\udbee\udbef\udbf0\udbf1\udbf2\udbf3\udbf4\udbf5\udbf6\udbf7\udbf8\udbf9\udbfa\udbfb\udbfc\udbfd\udbfe\U0010fc00\udc01\udc02\udc03\udc04\udc05\udc06\udc07\udc08\udc09\udc0a\udc0b\udc0c\udc0d\udc0e\udc0f\udc10\udc11\udc12\udc13\udc14\udc15\udc16\udc17\udc18\udc19\udc1a\udc1b\udc1c\udc1d\udc1e\udc1f\udc20\udc21\udc22\udc23\udc24\udc25\udc26\udc27\udc28\udc29\udc2a\udc2b\udc2c\udc2d\udc2e\udc2f\udc30\udc31\udc32\udc33\udc34\udc35\udc36\udc37\udc38\udc39\udc3a\udc3b\udc3c\udc3d\udc3e\udc3f\udc40\udc41\udc42\udc43\udc44\udc45\udc46\udc47\udc48\udc49\udc4a\udc4b\udc4c\udc4d\udc4e\udc4f\udc50\udc51\udc52\udc53\udc54\udc55\udc56\udc57\udc58\udc59\udc5a\udc5b\udc5c\udc5d\udc5e\udc5f\udc60\udc61\udc62\udc63\udc64\udc65\udc66\udc67\udc68\udc69\udc6a\udc6b\udc6c\udc6d\udc6e\udc6f\udc70\udc71\udc72\udc73\udc74\udc75\udc76\udc77\udc78\udc79\udc7a\udc7b\udc7c\udc7d\udc7e\udc7f\udc80\udc81\udc82\udc83\udc84\udc85\udc86\udc87\udc88\udc89\udc8a\udc8b\udc8c\udc8d\udc8e\udc8f\udc90\udc91\udc92\udc93\udc94\udc95\udc96\udc97\udc98\udc99\udc9a\udc9b\udc9c\udc9d\udc9e\udc9f\udca0\udca1\udca2\udca3\udca4\udca5\udca6\udca7\udca8\udca9\udcaa\udcab\udcac\udcad\udcae\udcaf\udcb0\udcb1\udcb2\udcb3\udcb4\udcb5\udcb6\udcb7\udcb8\udcb9\udcba\udcbb\udcbc\udcbd\udcbe\udcbf\udcc0\udcc1\udcc2\udcc3\udcc4\udcc5\udcc6\udcc7\udcc8\udcc9\udcca\udccb\udccc\udccd\udcce\udccf\udcd0\udcd1\udcd2\udcd3\udcd4\udcd5\udcd6\udcd7\udcd8\udcd9\udcda\udcdb\udcdc\udcdd\udcd
e\udcdf\udce0\udce1\udce2\udce3\udce4\udce5\udce6\udce7\udce8\udce9\udcea\udceb\udcec\udced\udcee\udcef\udcf0\udcf1\udcf2\udcf3\udcf4\udcf5\udcf6\udcf7\udcf8\udcf9\udcfa\udcfb\udcfc\udcfd\udcfe\udcff\udd00\udd01\udd02\udd03\udd04\udd05\udd06\udd07\udd08\udd09\udd0a\udd0b\udd0c\udd0d\udd0e\udd0f\udd10\udd11\udd12\udd13\udd14\udd15\udd16\udd17\udd18\udd19\udd1a\udd1b\udd1c\udd1d\udd1e\udd1f\udd20\udd21\udd22\udd23\udd24\udd25\udd26\udd27\udd28\udd29\udd2a\udd2b\udd2c\udd2d\udd2e\udd2f\udd30\udd31\udd32\udd33\udd34\udd35\udd36\udd37\udd38\udd39\udd3a\udd3b\udd3c\udd3d\udd3e\udd3f\udd40\udd41\udd42\udd43\udd44\udd45\udd46\udd47\udd48\udd49\udd4a\udd4b\udd4c\udd4d\udd4e\udd4f\udd50\udd51\udd52\udd53\udd54\udd55\udd56\udd57\udd58\udd59\udd5a\udd5b\udd5c\udd5d\udd5e\udd5f\udd60\udd61\udd62\udd63\udd64\udd65\udd66\udd67\udd68\udd69\udd6a\udd6b\udd6c\udd6d\udd6e\udd6f\udd70\udd71\udd72\udd73\udd74\udd75\udd76\udd77\udd78\udd79\udd7a\udd7b\udd7c\udd7d\udd7e\udd7f\udd80\udd81\udd82\udd83\udd84\udd85\udd86\udd87\udd88\udd89\udd8a\udd8b\udd8c\udd8d\udd8e\udd8f\udd90\udd91\udd92\udd93\udd94\udd95\udd96\udd97\udd98\udd99\udd9a\udd9b\udd9c\udd9d\udd9e\udd9f\udda0\udda1\udda2\udda3\udda4\udda5\udda6\udda7\udda8\udda9\uddaa\uddab\uddac\uddad\uddae\uddaf\uddb0\uddb1\uddb2\uddb3\uddb4\uddb5\uddb6\uddb7\uddb8\uddb9\uddba\uddbb\uddbc\uddbd\uddbe\uddbf\uddc0\uddc1\uddc2\uddc3\uddc4\uddc5\uddc6\uddc7\uddc8\uddc9\uddca\uddcb\uddcc\uddcd\uddce\uddcf\uddd0\uddd1\uddd2\uddd3\uddd4\uddd5\uddd6\uddd7\uddd8\uddd9\uddda\udddb\udddc\udddd\uddde\udddf\udde0\udde1\udde2\udde3\udde4\udde5\udde6\udde7\udde8\udde9\uddea\uddeb\uddec\udded\uddee\uddef\uddf0\uddf1\uddf2\uddf3\uddf4\uddf5\uddf6\uddf7\uddf8\uddf9\uddfa\uddfb\uddfc\uddfd\uddfe\uddff\ude00\ude01\ude02\ude03\ude04\ude05\ude06\ude07\ude08\ude09\ude0a\ude0b\ude0c\ude0d\ude0e\ude0f\ude10\ude11\ude12\ude13\ude14\ude15\ude16\ude17\ude18\ude19\ude1a\ude1b\ude1c\ude1d\ude1e\ude1f\ude20\ude21\ude22\ude23\ude24\ude25\ude26\ude27\ude28\ude29\ude2a\ude2b\
ude2c\ude2d\ude2e\ude2f\ude30\ude31\ude32\ude33\ude34\ude35\ude36\ude37\ude38\ude39\ude3a\ude3b\ude3c\ude3d\ude3e\ude3f\ude40\ude41\ude42\ude43\ude44\ude45\ude46\ude47\ude48\ude49\ude4a\ude4b\ude4c\ude4d\ude4e\ude4f\ude50\ude51\ude52\ude53\ude54\ude55\ude56\ude57\ude58\ude59\ude5a\ude5b\ude5c\ude5d\ude5e\ude5f\ude60\ude61\ude62\ude63\ude64\ude65\ude66\ude67\ude68\ude69\ude6a\ude6b\ude6c\ude6d\ude6e\ude6f\ude70\ude71\ude72\ude73\ude74\ude75\ude76\ude77\ude78\ude79\ude7a\ude7b\ude7c\ude7d\ude7e\ude7f\ude80\ude81\ude82\ude83\ude84\ude85\ude86\ude87\ude88\ude89\ude8a\ude8b\ude8c\ude8d\ude8e\ude8f\ude90\ude91\ude92\ude93\ude94\ude95\ude96\ude97\ude98\ude99\ude9a\ude9b\ude9c\ude9d\ude9e\ude9f\udea0\udea1\udea2\udea3\udea4\udea5\udea6\udea7\udea8\udea9\udeaa\udeab\udeac\udead\udeae\udeaf\udeb0\udeb1\udeb2\udeb3\udeb4\udeb5\udeb6\udeb7\udeb8\udeb9\udeba\udebb\udebc\udebd\udebe\udebf\udec0\udec1\udec2\udec3\udec4\udec5\udec6\udec7\udec8\udec9\udeca\udecb\udecc\udecd\udece\udecf\uded0\uded1\uded2\uded3\uded4\uded5\uded6\uded7\uded8\uded9\udeda\udedb\udedc\udedd\udede\udedf\udee0\udee1\udee2\udee3\udee4\udee5\udee6\udee7\udee8\udee9\udeea\udeeb\udeec\udeed\udeee\udeef\udef0\udef1\udef2\udef3\udef4\udef5\udef6\udef7\udef8\udef9\udefa\udefb\udefc\udefd\udefe\udeff\udf00\udf01\udf02\udf03\udf04\udf05\udf06\udf07\udf08\udf09\udf0a\udf0b\udf0c\udf0d\udf0e\udf0f\udf10\udf11\udf12\udf13\udf14\udf15\udf16\udf17\udf18\udf19\udf1a\udf1b\udf1c\udf1d\udf1e\udf1f\udf20\udf21\udf22\udf23\udf24\udf25\udf26\udf27\udf28\udf29\udf2a\udf2b\udf2c\udf2d\udf2e\udf2f\udf30\udf31\udf32\udf33\udf34\udf35\udf36\udf37\udf38\udf39\udf3a\udf3b\udf3c\udf3d\udf3e\udf3f\udf40\udf41\udf42\udf43\udf44\udf45\udf46\udf47\udf48\udf49\udf4a\udf4b\udf4c\udf4d\udf4e\udf4f\udf50\udf51\udf52\udf53\udf54\udf55\udf56\udf57\udf58\udf59\udf5a\udf5b\udf5c\udf5d\udf5e\udf5f\udf60\udf61\udf62\udf63\udf64\udf65\udf66\udf67\udf68\udf69\udf6a\udf6b\udf6c\udf6d\udf6e\udf6f\udf70\udf71\udf72\udf73\udf74\udf75\udf76\udf77\udf78\ud
f79\udf7a\udf7b\udf7c\udf7d\udf7e\udf7f\udf80\udf81\udf82\udf83\udf84\udf85\udf86\udf87\udf88\udf89\udf8a\udf8b\udf8c\udf8d\udf8e\udf8f\udf90\udf91\udf92\udf93\udf94\udf95\udf96\udf97\udf98\udf99\udf9a\udf9b\udf9c\udf9d\udf9e\udf9f\udfa0\udfa1\udfa2\udfa3\udfa4\udfa5\udfa6\udfa7\udfa8\udfa9\udfaa\udfab\udfac\udfad\udfae\udfaf\udfb0\udfb1\udfb2\udfb3\udfb4\udfb5\udfb6\udfb7\udfb8\udfb9\udfba\udfbb\udfbc\udfbd\udfbe\udfbf\udfc0\udfc1\udfc2\udfc3\udfc4\udfc5\udfc6\udfc7\udfc8\udfc9\udfca\udfcb\udfcc\udfcd\udfce\udfcf\udfd0\udfd1\udfd2\udfd3\udfd4\udfd5\udfd6\udfd7\udfd8\udfd9\udfda\udfdb\udfdc\udfdd\udfde\udfdf\udfe0\udfe1\udfe2\udfe3\udfe4\udfe5\udfe6\udfe7\udfe8\udfe9\udfea\udfeb\udfec\udfed\udfee\udfef\udff0\udff1\udff2\udff3\udff4\udff5\udff6\udff7\udff8\udff9\udffa\udffb\udffc\udffd\udffe\udfff'") +except UnicodeDecodeError: + Cs = '' # Jython can't handle isolated surrogates + +Ll = u'abcdefghijklmnopqrstuvwxyz\xaa\xb5\xba\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e\u017f\u0180\u0183\u0185\u0188\u018c\u018d\u0192\u0195\u0199\u019a\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9\u01ba\u01bd\u01be\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233\u0234\u0235\u0236\u0237\u0238\u0239\u023c\u023f\u0240\u0
250\u0251\u0252\u0253\u0254\u0255\u0256\u0257\u0258\u0259\u025a\u025b\u025c\u025d\u025e\u025f\u0260\u0261\u0262\u0263\u0264\u0265\u0266\u0267\u0268\u0269\u026a\u026b\u026c\u026d\u026e\u026f\u0270\u0271\u0272\u0273\u0274\u0275\u0276\u0277\u0278\u0279\u027a\u027b\u027c\u027d\u027e\u027f\u0280\u0281\u0282\u0283\u0284\u0285\u0286\u0287\u0288\u0289\u028a\u028b\u028c\u028d\u028e\u028f\u0290\u0291\u0292\u0293\u0294\u0295\u0296\u0297\u0298\u0299\u029a\u029b\u029c\u029d\u029e\u029f\u02a0\u02a1\u02a2\u02a3\u02a4\u02a5\u02a6\u02a7\u02a8\u02a9\u02aa\u02ab\u02ac\u02ad\u02ae\u02af\u0390\u03ac\u03ad\u03ae\u03af\u03b0\u03b1\u03b2\u03b3\u03b4\u03b5\u03b6\u03b7\u03b8\u03b9\u03ba\u03bb\u03bc\u03bd\u03be\u03bf\u03c0\u03c1\u03c2\u03c3\u03c4\u03c5\u03c6\u03c7\u03c8\u03c9\u03ca\u03cb\u03cc\u03cd\u03ce\u03d0\u03d1\u03d5\u03d6\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef\u03f0\u03f1\u03f2\u03f3\u03f5\u03f8\u03fb\u03fc\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0450\u0451\u0452\u0453\u0454\u0455\u0456\u0457\u0458\u0459\u045a\u045b\u045c\u045d\u045e\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0561\u0562\u0563\u0564\u0565\u0566\u0567\u0568\u0569\u056a\u056b\u056c\u056d\u056e\u056f\u0570\u0571\u0572\u0573\u0574\u0575\u0576\u0577\u0578\u0579\u057a\u057b\u057c\u057d\u057e\u057f\u0580\u0581\u0582\u0583\u0584\u0585\u0586\u0587\u1d00\u1d01\u1d02\u1d03\u1d04\u1d05\u1d06\u1d07\u1d08\u1d0
9\u1d0a\u1d0b\u1d0c\u1d0d\u1d0e\u1d0f\u1d10\u1d11\u1d12\u1d13\u1d14\u1d15\u1d16\u1d17\u1d18\u1d19\u1d1a\u1d1b\u1d1c\u1d1d\u1d1e\u1d1f\u1d20\u1d21\u1d22\u1d23\u1d24\u1d25\u1d26\u1d27\u1d28\u1d29\u1d2a\u1d2b\u1d62\u1d63\u1d64\u1d65\u1d66\u1d67\u1d68\u1d69\u1d6a\u1d6b\u1d6c\u1d6d\u1d6e\u1d6f\u1d70\u1d71\u1d72\u1d73\u1d74\u1d75\u1d76\u1d77\u1d79\u1d7a\u1d7b\u1d7c\u1d7d\u1d7e\u1d7f\u1d80\u1d81\u1d82\u1d83\u1d84\u1d85\u1d86\u1d87\u1d88\u1d89\u1d8a\u1d8b\u1d8c\u1d8d\u1d8e\u1d8f\u1d90\u1d91\u1d92\u1d93\u1d94\u1d95\u1d96\u1d97\u1d98\u1d99\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95\u1e96\u1e97\u1e98\u1e99\u1e9a\u1e9b\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1f00\u1f01\u1f02\u1f03\u1f04\u1f05\u1f06\u1f07\u1f10\u1f11\u1f12\u1f13\u1f14\u1f15\u1f20\u1f21\u1f22\u1f23\u1f24\u1f25\u1f26\u1f27\u1f30\u1f31\u1f32\u1f33\u1f34\u1f35\u1f36\u1f37\u1f40\u1f41\u1f42\u1f43\u1f44\u1f45\u1f50\u1f51\u1f52\u1f53\u1f54\u1f55\u1f56\u1f57\u1f60\u1f61\u1f62\u1f63\u1f64\u1f65\u1f66\u1f67\u1f70\u1f71\u1f72\u1f73\u1f74\u1f75\u1f76\u1f77\u1f78\u1f79\u1f7a\u1f7b\u1f7c\u1f7d\u1f80\u1f81\u1f82\u1f83\u1f84\u1f85\u1f86\u1f87\u1f90\u1f91\u1f92\u1f93\u1f94\u1f95\u1f96\u1f97\u1fa0\u1fa1\u1fa2\u1fa3\u1fa4\u1fa5\u1fa6\u1fa7\u1fb0\u1fb1\u1fb2\u1fb3\u1fb4\u1fb6\u1fb7\u1fbe\u1fc2\u1fc3\u1fc4\u1fc6\u1fc7\u1fd0\u1fd1\u1fd2\u1fd3\u1fd6\u1fd7\u1fe0\u1fe1\u1fe2\u1fe3\u1fe4\u1fe5\u1fe6\u1fe7\
u1ff2\u1ff3\u1ff4\u1ff6\u1ff7\u2071\u207f\u210a\u210e\u210f\u2113\u212f\u2134\u2139\u213c\u213d\u2146\u2147\u2148\u2149\u2c30\u2c31\u2c32\u2c33\u2c34\u2c35\u2c36\u2c37\u2c38\u2c39\u2c3a\u2c3b\u2c3c\u2c3d\u2c3e\u2c3f\u2c40\u2c41\u2c42\u2c43\u2c44\u2c45\u2c46\u2c47\u2c48\u2c49\u2c4a\u2c4b\u2c4c\u2c4d\u2c4e\u2c4f\u2c50\u2c51\u2c52\u2c53\u2c54\u2c55\u2c56\u2c57\u2c58\u2c59\u2c5a\u2c5b\u2c5c\u2c5d\u2c5e\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3\u2ce4\u2d00\u2d01\u2d02\u2d03\u2d04\u2d05\u2d06\u2d07\u2d08\u2d09\u2d0a\u2d0b\u2d0c\u2d0d\u2d0e\u2d0f\u2d10\u2d11\u2d12\u2d13\u2d14\u2d15\u2d16\u2d17\u2d18\u2d19\u2d1a\u2d1b\u2d1c\u2d1d\u2d1e\u2d1f\u2d20\u2d21\u2d22\u2d23\u2d24\u2d25\ufb00\ufb01\ufb02\ufb03\ufb04\ufb05\ufb06\ufb13\ufb14\ufb15\ufb16\ufb17\uff41\uff42\uff43\uff44\uff45\uff46\uff47\uff48\uff49\uff4a\uff4b\uff4c\uff4d\uff4e\uff4f\uff50\uff51\uff52\uff53\uff54\uff55\uff56\uff57\uff58\uff59\uff5a' + +Lm = 
u'\u02b0\u02b1\u02b2\u02b3\u02b4\u02b5\u02b6\u02b7\u02b8\u02b9\u02ba\u02bb\u02bc\u02bd\u02be\u02bf\u02c0\u02c1\u02c6\u02c7\u02c8\u02c9\u02ca\u02cb\u02cc\u02cd\u02ce\u02cf\u02d0\u02d1\u02e0\u02e1\u02e2\u02e3\u02e4\u02ee\u037a\u0559\u0640\u06e5\u06e6\u0e46\u0ec6\u10fc\u17d7\u1843\u1d2c\u1d2d\u1d2e\u1d2f\u1d30\u1d31\u1d32\u1d33\u1d34\u1d35\u1d36\u1d37\u1d38\u1d39\u1d3a\u1d3b\u1d3c\u1d3d\u1d3e\u1d3f\u1d40\u1d41\u1d42\u1d43\u1d44\u1d45\u1d46\u1d47\u1d48\u1d49\u1d4a\u1d4b\u1d4c\u1d4d\u1d4e\u1d4f\u1d50\u1d51\u1d52\u1d53\u1d54\u1d55\u1d56\u1d57\u1d58\u1d59\u1d5a\u1d5b\u1d5c\u1d5d\u1d5e\u1d5f\u1d60\u1d61\u1d78\u1d9b\u1d9c\u1d9d\u1d9e\u1d9f\u1da0\u1da1\u1da2\u1da3\u1da4\u1da5\u1da6\u1da7\u1da8\u1da9\u1daa\u1dab\u1dac\u1dad\u1dae\u1daf\u1db0\u1db1\u1db2\u1db3\u1db4\u1db5\u1db6\u1db7\u1db8\u1db9\u1dba\u1dbb\u1dbc\u1dbd\u1dbe\u1dbf\u2090\u2091\u2092\u2093\u2094\u2d6f\u3005\u3031\u3032\u3033\u3034\u3035\u303b\u309d\u309e\u30fc\u30fd\u30fe\ua015\uff70\uff9e\uff9f' + +Lo = u'\u01bb\u01c0\u01c1\u01c2\u01c3\u05d0\u05d1\u05d2\u05d3\u05d4\u05d5\u05d6\u05d7\u05d8\u05d9\u05da\u05db\u05dc\u05dd\u05de\u05df\u05e0\u05e1\u05e2\u05e3\u05e4\u05e5\u05e6\u05e7\u05e8\u05e9\u05ea\u05f0\u05f1\u05f2\u0621\u0622\u0623\u0624\u0625\u0626\u0627\u0628\u0629\u062a\u062b\u062c\u062d\u062e\u062f\u0630\u0631\u0632\u0633\u0634\u0635\u0636\u0637\u0638\u0639\u063a\u0641\u0642\u0643\u0644\u0645\u0646\u0647\u0648\u0649\u064a\u066e\u066f\u0671\u0672\u0673\u0674\u0675\u0676\u0677\u0678\u0679\u067a\u067b\u067c\u067d\u067e\u067f\u0680\u0681\u0682\u0683\u0684\u0685\u0686\u0687\u0688\u0689\u068a\u068b\u068c\u068d\u068e\u068f\u0690\u0691\u0692\u0693\u0694\u0695\u0696\u0697\u0698\u0699\u069a\u069b\u069c\u069d\u069e\u069f\u06a0\u06a1\u06a2\u06a3\u06a4\u06a5\u06a6\u06a7\u06a8\u06a9\u06aa\u06ab\u06ac\u06ad\u06ae\u06af\u06b0\u06b1\u06b2\u06b3\u06b4\u06b5\u06b6\u06b7\u06b8\u06b9\u06ba\u06bb\u06bc\u06bd\u06be\u06bf\u06c0\u06c1\u06c2\u06c3\u06c4\u06c5\u06c6\u06c7\u06c8\u06c9\u06ca\u06cb\u06cc\u06cd\u06ce\u06cf\u06d0\u06d1\u06d2
\u06d3\u06d5\u06ee\u06ef\u06fa\u06fb\u06fc\u06ff\u0710\u0712\u0713\u0714\u0715\u0716\u0717\u0718\u0719\u071a\u071b\u071c\u071d\u071e\u071f\u0720\u0721\u0722\u0723\u0724\u0725\u0726\u0727\u0728\u0729\u072a\u072b\u072c\u072d\u072e\u072f\u074d\u074e\u074f\u0750\u0751\u0752\u0753\u0754\u0755\u0756\u0757\u0758\u0759\u075a\u075b\u075c\u075d\u075e\u075f\u0760\u0761\u0762\u0763\u0764\u0765\u0766\u0767\u0768\u0769\u076a\u076b\u076c\u076d\u0780\u0781\u0782\u0783\u0784\u0785\u0786\u0787\u0788\u0789\u078a\u078b\u078c\u078d\u078e\u078f\u0790\u0791\u0792\u0793\u0794\u0795\u0796\u0797\u0798\u0799\u079a\u079b\u079c\u079d\u079e\u079f\u07a0\u07a1\u07a2\u07a3\u07a4\u07a5\u07b1\u0904\u0905\u0906\u0907\u0908\u0909\u090a\u090b\u090c\u090d\u090e\u090f\u0910\u0911\u0912\u0913\u0914\u0915\u0916\u0917\u0918\u0919\u091a\u091b\u091c\u091d\u091e\u091f\u0920\u0921\u0922\u0923\u0924\u0925\u0926\u0927\u0928\u0929\u092a\u092b\u092c\u092d\u092e\u092f\u0930\u0931\u0932\u0933\u0934\u0935\u0936\u0937\u0938\u0939\u093d\u0950\u0958\u0959\u095a\u095b\u095c\u095d\u095e\u095f\u0960\u0961\u097d\u0985\u0986\u0987\u0988\u0989\u098a\u098b\u098c\u098f\u0990\u0993\u0994\u0995\u0996\u0997\u0998\u0999\u099a\u099b\u099c\u099d\u099e\u099f\u09a0\u09a1\u09a2\u09a3\u09a4\u09a5\u09a6\u09a7\u09a8\u09aa\u09ab\u09ac\u09ad\u09ae\u09af\u09b0\u09b2\u09b6\u09b7\u09b8\u09b9\u09bd\u09ce\u09dc\u09dd\u09df\u09e0\u09e1\u09f0\u09f1\u0a05\u0a06\u0a07\u0a08\u0a09\u0a0a\u0a0f\u0a10\u0a13\u0a14\u0a15\u0a16\u0a17\u0a18\u0a19\u0a1a\u0a1b\u0a1c\u0a1d\u0a1e\u0a1f\u0a20\u0a21\u0a22\u0a23\u0a24\u0a25\u0a26\u0a27\u0a28\u0a2a\u0a2b\u0a2c\u0a2d\u0a2e\u0a2f\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59\u0a5a\u0a5b\u0a5c\u0a5e\u0a72\u0a73\u0a74\u0a85\u0a86\u0a87\u0a88\u0a89\u0a8a\u0a8b\u0a8c\u0a8d\u0a8f\u0a90\u0a91\u0a93\u0a94\u0a95\u0a96\u0a97\u0a98\u0a99\u0a9a\u0a9b\u0a9c\u0a9d\u0a9e\u0a9f\u0aa0\u0aa1\u0aa2\u0aa3\u0aa4\u0aa5\u0aa6\u0aa7\u0aa8\u0aaa\u0aab\u0aac\u0aad\u0aae\u0aaf\u0ab0\u0ab2\u0ab3\u0ab5\u0ab6\u0ab7\u0ab8\u0ab9\u0abd\u0ad0\u0ae0\u
0ae1\u0b05\u0b06\u0b07\u0b08\u0b09\u0b0a\u0b0b\u0b0c\u0b0f\u0b10\u0b13\u0b14\u0b15\u0b16\u0b17\u0b18\u0b19\u0b1a\u0b1b\u0b1c\u0b1d\u0b1e\u0b1f\u0b20\u0b21\u0b22\u0b23\u0b24\u0b25\u0b26\u0b27\u0b28\u0b2a\u0b2b\u0b2c\u0b2d\u0b2e\u0b2f\u0b30\u0b32\u0b33\u0b35\u0b36\u0b37\u0b38\u0b39\u0b3d\u0b5c\u0b5d\u0b5f\u0b60\u0b61\u0b71\u0b83\u0b85\u0b86\u0b87\u0b88\u0b89\u0b8a\u0b8e\u0b8f\u0b90\u0b92\u0b93\u0b94\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8\u0ba9\u0baa\u0bae\u0baf\u0bb0\u0bb1\u0bb2\u0bb3\u0bb4\u0bb5\u0bb6\u0bb7\u0bb8\u0bb9\u0c05\u0c06\u0c07\u0c08\u0c09\u0c0a\u0c0b\u0c0c\u0c0e\u0c0f\u0c10\u0c12\u0c13\u0c14\u0c15\u0c16\u0c17\u0c18\u0c19\u0c1a\u0c1b\u0c1c\u0c1d\u0c1e\u0c1f\u0c20\u0c21\u0c22\u0c23\u0c24\u0c25\u0c26\u0c27\u0c28\u0c2a\u0c2b\u0c2c\u0c2d\u0c2e\u0c2f\u0c30\u0c31\u0c32\u0c33\u0c35\u0c36\u0c37\u0c38\u0c39\u0c60\u0c61\u0c85\u0c86\u0c87\u0c88\u0c89\u0c8a\u0c8b\u0c8c\u0c8e\u0c8f\u0c90\u0c92\u0c93\u0c94\u0c95\u0c96\u0c97\u0c98\u0c99\u0c9a\u0c9b\u0c9c\u0c9d\u0c9e\u0c9f\u0ca0\u0ca1\u0ca2\u0ca3\u0ca4\u0ca5\u0ca6\u0ca7\u0ca8\u0caa\u0cab\u0cac\u0cad\u0cae\u0caf\u0cb0\u0cb1\u0cb2\u0cb3\u0cb5\u0cb6\u0cb7\u0cb8\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0d05\u0d06\u0d07\u0d08\u0d09\u0d0a\u0d0b\u0d0c\u0d0e\u0d0f\u0d10\u0d12\u0d13\u0d14\u0d15\u0d16\u0d17\u0d18\u0d19\u0d1a\u0d1b\u0d1c\u0d1d\u0d1e\u0d1f\u0d20\u0d21\u0d22\u0d23\u0d24\u0d25\u0d26\u0d27\u0d28\u0d2a\u0d2b\u0d2c\u0d2d\u0d2e\u0d2f\u0d30\u0d31\u0d32\u0d33\u0d34\u0d35\u0d36\u0d37\u0d38\u0d39\u0d60\u0d61\u0d85\u0d86\u0d87\u0d88\u0d89\u0d8a\u0d8b\u0d8c\u0d8d\u0d8e\u0d8f\u0d90\u0d91\u0d92\u0d93\u0d94\u0d95\u0d96\u0d9a\u0d9b\u0d9c\u0d9d\u0d9e\u0d9f\u0da0\u0da1\u0da2\u0da3\u0da4\u0da5\u0da6\u0da7\u0da8\u0da9\u0daa\u0dab\u0dac\u0dad\u0dae\u0daf\u0db0\u0db1\u0db3\u0db4\u0db5\u0db6\u0db7\u0db8\u0db9\u0dba\u0dbb\u0dbd\u0dc0\u0dc1\u0dc2\u0dc3\u0dc4\u0dc5\u0dc6\u0e01\u0e02\u0e03\u0e04\u0e05\u0e06\u0e07\u0e08\u0e09\u0e0a\u0e0b\u0e0c\u0e0d\u0e0e\u0e0f\u0e10\u0e11\u0e12\u0e13\u0e14\u0e15\u0e16\u0e17\u0e18\u0e19\u0e1a\u0e1b\u0e1c\u0e
1d\u0e1e\u0e1f\u0e20\u0e21\u0e22\u0e23\u0e24\u0e25\u0e26\u0e27\u0e28\u0e29\u0e2a\u0e2b\u0e2c\u0e2d\u0e2e\u0e2f\u0e30\u0e32\u0e33\u0e40\u0e41\u0e42\u0e43\u0e44\u0e45\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94\u0e95\u0e96\u0e97\u0e99\u0e9a\u0e9b\u0e9c\u0e9d\u0e9e\u0e9f\u0ea1\u0ea2\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead\u0eae\u0eaf\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0\u0ec1\u0ec2\u0ec3\u0ec4\u0edc\u0edd\u0f00\u0f40\u0f41\u0f42\u0f43\u0f44\u0f45\u0f46\u0f47\u0f49\u0f4a\u0f4b\u0f4c\u0f4d\u0f4e\u0f4f\u0f50\u0f51\u0f52\u0f53\u0f54\u0f55\u0f56\u0f57\u0f58\u0f59\u0f5a\u0f5b\u0f5c\u0f5d\u0f5e\u0f5f\u0f60\u0f61\u0f62\u0f63\u0f64\u0f65\u0f66\u0f67\u0f68\u0f69\u0f6a\u0f88\u0f89\u0f8a\u0f8b\u1000\u1001\u1002\u1003\u1004\u1005\u1006\u1007\u1008\u1009\u100a\u100b\u100c\u100d\u100e\u100f\u1010\u1011\u1012\u1013\u1014\u1015\u1016\u1017\u1018\u1019\u101a\u101b\u101c\u101d\u101e\u101f\u1020\u1021\u1023\u1024\u1025\u1026\u1027\u1029\u102a\u1050\u1051\u1052\u1053\u1054\u1055\u10d0\u10d1\u10d2\u10d3\u10d4\u10d5\u10d6\u10d7\u10d8\u10d9\u10da\u10db\u10dc\u10dd\u10de\u10df\u10e0\u10e1\u10e2\u10e3\u10e4\u10e5\u10e6\u10e7\u10e8\u10e9\u10ea\u10eb\u10ec\u10ed\u10ee\u10ef\u10f0\u10f1\u10f2\u10f3\u10f4\u10f5\u10f6\u10f7\u10f8\u10f9\u10fa\u1100\u1101\u1102\u1103\u1104\u1105\u1106\u1107\u1108\u1109\u110a\u110b\u110c\u110d\u110e\u110f\u1110\u1111\u1112\u1113\u1114\u1115\u1116\u1117\u1118\u1119\u111a\u111b\u111c\u111d\u111e\u111f\u1120\u1121\u1122\u1123\u1124\u1125\u1126\u1127\u1128\u1129\u112a\u112b\u112c\u112d\u112e\u112f\u1130\u1131\u1132\u1133\u1134\u1135\u1136\u1137\u1138\u1139\u113a\u113b\u113c\u113d\u113e\u113f\u1140\u1141\u1142\u1143\u1144\u1145\u1146\u1147\u1148\u1149\u114a\u114b\u114c\u114d\u114e\u114f\u1150\u1151\u1152\u1153\u1154\u1155\u1156\u1157\u1158\u1159\u115f\u1160\u1161\u1162\u1163\u1164\u1165\u1166\u1167\u1168\u1169\u116a\u116b\u116c\u116d\u116e\u116f\u1170\u1171\u1172\u1173\u1174\u1175\u1176\u1177\u1178\u1179\u117a\u117b\u117c\u117d\u117e\u117f\u1180\u1181\u1182\u1183\u1184\u1185\u1186
\u1187\u1188\u1189\u118a\u118b\u118c\u118d\u118e\u118f\u1190\u1191\u1192\u1193\u1194\u1195\u1196\u1197\u1198\u1199\u119a\u119b\u119c\u119d\u119e\u119f\u11a0\u11a1\u11a2\u11a8\u11a9\u11aa\u11ab\u11ac\u11ad\u11ae\u11af\u11b0\u11b1\u11b2\u11b3\u11b4\u11b5\u11b6\u11b7\u11b8\u11b9\u11ba\u11bb\u11bc\u11bd\u11be\u11bf\u11c0\u11c1\u11c2\u11c3\u11c4\u11c5\u11c6\u11c7\u11c8\u11c9\u11ca\u11cb\u11cc\u11cd\u11ce\u11cf\u11d0\u11d1\u11d2\u11d3\u11d4\u11d5\u11d6\u11d7\u11d8\u11d9\u11da\u11db\u11dc\u11dd\u11de\u11df\u11e0\u11e1\u11e2\u11e3\u11e4\u11e5\u11e6\u11e7\u11e8\u11e9\u11ea\u11eb\u11ec\u11ed\u11ee\u11ef\u11f0\u11f1\u11f2\u11f3\u11f4\u11f5\u11f6\u11f7\u11f8\u11f9\u1200\u1201\u1202\u1203\u1204\u1205\u1206\u1207\u1208\u1209\u120a\u120b\u120c\u120d\u120e\u120f\u1210\u1211\u1212\u1213\u1214\u1215\u1216\u1217\u1218\u1219\u121a\u121b\u121c\u121d\u121e\u121f\u1220\u1221\u1222\u1223\u1224\u1225\u1226\u1227\u1228\u1229\u122a\u122b\u122c\u122d\u122e\u122f\u1230\u1231\u1232\u1233\u1234\u1235\u1236\u1237\u1238\u1239\u123a\u123b\u123c\u123d\u123e\u123f\u1240\u1241\u1242\u1243\u1244\u1245\u1246\u1247\u1248\u124a\u124b\u124c\u124d\u1250\u1251\u1252\u1253\u1254\u1255\u1256\u1258\u125a\u125b\u125c\u125d\u1260\u1261\u1262\u1263\u1264\u1265\u1266\u1267\u1268\u1269\u126a\u126b\u126c\u126d\u126e\u126f\u1270\u1271\u1272\u1273\u1274\u1275\u1276\u1277\u1278\u1279\u127a\u127b\u127c\u127d\u127e\u127f\u1280\u1281\u1282\u1283\u1284\u1285\u1286\u1287\u1288\u128a\u128b\u128c\u128d\u1290\u1291\u1292\u1293\u1294\u1295\u1296\u1297\u1298\u1299\u129a\u129b\u129c\u129d\u129e\u129f\u12a0\u12a1\u12a2\u12a3\u12a4\u12a5\u12a6\u12a7\u12a8\u12a9\u12aa\u12ab\u12ac\u12ad\u12ae\u12af\u12b0\u12b2\u12b3\u12b4\u12b5\u12b8\u12b9\u12ba\u12bb\u12bc\u12bd\u12be\u12c0\u12c2\u12c3\u12c4\u12c5\u12c8\u12c9\u12ca\u12cb\u12cc\u12cd\u12ce\u12cf\u12d0\u12d1\u12d2\u12d3\u12d4\u12d5\u12d6\u12d8\u12d9\u12da\u12db\u12dc\u12dd\u12de\u12df\u12e0\u12e1\u12e2\u12e3\u12e4\u12e5\u12e6\u12e7\u12e8\u12e9\u12ea\u12eb\u12ec\u12ed\u12ee\u12ef\u12f0\u
12f1\u12f2\u12f3\u12f4\u12f5\u12f6\u12f7\u12f8\u12f9\u12fa\u12fb\u12fc\u12fd\u12fe\u12ff\u1300\u1301\u1302\u1303\u1304\u1305\u1306\u1307\u1308\u1309\u130a\u130b\u130c\u130d\u130e\u130f\u1310\u1312\u1313\u1314\u1315\u1318\u1319\u131a\u131b\u131c\u131d\u131e\u131f\u1320\u1321\u1322\u1323\u1324\u1325\u1326\u1327\u1328\u1329\u132a\u132b\u132c\u132d\u132e\u132f\u1330\u1331\u1332\u1333\u1334\u1335\u1336\u1337\u1338\u1339\u133a\u133b\u133c\u133d\u133e\u133f\u1340\u1341\u1342\u1343\u1344\u1345\u1346\u1347\u1348\u1349\u134a\u134b\u134c\u134d\u134e\u134f\u1350\u1351\u1352\u1353\u1354\u1355\u1356\u1357\u1358\u1359\u135a\u1380\u1381\u1382\u1383\u1384\u1385\u1386\u1387\u1388\u1389\u138a\u138b\u138c\u138d\u138e\u138f\u13a0\u13a1\u13a2\u13a3\u13a4\u13a5\u13a6\u13a7\u13a8\u13a9\u13aa\u13ab\u13ac\u13ad\u13ae\u13af\u13b0\u13b1\u13b2\u13b3\u13b4\u13b5\u13b6\u13b7\u13b8\u13b9\u13ba\u13bb\u13bc\u13bd\u13be\u13bf\u13c0\u13c1\u13c2\u13c3\u13c4\u13c5\u13c6\u13c7\u13c8\u13c9\u13ca\u13cb\u13cc\u13cd\u13ce\u13cf\u13d0\u13d1\u13d2\u13d3\u13d4\u13d5\u13d6\u13d7\u13d8\u13d9\u13da\u13db\u13dc\u13dd\u13de\u13df\u13e0\u13e1\u13e2\u13e3\u13e4\u13e5\u13e6\u13e7\u13e8\u13e9\u13ea\u13eb\u13ec\u13ed\u13ee\u13ef\u13f0\u13f1\u13f2\u13f3\u13f4\u1401\u1402\u1403\u1404\u1405\u1406\u1407\u1408\u1409\u140a\u140b\u140c\u140d\u140e\u140f\u1410\u1411\u1412\u1413\u1414\u1415\u1416\u1417\u1418\u1419\u141a\u141b\u141c\u141d\u141e\u141f\u1420\u1421\u1422\u1423\u1424\u1425\u1426\u1427\u1428\u1429\u142a\u142b\u142c\u142d\u142e\u142f\u1430\u1431\u1432\u1433\u1434\u1435\u1436\u1437\u1438\u1439\u143a\u143b\u143c\u143d\u143e\u143f\u1440\u1441\u1442\u1443\u1444\u1445\u1446\u1447\u1448\u1449\u144a\u144b\u144c\u144d\u144e\u144f\u1450\u1451\u1452\u1453\u1454\u1455\u1456\u1457\u1458\u1459\u145a\u145b\u145c\u145d\u145e\u145f\u1460\u1461\u1462\u1463\u1464\u1465\u1466\u1467\u1468\u1469\u146a\u146b\u146c\u146d\u146e\u146f\u1470\u1471\u1472\u1473\u1474\u1475\u1476\u1477\u1478\u1479\u147a\u147b\u147c\u147d\u147e\u147f\u1480\u1481\u14
82\u1483\u1484\u1485\u1486\u1487\u1488\u1489\u148a\u148b\u148c\u148d\u148e\u148f\u1490\u1491\u1492\u1493\u1494\u1495\u1496\u1497\u1498\u1499\u149a\u149b\u149c\u149d\u149e\u149f\u14a0\u14a1\u14a2\u14a3\u14a4\u14a5\u14a6\u14a7\u14a8\u14a9\u14aa\u14ab\u14ac\u14ad\u14ae\u14af\u14b0\u14b1\u14b2\u14b3\u14b4\u14b5\u14b6\u14b7\u14b8\u14b9\u14ba\u14bb\u14bc\u14bd\u14be\u14bf\u14c0\u14c1\u14c2\u14c3\u14c4\u14c5\u14c6\u14c7\u14c8\u14c9\u14ca\u14cb\u14cc\u14cd\u14ce\u14cf\u14d0\u14d1\u14d2\u14d3\u14d4\u14d5\u14d6\u14d7\u14d8\u14d9\u14da\u14db\u14dc\u14dd\u14de\u14df\u14e0\u14e1\u14e2\u14e3\u14e4\u14e5\u14e6\u14e7\u14e8\u14e9\u14ea\u14eb\u14ec\u14ed\u14ee\u14ef\u14f0\u14f1\u14f2\u14f3\u14f4\u14f5\u14f6\u14f7\u14f8\u14f9\u14fa\u14fb\u14fc\u14fd\u14fe\u14ff\u1500\u1501\u1502\u1503\u1504\u1505\u1506\u1507\u1508\u1509\u150a\u150b\u150c\u150d\u150e\u150f\u1510\u1511\u1512\u1513\u1514\u1515\u1516\u1517\u1518\u1519\u151a\u151b\u151c\u151d\u151e\u151f\u1520\u1521\u1522\u1523\u1524\u1525\u1526\u1527\u1528\u1529\u152a\u152b\u152c\u152d\u152e\u152f\u1530\u1531\u1532\u1533\u1534\u1535\u1536\u1537\u1538\u1539\u153a\u153b\u153c\u153d\u153e\u153f\u1540\u1541\u1542\u1543\u1544\u1545\u1546\u1547\u1548\u1549\u154a\u154b\u154c\u154d\u154e\u154f\u1550\u1551\u1552\u1553\u1554\u1555\u1556\u1557\u1558\u1559\u155a\u155b\u155c\u155d\u155e\u155f\u1560\u1561\u1562\u1563\u1564\u1565\u1566\u1567\u1568\u1569\u156a\u156b\u156c\u156d\u156e\u156f\u1570\u1571\u1572\u1573\u1574\u1575\u1576\u1577\u1578\u1579\u157a\u157b\u157c\u157d\u157e\u157f\u1580\u1581\u1582\u1583\u1584\u1585\u1586\u1587\u1588\u1589\u158a\u158b\u158c\u158d\u158e\u158f\u1590\u1591\u1592\u1593\u1594\u1595\u1596\u1597\u1598\u1599\u159a\u159b\u159c\u159d\u159e\u159f\u15a0\u15a1\u15a2\u15a3\u15a4\u15a5\u15a6\u15a7\u15a8\u15a9\u15aa\u15ab\u15ac\u15ad\u15ae\u15af\u15b0\u15b1\u15b2\u15b3\u15b4\u15b5\u15b6\u15b7\u15b8\u15b9\u15ba\u15bb\u15bc\u15bd\u15be\u15bf\u15c0\u15c1\u15c2\u15c3\u15c4\u15c5\u15c6\u15c7\u15c8\u15c9\u15ca\u15cb\u15cc\u15cd\u15ce\u15cf
\u15d0\u15d1\u15d2\u15d3\u15d4\u15d5\u15d6\u15d7\u15d8\u15d9\u15da\u15db\u15dc\u15dd\u15de\u15df\u15e0\u15e1\u15e2\u15e3\u15e4\u15e5\u15e6\u15e7\u15e8\u15e9\u15ea\u15eb\u15ec\u15ed\u15ee\u15ef\u15f0\u15f1\u15f2\u15f3\u15f4\u15f5\u15f6\u15f7\u15f8\u15f9\u15fa\u15fb\u15fc\u15fd\u15fe\u15ff\u1600\u1601\u1602\u1603\u1604\u1605\u1606\u1607\u1608\u1609\u160a\u160b\u160c\u160d\u160e\u160f\u1610\u1611\u1612\u1613\u1614\u1615\u1616\u1617\u1618\u1619\u161a\u161b\u161c\u161d\u161e\u161f\u1620\u1621\u1622\u1623\u1624\u1625\u1626\u1627\u1628\u1629\u162a\u162b\u162c\u162d\u162e\u162f\u1630\u1631\u1632\u1633\u1634\u1635\u1636\u1637\u1638\u1639\u163a\u163b\u163c\u163d\u163e\u163f\u1640\u1641\u1642\u1643\u1644\u1645\u1646\u1647\u1648\u1649\u164a\u164b\u164c\u164d\u164e\u164f\u1650\u1651\u1652\u1653\u1654\u1655\u1656\u1657\u1658\u1659\u165a\u165b\u165c\u165d\u165e\u165f\u1660\u1661\u1662\u1663\u1664\u1665\u1666\u1667\u1668\u1669\u166a\u166b\u166c\u166f\u1670\u1671\u1672\u1673\u1674\u1675\u1676\u1681\u1682\u1683\u1684\u1685\u1686\u1687\u1688\u1689\u168a\u168b\u168c\u168d\u168e\u168f\u1690\u1691\u1692\u1693\u1694\u1695\u1696\u1697\u1698\u1699\u169a\u16a0\u16a1\u16a2\u16a3\u16a4\u16a5\u16a6\u16a7\u16a8\u16a9\u16aa\u16ab\u16ac\u16ad\u16ae\u16af\u16b0\u16b1\u16b2\u16b3\u16b4\u16b5\u16b6\u16b7\u16b8\u16b9\u16ba\u16bb\u16bc\u16bd\u16be\u16bf\u16c0\u16c1\u16c2\u16c3\u16c4\u16c5\u16c6\u16c7\u16c8\u16c9\u16ca\u16cb\u16cc\u16cd\u16ce\u16cf\u16d0\u16d1\u16d2\u16d3\u16d4\u16d5\u16d6\u16d7\u16d8\u16d9\u16da\u16db\u16dc\u16dd\u16de\u16df\u16e0\u16e1\u16e2\u16e3\u16e4\u16e5\u16e6\u16e7\u16e8\u16e9\u16ea\u1700\u1701\u1702\u1703\u1704\u1705\u1706\u1707\u1708\u1709\u170a\u170b\u170c\u170e\u170f\u1710\u1711\u1720\u1721\u1722\u1723\u1724\u1725\u1726\u1727\u1728\u1729\u172a\u172b\u172c\u172d\u172e\u172f\u1730\u1731\u1740\u1741\u1742\u1743\u1744\u1745\u1746\u1747\u1748\u1749\u174a\u174b\u174c\u174d\u174e\u174f\u1750\u1751\u1760\u1761\u1762\u1763\u1764\u1765\u1766\u1767\u1768\u1769\u176a\u176b\u176c\u176e\u
176f\u1770\u1780\u1781\u1782\u1783\u1784\u1785\u1786\u1787\u1788\u1789\u178a\u178b\u178c\u178d\u178e\u178f\u1790\u1791\u1792\u1793\u1794\u1795\u1796\u1797\u1798\u1799\u179a\u179b\u179c\u179d\u179e\u179f\u17a0\u17a1\u17a2\u17a3\u17a4\u17a5\u17a6\u17a7\u17a8\u17a9\u17aa\u17ab\u17ac\u17ad\u17ae\u17af\u17b0\u17b1\u17b2\u17b3\u17dc\u1820\u1821\u1822\u1823\u1824\u1825\u1826\u1827\u1828\u1829\u182a\u182b\u182c\u182d\u182e\u182f\u1830\u1831\u1832\u1833\u1834\u1835\u1836\u1837\u1838\u1839\u183a\u183b\u183c\u183d\u183e\u183f\u1840\u1841\u1842\u1844\u1845\u1846\u1847\u1848\u1849\u184a\u184b\u184c\u184d\u184e\u184f\u1850\u1851\u1852\u1853\u1854\u1855\u1856\u1857\u1858\u1859\u185a\u185b\u185c\u185d\u185e\u185f\u1860\u1861\u1862\u1863\u1864\u1865\u1866\u1867\u1868\u1869\u186a\u186b\u186c\u186d\u186e\u186f\u1870\u1871\u1872\u1873\u1874\u1875\u1876\u1877\u1880\u1881\u1882\u1883\u1884\u1885\u1886\u1887\u1888\u1889\u188a\u188b\u188c\u188d\u188e\u188f\u1890\u1891\u1892\u1893\u1894\u1895\u1896\u1897\u1898\u1899\u189a\u189b\u189c\u189d\u189e\u189f\u18a0\u18a1\u18a2\u18a3\u18a4\u18a5\u18a6\u18a7\u18a8\u1900\u1901\u1902\u1903\u1904\u1905\u1906\u1907\u1908\u1909\u190a\u190b\u190c\u190d\u190e\u190f\u1910\u1911\u1912\u1913\u1914\u1915\u1916\u1917\u1918\u1919\u191a\u191b\u191c\u1950\u1951\u1952\u1953\u1954\u1955\u1956\u1957\u1958\u1959\u195a\u195b\u195c\u195d\u195e\u195f\u1960\u1961\u1962\u1963\u1964\u1965\u1966\u1967\u1968\u1969\u196a\u196b\u196c\u196d\u1970\u1971\u1972\u1973\u1974\u1980\u1981\u1982\u1983\u1984\u1985\u1986\u1987\u1988\u1989\u198a\u198b\u198c\u198d\u198e\u198f\u1990\u1991\u1992\u1993\u1994\u1995\u1996\u1997\u1998\u1999\u199a\u199b\u199c\u199d\u199e\u199f\u19a0\u19a1\u19a2\u19a3\u19a4\u19a5\u19a6\u19a7\u19a8\u19a9\u19c1\u19c2\u19c3\u19c4\u19c5\u19c6\u19c7\u1a00\u1a01\u1a02\u1a03\u1a04\u1a05\u1a06\u1a07\u1a08\u1a09\u1a0a\u1a0b\u1a0c\u1a0d\u1a0e\u1a0f\u1a10\u1a11\u1a12\u1a13\u1a14\u1a15\u1a16\u2135\u2136\u2137\u2138\u2d30\u2d31\u2d32\u2d33\u2d34\u2d35\u2d36\u2d37\u2d38\u2d39\u2d
3a\u2d3b\u2d3c\u2d3d\u2d3e\u2d3f\u2d40\u2d41\u2d42\u2d43\u2d44\u2d45\u2d46\u2d47\u2d48\u2d49\u2d4a\u2d4b\u2d4c\u2d4d\u2d4e\u2d4f\u2d50\u2d51\u2d52\u2d53\u2d54\u2d55\u2d56\u2d57\u2d58\u2d59\u2d5a\u2d5b\u2d5c\u2d5d\u2d5e\u2d5f\u2d60\u2d61\u2d62\u2d63\u2d64\u2d65\u2d80\u2d81\u2d82\u2d83\u2d84\u2d85\u2d86\u2d87\u2d88\u2d89\u2d8a\u2d8b\u2d8c\u2d8d\u2d8e\u2d8f\u2d90\u2d91\u2d92\u2d93\u2d94\u2d95\u2d96\u2da0\u2da1\u2da2\u2da3\u2da4\u2da5\u2da6\u2da8\u2da9\u2daa\u2dab\u2dac\u2dad\u2dae\u2db0\u2db1\u2db2\u2db3\u2db4\u2db5\u2db6\u2db8\u2db9\u2dba\u2dbb\u2dbc\u2dbd\u2dbe\u2dc0\u2dc1\u2dc2\u2dc3\u2dc4\u2dc5\u2dc6\u2dc8\u2dc9\u2dca\u2dcb\u2dcc\u2dcd\u2dce\u2dd0\u2dd1\u2dd2\u2dd3\u2dd4\u2dd5\u2dd6\u2dd8\u2dd9\u2dda\u2ddb\u2ddc\u2ddd\u2dde\u3006\u303c\u3041\u3042\u3043\u3044\u3045\u3046\u3047\u3048\u3049\u304a\u304b\u304c\u304d\u304e\u304f\u3050\u3051\u3052\u3053\u3054\u3055\u3056\u3057\u3058\u3059\u305a\u305b\u305c\u305d\u305e\u305f\u3060\u3061\u3062\u3063\u3064\u3065\u3066\u3067\u3068\u3069\u306a\u306b\u306c\u306d\u306e\u306f\u3070\u3071\u3072\u3073\u3074\u3075\u3076\u3077\u3078\u3079\u307a\u307b\u307c\u307d\u307e\u307f\u3080\u3081\u3082\u3083\u3084\u3085\u3086\u3087\u3088\u3089\u308a\u308b\u308c\u308d\u308e\u308f\u3090\u3091\u3092\u3093\u3094\u3095\u3096\u309f\u30a1\u30a2\u30a3\u30a4\u30a5\u30a6\u30a7\u30a8\u30a9\u30aa\u30ab\u30ac\u30ad\u30ae\u30af\u30b0\u30b1\u30b2\u30b3\u30b4\u30b5\u30b6\u30b7\u30b8\u30b9\u30ba\u30bb\u30bc\u30bd\u30be\u30bf\u30c0\u30c1\u30c2\u30c3\u30c4\u30c5\u30c6\u30c7\u30c8\u30c9\u30ca\u30cb\u30cc\u30cd\u30ce\u30cf\u30d0\u30d1\u30d2\u30d3\u30d4\u30d5\u30d6\u30d7\u30d8\u30d9\u30da\u30db\u30dc\u30dd\u30de\u30df\u30e0\u30e1\u30e2\u30e3\u30e4\u30e5\u30e6\u30e7\u30e8\u30e9\u30ea\u30eb\u30ec\u30ed\u30ee\u30ef\u30f0\u30f1\u30f2\u30f3\u30f4\u30f5\u30f6\u30f7\u30f8\u30f9\u30fa\u30ff\u3105\u3106\u3107\u3108\u3109\u310a\u310b\u310c\u310d\u310e\u310f\u3110\u3111\u3112\u3113\u3114\u3115\u3116\u3117\u3118\u3119\u311a\u311b\u311c\u311d\u311e\u311f\u3120\u3121\u3122\u3123
\u3124\u3125\u3126\u3127\u3128\u3129\u312a\u312b\u312c\u3131\u3132\u3133\u3134\u3135\u3136\u3137\u3138\u3139\u313a\u313b\u313c\u313d\u313e\u313f\u3140\u3141\u3142\u3143\u3144\u3145\u3146\u3147\u3148\u3149\u314a\u314b\u314c\u314d\u314e\u314f\u3150\u3151\u3152\u3153\u3154\u3155\u3156\u3157\u3158\u3159\u315a\u315b\u315c\u315d\u315e\u315f\u3160\u3161\u3162\u3163\u3164\u3165\u3166\u3167\u3168\u3169\u316a\u316b\u316c\u316d\u316e\u316f\u3170\u3171\u3172\u3173\u3174\u3175\u3176\u3177\u3178\u3179\u317a\u317b\u317c\u317d\u317e\u317f\u3180\u3181\u3182\u3183\u3184\u3185\u3186\u3187\u3188\u3189\u318a\u318b\u318c\u318d\u318e\u31a0\u31a1\u31a2\u31a3\u31a4\u31a5\u31a6\u31a7\u31a8\u31a9\u31aa\u31ab\u31ac\u31ad\u31ae\u31af\u31b0\u31b1\u31b2\u31b3\u31b4\u31b5\u31b6\u31b7\u31f0\u31f1\u31f2\u31f3\u31f4\u31f5\u31f6\u31f7\u31f8\u31f9\u31fa\u31fb\u31fc\u31fd\u31fe\u31ff\u3400\u3401\u3402\u3403\u3404\u3405\u3406\u3407\u3408\u3409\u340a\u340b\u340c\u340d\u340e\u340f\u3410\u3411\u3412\u3413\u3414\u3415\u3416\u3417\u3418\u3419\u341a\u341b\u341c\u341d\u341e\u341f\u3420\u3421\u3422\u3423\u3424\u3425\u3426\u3427\u3428\u3429\u342a\u342b\u342c\u342d\u342e\u342f\u3430\u3431\u3432\u3433\u3434\u3435\u3436\u3437\u3438\u3439\u343a\u343b\u343c\u343d\u343e\u343f\u3440\u3441\u3442\u3443\u3444\u3445\u3446\u3447\u3448\u3449\u344a\u344b\u344c\u344d\u344e\u344f\u3450\u3451\u3452\u3453\u3454\u3455\u3456\u3457\u3458\u3459\u345a\u345b\u345c\u345d\u345e\u345f\u3460\u3461\u3462\u3463\u3464\u3465\u3466\u3467\u3468\u3469\u346a\u346b\u346c\u346d\u346e\u346f\u3470\u3471\u3472\u3473\u3474\u3475\u3476\u3477\u3478\u3479\u347a\u347b\u347c\u347d\u347e\u347f\u3480\u3481\u3482\u3483\u3484\u3485\u3486\u3487\u3488\u3489\u348a\u348b\u348c\u348d\u348e\u348f\u3490\u3491\u3492\u3493\u3494\u3495\u3496\u3497\u3498\u3499\u349a\u349b\u349c\u349d\u349e\u349f\u34a0\u34a1\u34a2\u34a3\u34a4\u34a5\u34a6\u34a7\u34a8\u34a9\u34aa\u34ab\u34ac\u34ad\u34ae\u34af\u34b0\u34b1\u34b2\u34b3\u34b4\u34b5\u34b6\u34b7\u34b8\u34b9\u34ba\u34bb\u34bc\u34bd\u
34be\u34bf\u34c0\u34c1\u34c2\u34c3\u34c4\u34c5\u34c6\u34c7\u34c8\u34c9\u34ca\u34cb\u34cc\u34cd\u34ce\u34cf\u34d0\u34d1\u34d2\u34d3\u34d4\u34d5\u34d6\u34d7\u34d8\u34d9\u34da\u34db\u34dc\u34dd\u34de\u34df\u34e0\u34e1\u34e2\u34e3\u34e4\u34e5\u34e6\u34e7\u34e8\u34e9\u34ea\u34eb\u34ec\u34ed\u34ee\u34ef\u34f0\u34f1\u34f2\u34f3\u34f4\u34f5\u34f6\u34f7\u34f8\u34f9\u34fa\u34fb\u34fc\u34fd\u34fe\u34ff\u3500\u3501\u3502\u3503\u3504\u3505\u3506\u3507\u3508\u3509\u350a\u350b\u350c\u350d\u350e\u350f\u3510\u3511\u3512\u3513\u3514\u3515\u3516\u3517\u3518\u3519\u351a\u351b\u351c\u351d\u351e\u351f\u3520\u3521\u3522\u3523\u3524\u3525\u3526\u3527\u3528\u3529\u352a\u352b\u352c\u352d\u352e\u352f\u3530\u3531\u3532\u3533\u3534\u3535\u3536\u3537\u3538\u3539\u353a\u353b\u353c\u353d\u353e\u353f\u3540\u3541\u3542\u3543\u3544\u3545\u3546\u3547\u3548\u3549\u354a\u354b\u354c\u354d\u354e\u354f\u3550\u3551\u3552\u3553\u3554\u3555\u3556\u3557\u3558\u3559\u355a\u355b\u355c\u355d\u355e\u355f\u3560\u3561\u3562\u3563\u3564\u3565\u3566\u3567\u3568\u3569\u356a\u356b\u356c\u356d\u356e\u356f\u3570\u3571\u3572\u3573\u3574\u3575\u3576\u3577\u3578\u3579\u357a\u357b\u357c\u357d\u357e\u357f\u3580\u3581\u3582\u3583\u3584\u3585\u3586\u3587\u3588\u3589\u358a\u358b\u358c\u358d\u358e\u358f\u3590\u3591\u3592\u3593\u3594\u3595\u3596\u3597\u3598\u3599\u359a\u359b\u359c\u359d\u359e\u359f\u35a0\u35a1\u35a2\u35a3\u35a4\u35a5\u35a6\u35a7\u35a8\u35a9\u35aa\u35ab\u35ac\u35ad\u35ae\u35af\u35b0\u35b1\u35b2\u35b3\u35b4\u35b5\u35b6\u35b7\u35b8\u35b9\u35ba\u35bb\u35bc\u35bd\u35be\u35bf\u35c0\u35c1\u35c2\u35c3\u35c4\u35c5\u35c6\u35c7\u35c8\u35c9\u35ca\u35cb\u35cc\u35cd\u35ce\u35cf\u35d0\u35d1\u35d2\u35d3\u35d4\u35d5\u35d6\u35d7\u35d8\u35d9\u35da\u35db\u35dc\u35dd\u35de\u35df\u35e0\u35e1\u35e2\u35e3\u35e4\u35e5\u35e6\u35e7\u35e8\u35e9\u35ea\u35eb\u35ec\u35ed\u35ee\u35ef\u35f0\u35f1\u35f2\u35f3\u35f4\u35f5\u35f6\u35f7\u35f8\u35f9\u35fa\u35fb\u35fc\u35fd\u35fe\u35ff\u3600\u3601\u3602\u3603\u3604\u3605\u3606\u3607\u3608\u3609\u360a\u36
0b\u360c\u360d\u360e\u360f\u3610\u3611\u3612\u3613\u3614\u3615\u3616\u3617\u3618\u3619\u361a\u361b\u361c\u361d\u361e\u361f\u3620\u3621\u3622\u3623\u3624\u3625\u3626\u3627\u3628\u3629\u362a\u362b\u362c\u362d\u362e\u362f\u3630\u3631\u3632\u3633\u3634\u3635\u3636\u3637\u3638\u3639\u363a\u363b\u363c\u363d\u363e\u363f\u3640\u3641\u3642\u3643\u3644\u3645\u3646\u3647\u3648\u3649\u364a\u364b\u364c\u364d\u364e\u364f\u3650\u3651\u3652\u3653\u3654\u3655\u3656\u3657\u3658\u3659\u365a\u365b\u365c\u365d\u365e\u365f\u3660\u3661\u3662\u3663\u3664\u3665\u3666\u3667\u3668\u3669\u366a\u366b\u366c\u366d\u366e\u366f\u3670\u3671\u3672\u3673\u3674\u3675\u3676\u3677\u3678\u3679\u367a\u367b\u367c\u367d\u367e\u367f\u3680\u3681\u3682\u3683\u3684\u3685\u3686\u3687\u3688\u3689\u368a\u368b\u368c\u368d\u368e\u368f\u3690\u3691\u3692\u3693\u3694\u3695\u3696\u3697\u3698\u3699\u369a\u369b\u369c\u369d\u369e\u369f\u36a0\u36a1\u36a2\u36a3\u36a4\u36a5\u36a6\u36a7\u36a8\u36a9\u36aa\u36ab\u36ac\u36ad\u36ae\u36af\u36b0\u36b1\u36b2\u36b3\u36b4\u36b5\u36b6\u36b7\u36b8\u36b9\u36ba\u36bb\u36bc\u36bd\u36be\u36bf\u36c0\u36c1\u36c2\u36c3\u36c4\u36c5\u36c6\u36c7\u36c8\u36c9\u36ca\u36cb\u36cc\u36cd\u36ce\u36cf\u36d0\u36d1\u36d2\u36d3\u36d4\u36d5\u36d6\u36d7\u36d8\u36d9\u36da\u36db\u36dc\u36dd\u36de\u36df\u36e0\u36e1\u36e2\u36e3\u36e4\u36e5\u36e6\u36e7\u36e8\u36e9\u36ea\u36eb\u36ec\u36ed\u36ee\u36ef\u36f0\u36f1\u36f2\u36f3\u36f4\u36f5\u36f6\u36f7\u36f8\u36f9\u36fa\u36fb\u36fc\u36fd\u36fe\u36ff\u3700\u3701\u3702\u3703\u3704\u3705\u3706\u3707\u3708\u3709\u370a\u370b\u370c\u370d\u370e\u370f\u3710\u3711\u3712\u3713\u3714\u3715\u3716\u3717\u3718\u3719\u371a\u371b\u371c\u371d\u371e\u371f\u3720\u3721\u3722\u3723\u3724\u3725\u3726\u3727\u3728\u3729\u372a\u372b\u372c\u372d\u372e\u372f\u3730\u3731\u3732\u3733\u3734\u3735\u3736\u3737\u3738\u3739\u373a\u373b\u373c\u373d\u373e\u373f\u3740\u3741\u3742\u3743\u3744\u3745\u3746\u3747\u3748\u3749\u374a\u374b\u374c\u374d\u374e\u374f\u3750\u3751\u3752\u3753\u3754\u3755\u3756\u3757\u3758
\u3759\u375a\u375b\u375c\u375d\u375e\u375f\u3760\u3761\u3762\u3763\u3764\u3765\u3766\u3767\u3768\u3769\u376a\u376b\u376c\u376d\u376e\u376f\u3770\u3771\u3772\u3773\u3774\u3775\u3776\u3777\u3778\u3779\u377a\u377b\u377c\u377d\u377e\u377f\u3780\u3781\u3782\u3783\u3784\u3785\u3786\u3787\u3788\u3789\u378a\u378b\u378c\u378d\u378e\u378f\u3790\u3791\u3792\u3793\u3794\u3795\u3796\u3797\u3798\u3799\u379a\u379b\u379c\u379d\u379e\u379f\u37a0\u37a1\u37a2\u37a3\u37a4\u37a5\u37a6\u37a7\u37a8\u37a9\u37aa\u37ab\u37ac\u37ad\u37ae\u37af\u37b0\u37b1\u37b2\u37b3\u37b4\u37b5\u37b6\u37b7\u37b8\u37b9\u37ba\u37bb\u37bc\u37bd\u37be\u37bf\u37c0\u37c1\u37c2\u37c3\u37c4\u37c5\u37c6\u37c7\u37c8\u37c9\u37ca\u37cb\u37cc\u37cd\u37ce\u37cf\u37d0\u37d1\u37d2\u37d3\u37d4\u37d5\u37d6\u37d7\u37d8\u37d9\u37da\u37db\u37dc\u37dd\u37de\u37df\u37e0\u37e1\u37e2\u37e3\u37e4\u37e5\u37e6\u37e7\u37e8\u37e9\u37ea\u37eb\u37ec\u37ed\u37ee\u37ef\u37f0\u37f1\u37f2\u37f3\u37f4\u37f5\u37f6\u37f7\u37f8\u37f9\u37fa\u37fb\u37fc\u37fd\u37fe\u37ff\u3800\u3801\u3802\u3803\u3804\u3805\u3806\u3807\u3808\u3809\u380a\u380b\u380c\u380d\u380e\u380f\u3810\u3811\u3812\u3813\u3814\u3815\u3816\u3817\u3818\u3819\u381a\u381b\u381c\u381d\u381e\u381f\u3820\u3821\u3822\u3823\u3824\u3825\u3826\u3827\u3828\u3829\u382a\u382b\u382c\u382d\u382e\u382f\u3830\u3831\u3832\u3833\u3834\u3835\u3836\u3837\u3838\u3839\u383a\u383b\u383c\u383d\u383e\u383f\u3840\u3841\u3842\u3843\u3844\u3845\u3846\u3847\u3848\u3849\u384a\u384b\u384c\u384d\u384e\u384f\u3850\u3851\u3852\u3853\u3854\u3855\u3856\u3857\u3858\u3859\u385a\u385b\u385c\u385d\u385e\u385f\u3860\u3861\u3862\u3863\u3864\u3865\u3866\u3867\u3868\u3869\u386a\u386b\u386c\u386d\u386e\u386f\u3870\u3871\u3872\u3873\u3874\u3875\u3876\u3877\u3878\u3879\u387a\u387b\u387c\u387d\u387e\u387f\u3880\u3881\u3882\u3883\u3884\u3885\u3886\u3887\u3888\u3889\u388a\u388b\u388c\u388d\u388e\u388f\u3890\u3891\u3892\u3893\u3894\u3895\u3896\u3897\u3898\u3899\u389a\u389b\u389c\u389d\u389e\u389f\u38a0\u38a1\u38a2\u38a3\u38a4\u38a5\u
38a6\u38a7\u38a8\u38a9\u38aa\u38ab\u38ac\u38ad\u38ae\u38af\u38b0\u38b1\u38b2\u38b3\u38b4\u38b5\u38b6\u38b7\u38b8\u38b9\u38ba\u38bb\u38bc\u38bd\u38be\u38bf\u38c0\u38c1\u38c2\u38c3\u38c4\u38c5\u38c6\u38c7\u38c8\u38c9\u38ca\u38cb\u38cc\u38cd\u38ce\u38cf\u38d0\u38d1\u38d2\u38d3\u38d4\u38d5\u38d6\u38d7\u38d8\u38d9\u38da\u38db\u38dc\u38dd\u38de\u38df\u38e0\u38e1\u38e2\u38e3\u38e4\u38e5\u38e6\u38e7\u38e8\u38e9\u38ea\u38eb\u38ec\u38ed\u38ee\u38ef\u38f0\u38f1\u38f2\u38f3\u38f4\u38f5\u38f6\u38f7\u38f8\u38f9\u38fa\u38fb\u38fc\u38fd\u38fe\u38ff\u3900\u3901\u3902\u3903\u3904\u3905\u3906\u3907\u3908\u3909\u390a\u390b\u390c\u390d\u390e\u390f\u3910\u3911\u3912\u3913\u3914\u3915\u3916\u3917\u3918\u3919\u391a\u391b\u391c\u391d\u391e\u391f\u3920\u3921\u3922\u3923\u3924\u3925\u3926\u3927\u3928\u3929\u392a\u392b\u392c\u392d\u392e\u392f\u3930\u3931\u3932\u3933\u3934\u3935\u3936\u3937\u3938\u3939\u393a\u393b\u393c\u393d\u393e\u393f\u3940\u3941\u3942\u3943\u3944\u3945\u3946\u3947\u3948\u3949\u394a\u394b\u394c\u394d\u394e\u394f\u3950\u3951\u3952\u3953\u3954\u3955\u3956\u3957\u3958\u3959\u395a\u395b\u395c\u395d\u395e\u395f\u3960\u3961\u3962\u3963\u3964\u3965\u3966\u3967\u3968\u3969\u396a\u396b\u396c\u396d\u396e\u396f\u3970\u3971\u3972\u3973\u3974\u3975\u3976\u3977\u3978\u3979\u397a\u397b\u397c\u397d\u397e\u397f\u3980\u3981\u3982\u3983\u3984\u3985\u3986\u3987\u3988\u3989\u398a\u398b\u398c\u398d\u398e\u398f\u3990\u3991\u3992\u3993\u3994\u3995\u3996\u3997\u3998\u3999\u399a\u399b\u399c\u399d\u399e\u399f\u39a0\u39a1\u39a2\u39a3\u39a4\u39a5\u39a6\u39a7\u39a8\u39a9\u39aa\u39ab\u39ac\u39ad\u39ae\u39af\u39b0\u39b1\u39b2\u39b3\u39b4\u39b5\u39b6\u39b7\u39b8\u39b9\u39ba\u39bb\u39bc\u39bd\u39be\u39bf\u39c0\u39c1\u39c2\u39c3\u39c4\u39c5\u39c6\u39c7\u39c8\u39c9\u39ca\u39cb\u39cc\u39cd\u39ce\u39cf\u39d0\u39d1\u39d2\u39d3\u39d4\u39d5\u39d6\u39d7\u39d8\u39d9\u39da\u39db\u39dc\u39dd\u39de\u39df\u39e0\u39e1\u39e2\u39e3\u39e4\u39e5\u39e6\u39e7\u39e8\u39e9\u39ea\u39eb\u39ec\u39ed\u39ee\u39ef\u39f0\u39f1\u39f2\u39
f3\u39f4\u39f5\u39f6\u39f7\u39f8\u39f9\u39fa\u39fb\u39fc\u39fd\u39fe\u39ff\u3a00\u3a01\u3a02\u3a03\u3a04\u3a05\u3a06\u3a07\u3a08\u3a09\u3a0a\u3a0b\u3a0c\u3a0d\u3a0e\u3a0f\u3a10\u3a11\u3a12\u3a13\u3a14\u3a15\u3a16\u3a17\u3a18\u3a19\u3a1a\u3a1b\u3a1c\u3a1d\u3a1e\u3a1f\u3a20\u3a21\u3a22\u3a23\u3a24\u3a25\u3a26\u3a27\u3a28\u3a29\u3a2a\u3a2b\u3a2c\u3a2d\u3a2e\u3a2f\u3a30\u3a31\u3a32\u3a33\u3a34\u3a35\u3a36\u3a37\u3a38\u3a39\u3a3a\u3a3b\u3a3c\u3a3d\u3a3e\u3a3f\u3a40\u3a41\u3a42\u3a43\u3a44\u3a45\u3a46\u3a47\u3a48\u3a49\u3a4a\u3a4b\u3a4c\u3a4d\u3a4e\u3a4f\u3a50\u3a51\u3a52\u3a53\u3a54\u3a55\u3a56\u3a57\u3a58\u3a59\u3a5a\u3a5b\u3a5c\u3a5d\u3a5e\u3a5f\u3a60\u3a61\u3a62\u3a63\u3a64\u3a65\u3a66\u3a67\u3a68\u3a69\u3a6a\u3a6b\u3a6c\u3a6d\u3a6e\u3a6f\u3a70\u3a71\u3a72\u3a73\u3a74\u3a75\u3a76\u3a77\u3a78\u3a79\u3a7a\u3a7b\u3a7c\u3a7d\u3a7e\u3a7f\u3a80\u3a81\u3a82\u3a83\u3a84\u3a85\u3a86\u3a87\u3a88\u3a89\u3a8a\u3a8b\u3a8c\u3a8d\u3a8e\u3a8f\u3a90\u3a91\u3a92\u3a93\u3a94\u3a95\u3a96\u3a97\u3a98\u3a99\u3a9a\u3a9b\u3a9c\u3a9d\u3a9e\u3a9f\u3aa0\u3aa1\u3aa2\u3aa3\u3aa4\u3aa5\u3aa6\u3aa7\u3aa8\u3aa9\u3aaa\u3aab\u3aac\u3aad\u3aae\u3aaf\u3ab0\u3ab1\u3ab2\u3ab3\u3ab4\u3ab5\u3ab6\u3ab7\u3ab8\u3ab9\u3aba\u3abb\u3abc\u3abd\u3abe\u3abf\u3ac0\u3ac1\u3ac2\u3ac3\u3ac4\u3ac5\u3ac6\u3ac7\u3ac8\u3ac9\u3aca\u3acb\u3acc\u3acd\u3ace\u3acf\u3ad0\u3ad1\u3ad2\u3ad3\u3ad4\u3ad5\u3ad6\u3ad7\u3ad8\u3ad9\u3ada\u3adb\u3adc\u3add\u3ade\u3adf\u3ae0\u3ae1\u3ae2\u3ae3\u3ae4\u3ae5\u3ae6\u3ae7\u3ae8\u3ae9\u3aea\u3aeb\u3aec\u3aed\u3aee\u3aef\u3af0\u3af1\u3af2\u3af3\u3af4\u3af5\u3af6\u3af7\u3af8\u3af9\u3afa\u3afb\u3afc\u3afd\u3afe\u3aff\u3b00\u3b01\u3b02\u3b03\u3b04\u3b05\u3b06\u3b07\u3b08\u3b09\u3b0a\u3b0b\u3b0c\u3b0d\u3b0e\u3b0f\u3b10\u3b11\u3b12\u3b13\u3b14\u3b15\u3b16\u3b17\u3b18\u3b19\u3b1a\u3b1b\u3b1c\u3b1d\u3b1e\u3b1f\u3b20\u3b21\u3b22\u3b23\u3b24\u3b25\u3b26\u3b27\u3b28\u3b29\u3b2a\u3b2b\u3b2c\u3b2d\u3b2e\u3b2f\u3b30\u3b31\u3b32\u3b33\u3b34\u3b35\u3b36\u3b37\u3b38\u3b39\u3b3a\u3b3b\u3b3c\u3b3d\u3b3e\u3b3f\u3b40
\u3b41\u3b42\u3b43\u3b44\u3b45\u3b46\u3b47\u3b48\u3b49\u3b4a\u3b4b\u3b4c\u3b4d\u3b4e\u3b4f\u3b50\u3b51\u3b52\u3b53\u3b54\u3b55\u3b56\u3b57\u3b58\u3b59\u3b5a\u3b5b\u3b5c\u3b5d\u3b5e\u3b5f\u3b60\u3b61\u3b62\u3b63\u3b64\u3b65\u3b66\u3b67\u3b68\u3b69\u3b6a\u3b6b\u3b6c\u3b6d\u3b6e\u3b6f\u3b70\u3b71\u3b72\u3b73\u3b74\u3b75\u3b76\u3b77\u3b78\u3b79\u3b7a\u3b7b\u3b7c\u3b7d\u3b7e\u3b7f\u3b80\u3b81\u3b82\u3b83\u3b84\u3b85\u3b86\u3b87\u3b88\u3b89\u3b8a\u3b8b\u3b8c\u3b8d\u3b8e\u3b8f\u3b90\u3b91\u3b92\u3b93\u3b94\u3b95\u3b96\u3b97\u3b98\u3b99\u3b9a\u3b9b\u3b9c\u3b9d\u3b9e\u3b9f\u3ba0\u3ba1\u3ba2\u3ba3\u3ba4\u3ba5\u3ba6\u3ba7\u3ba8\u3ba9\u3baa\u3bab\u3bac\u3bad\u3bae\u3baf\u3bb0\u3bb1\u3bb2\u3bb3\u3bb4\u3bb5\u3bb6\u3bb7\u3bb8\u3bb9\u3bba\u3bbb\u3bbc\u3bbd\u3bbe\u3bbf\u3bc0\u3bc1\u3bc2\u3bc3\u3bc4\u3bc5\u3bc6\u3bc7\u3bc8\u3bc9\u3bca\u3bcb\u3bcc\u3bcd\u3bce\u3bcf\u3bd0\u3bd1\u3bd2\u3bd3\u3bd4\u3bd5\u3bd6\u3bd7\u3bd8\u3bd9\u3bda\u3bdb\u3bdc\u3bdd\u3bde\u3bdf\u3be0\u3be1\u3be2\u3be3\u3be4\u3be5\u3be6\u3be7\u3be8\u3be9\u3bea\u3beb\u3bec\u3bed\u3bee\u3bef\u3bf0\u3bf1\u3bf2\u3bf3\u3bf4\u3bf5\u3bf6\u3bf7\u3bf8\u3bf9\u3bfa\u3bfb\u3bfc\u3bfd\u3bfe\u3bff\u3c00\u3c01\u3c02\u3c03\u3c04\u3c05\u3c06\u3c07\u3c08\u3c09\u3c0a\u3c0b\u3c0c\u3c0d\u3c0e\u3c0f\u3c10\u3c11\u3c12\u3c13\u3c14\u3c15\u3c16\u3c17\u3c18\u3c19\u3c1a\u3c1b\u3c1c\u3c1d\u3c1e\u3c1f\u3c20\u3c21\u3c22\u3c23\u3c24\u3c25\u3c26\u3c27\u3c28\u3c29\u3c2a\u3c2b\u3c2c\u3c2d\u3c2e\u3c2f\u3c30\u3c31\u3c32\u3c33\u3c34\u3c35\u3c36\u3c37\u3c38\u3c39\u3c3a\u3c3b\u3c3c\u3c3d\u3c3e\u3c3f\u3c40\u3c41\u3c42\u3c43\u3c44\u3c45\u3c46\u3c47\u3c48\u3c49\u3c4a\u3c4b\u3c4c\u3c4d\u3c4e\u3c4f\u3c50\u3c51\u3c52\u3c53\u3c54\u3c55\u3c56\u3c57\u3c58\u3c59\u3c5a\u3c5b\u3c5c\u3c5d\u3c5e\u3c5f\u3c60\u3c61\u3c62\u3c63\u3c64\u3c65\u3c66\u3c67\u3c68\u3c69\u3c6a\u3c6b\u3c6c\u3c6d\u3c6e\u3c6f\u3c70\u3c71\u3c72\u3c73\u3c74\u3c75\u3c76\u3c77\u3c78\u3c79\u3c7a\u3c7b\u3c7c\u3c7d\u3c7e\u3c7f\u3c80\u3c81\u3c82\u3c83\u3c84\u3c85\u3c86\u3c87\u3c88\u3c89\u3c8a\u3c8b\u3c8c\u3c8d\u
3c8e\u3c8f\u3c90\u3c91\u3c92\u3c93\u3c94\u3c95\u3c96\u3c97\u3c98\u3c99\u3c9a\u3c9b\u3c9c\u3c9d\u3c9e\u3c9f\u3ca0\u3ca1\u3ca2\u3ca3\u3ca4\u3ca5\u3ca6\u3ca7\u3ca8\u3ca9\u3caa\u3cab\u3cac\u3cad\u3cae\u3caf\u3cb0\u3cb1\u3cb2\u3cb3\u3cb4\u3cb5\u3cb6\u3cb7\u3cb8\u3cb9\u3cba\u3cbb\u3cbc\u3cbd\u3cbe\u3cbf\u3cc0\u3cc1\u3cc2\u3cc3\u3cc4\u3cc5\u3cc6\u3cc7\u3cc8\u3cc9\u3cca\u3ccb\u3ccc\u3ccd\u3cce\u3ccf\u3cd0\u3cd1\u3cd2\u3cd3\u3cd4\u3cd5\u3cd6\u3cd7\u3cd8\u3cd9\u3cda\u3cdb\u3cdc\u3cdd\u3cde\u3cdf\u3ce0\u3ce1\u3ce2\u3ce3\u3ce4\u3ce5\u3ce6\u3ce7\u3ce8\u3ce9\u3cea\u3ceb\u3cec\u3ced\u3cee\u3cef\u3cf0\u3cf1\u3cf2\u3cf3\u3cf4\u3cf5\u3cf6\u3cf7\u3cf8\u3cf9\u3cfa\u3cfb\u3cfc\u3cfd\u3cfe\u3cff\u3d00\u3d01\u3d02\u3d03\u3d04\u3d05\u3d06\u3d07\u3d08\u3d09\u3d0a\u3d0b\u3d0c\u3d0d\u3d0e\u3d0f\u3d10\u3d11\u3d12\u3d13\u3d14\u3d15\u3d16\u3d17\u3d18\u3d19\u3d1a\u3d1b\u3d1c\u3d1d\u3d1e\u3d1f\u3d20\u3d21\u3d22\u3d23\u3d24\u3d25\u3d26\u3d27\u3d28\u3d29\u3d2a\u3d2b\u3d2c\u3d2d\u3d2e\u3d2f\u3d30\u3d31\u3d32\u3d33\u3d34\u3d35\u3d36\u3d37\u3d38\u3d39\u3d3a\u3d3b\u3d3c\u3d3d\u3d3e\u3d3f\u3d40\u3d41\u3d42\u3d43\u3d44\u3d45\u3d46\u3d47\u3d48\u3d49\u3d4a\u3d4b\u3d4c\u3d4d\u3d4e\u3d4f\u3d50\u3d51\u3d52\u3d53\u3d54\u3d55\u3d56\u3d57\u3d58\u3d59\u3d5a\u3d5b\u3d5c\u3d5d\u3d5e\u3d5f\u3d60\u3d61\u3d62\u3d63\u3d64\u3d65\u3d66\u3d67\u3d68\u3d69\u3d6a\u3d6b\u3d6c\u3d6d\u3d6e\u3d6f\u3d70\u3d71\u3d72\u3d73\u3d74\u3d75\u3d76\u3d77\u3d78\u3d79\u3d7a\u3d7b\u3d7c\u3d7d\u3d7e\u3d7f\u3d80\u3d81\u3d82\u3d83\u3d84\u3d85\u3d86\u3d87\u3d88\u3d89\u3d8a\u3d8b\u3d8c\u3d8d\u3d8e\u3d8f\u3d90\u3d91\u3d92\u3d93\u3d94\u3d95\u3d96\u3d97\u3d98\u3d99\u3d9a\u3d9b\u3d9c\u3d9d\u3d9e\u3d9f\u3da0\u3da1\u3da2\u3da3\u3da4\u3da5\u3da6\u3da7\u3da8\u3da9\u3daa\u3dab\u3dac\u3dad\u3dae\u3daf\u3db0\u3db1\u3db2\u3db3\u3db4\u3db5\u3db6\u3db7\u3db8\u3db9\u3dba\u3dbb\u3dbc\u3dbd\u3dbe\u3dbf\u3dc0\u3dc1\u3dc2\u3dc3\u3dc4\u3dc5\u3dc6\u3dc7\u3dc8\u3dc9\u3dca\u3dcb\u3dcc\u3dcd\u3dce\u3dcf\u3dd0\u3dd1\u3dd2\u3dd3\u3dd4\u3dd5\u3dd6\u3dd7\u3dd8\u3dd9\u3dda\u3d
db\u3ddc\u3ddd\u3dde\u3ddf\u3de0\u3de1\u3de2\u3de3\u3de4\u3de5\u3de6\u3de7\u3de8\u3de9\u3dea\u3deb\u3dec\u3ded\u3dee\u3def\u3df0\u3df1\u3df2\u3df3\u3df4\u3df5\u3df6\u3df7\u3df8\u3df9\u3dfa\u3dfb\u3dfc\u3dfd\u3dfe\u3dff\u3e00\u3e01\u3e02\u3e03\u3e04\u3e05\u3e06\u3e07\u3e08\u3e09\u3e0a\u3e0b\u3e0c\u3e0d\u3e0e\u3e0f\u3e10\u3e11\u3e12\u3e13\u3e14\u3e15\u3e16\u3e17\u3e18\u3e19\u3e1a\u3e1b\u3e1c\u3e1d\u3e1e\u3e1f\u3e20\u3e21\u3e22\u3e23\u3e24\u3e25\u3e26\u3e27\u3e28\u3e29\u3e2a\u3e2b\u3e2c\u3e2d\u3e2e\u3e2f\u3e30\u3e31\u3e32\u3e33\u3e34\u3e35\u3e36\u3e37\u3e38\u3e39\u3e3a\u3e3b\u3e3c\u3e3d\u3e3e\u3e3f\u3e40\u3e41\u3e42\u3e43\u3e44\u3e45\u3e46\u3e47\u3e48\u3e49\u3e4a\u3e4b\u3e4c\u3e4d\u3e4e\u3e4f\u3e50\u3e51\u3e52\u3e53\u3e54\u3e55\u3e56\u3e57\u3e58\u3e59\u3e5a\u3e5b\u3e5c\u3e5d\u3e5e\u3e5f\u3e60\u3e61\u3e62\u3e63\u3e64\u3e65\u3e66\u3e67\u3e68\u3e69\u3e6a\u3e6b\u3e6c\u3e6d\u3e6e\u3e6f\u3e70\u3e71\u3e72\u3e73\u3e74\u3e75\u3e76\u3e77\u3e78\u3e79\u3e7a\u3e7b\u3e7c\u3e7d\u3e7e\u3e7f\u3e80\u3e81\u3e82\u3e83\u3e84\u3e85\u3e86\u3e87\u3e88\u3e89\u3e8a\u3e8b\u3e8c\u3e8d\u3e8e\u3e8f\u3e90\u3e91\u3e92\u3e93\u3e94\u3e95\u3e96\u3e97\u3e98\u3e99\u3e9a\u3e9b\u3e9c\u3e9d\u3e9e\u3e9f\u3ea0\u3ea1\u3ea2\u3ea3\u3ea4\u3ea5\u3ea6\u3ea7\u3ea8\u3ea9\u3eaa\u3eab\u3eac\u3ead\u3eae\u3eaf\u3eb0\u3eb1\u3eb2\u3eb3\u3eb4\u3eb5\u3eb6\u3eb7\u3eb8\u3eb9\u3eba\u3ebb\u3ebc\u3ebd\u3ebe\u3ebf\u3ec0\u3ec1\u3ec2\u3ec3\u3ec4\u3ec5\u3ec6\u3ec7\u3ec8\u3ec9\u3eca\u3ecb\u3ecc\u3ecd\u3ece\u3ecf\u3ed0\u3ed1\u3ed2\u3ed3\u3ed4\u3ed5\u3ed6\u3ed7\u3ed8\u3ed9\u3eda\u3edb\u3edc\u3edd\u3ede\u3edf\u3ee0\u3ee1\u3ee2\u3ee3\u3ee4\u3ee5\u3ee6\u3ee7\u3ee8\u3ee9\u3eea\u3eeb\u3eec\u3eed\u3eee\u3eef\u3ef0\u3ef1\u3ef2\u3ef3\u3ef4\u3ef5\u3ef6\u3ef7\u3ef8\u3ef9\u3efa\u3efb\u3efc\u3efd\u3efe\u3eff\u3f00\u3f01\u3f02\u3f03\u3f04\u3f05\u3f06\u3f07\u3f08\u3f09\u3f0a\u3f0b\u3f0c\u3f0d\u3f0e\u3f0f\u3f10\u3f11\u3f12\u3f13\u3f14\u3f15\u3f16\u3f17\u3f18\u3f19\u3f1a\u3f1b\u3f1c\u3f1d\u3f1e\u3f1f\u3f20\u3f21\u3f22\u3f23\u3f24\u3f25\u3f26\u3f27\u3f28
\u3f29\u3f2a\u3f2b\u3f2c\u3f2d\u3f2e\u3f2f\u3f30\u3f31\u3f32\u3f33\u3f34\u3f35\u3f36\u3f37\u3f38\u3f39\u3f3a\u3f3b\u3f3c\u3f3d\u3f3e\u3f3f\u3f40\u3f41\u3f42\u3f43\u3f44\u3f45\u3f46\u3f47\u3f48\u3f49\u3f4a\u3f4b\u3f4c\u3f4d\u3f4e\u3f4f\u3f50\u3f51\u3f52\u3f53\u3f54\u3f55\u3f56\u3f57\u3f58\u3f59\u3f5a\u3f5b\u3f5c\u3f5d\u3f5e\u3f5f\u3f60\u3f61\u3f62\u3f63\u3f64\u3f65\u3f66\u3f67\u3f68\u3f69\u3f6a\u3f6b\u3f6c\u3f6d\u3f6e\u3f6f\u3f70\u3f71\u3f72\u3f73\u3f74\u3f75\u3f76\u3f77\u3f78\u3f79\u3f7a\u3f7b\u3f7c\u3f7d\u3f7e\u3f7f\u3f80\u3f81\u3f82\u3f83\u3f84\u3f85\u3f86\u3f87\u3f88\u3f89\u3f8a\u3f8b\u3f8c\u3f8d\u3f8e\u3f8f\u3f90\u3f91\u3f92\u3f93\u3f94\u3f95\u3f96\u3f97\u3f98\u3f99\u3f9a\u3f9b\u3f9c\u3f9d\u3f9e\u3f9f\u3fa0\u3fa1\u3fa2\u3fa3\u3fa4\u3fa5\u3fa6\u3fa7\u3fa8\u3fa9\u3faa\u3fab\u3fac\u3fad\u3fae\u3faf\u3fb0\u3fb1\u3fb2\u3fb3\u3fb4\u3fb5\u3fb6\u3fb7\u3fb8\u3fb9\u3fba\u3fbb\u3fbc\u3fbd\u3fbe\u3fbf\u3fc0\u3fc1\u3fc2\u3fc3\u3fc4\u3fc5\u3fc6\u3fc7\u3fc8\u3fc9\u3fca\u3fcb\u3fcc\u3fcd\u3fce\u3fcf\u3fd0\u3fd1\u3fd2\u3fd3\u3fd4\u3fd5\u3fd6\u3fd7\u3fd8\u3fd9\u3fda\u3fdb\u3fdc\u3fdd\u3fde\u3fdf\u3fe0\u3fe1\u3fe2\u3fe3\u3fe4\u3fe5\u3fe6\u3fe7\u3fe8\u3fe9\u3fea\u3feb\u3fec\u3fed\u3fee\u3fef\u3ff0\u3ff1\u3ff2\u3ff3\u3ff4\u3ff5\u3ff6\u3ff7\u3ff8\u3ff9\u3ffa\u3ffb\u3ffc\u3ffd\u3ffe\u3fff\u4000\u4001\u4002\u4003\u4004\u4005\u4006\u4007\u4008\u4009\u400a\u400b\u400c\u400d\u400e\u400f\u4010\u4011\u4012\u4013\u4014\u4015\u4016\u4017\u4018\u4019\u401a\u401b\u401c\u401d\u401e\u401f\u4020\u4021\u4022\u4023\u4024\u4025\u4026\u4027\u4028\u4029\u402a\u402b\u402c\u402d\u402e\u402f\u4030\u4031\u4032\u4033\u4034\u4035\u4036\u4037\u4038\u4039\u403a\u403b\u403c\u403d\u403e\u403f\u4040\u4041\u4042\u4043\u4044\u4045\u4046\u4047\u4048\u4049\u404a\u404b\u404c\u404d\u404e\u404f\u4050\u4051\u4052\u4053\u4054\u4055\u4056\u4057\u4058\u4059\u405a\u405b\u405c\u405d\u405e\u405f\u4060\u4061\u4062\u4063\u4064\u4065\u4066\u4067\u4068\u4069\u406a\u406b\u406c\u406d\u406e\u406f\u4070\u4071\u4072\u4073\u4074\u4075\u
4076\u4077\u4078\u4079\u407a\u407b\u407c\u407d\u407e\u407f\u4080\u4081\u4082\u4083\u4084\u4085\u4086\u4087\u4088\u4089\u408a\u408b\u408c\u408d\u408e\u408f\u4090\u4091\u4092\u4093\u4094\u4095\u4096\u4097\u4098\u4099\u409a\u409b\u409c\u409d\u409e\u409f\u40a0\u40a1\u40a2\u40a3\u40a4\u40a5\u40a6\u40a7\u40a8\u40a9\u40aa\u40ab\u40ac\u40ad\u40ae\u40af\u40b0\u40b1\u40b2\u40b3\u40b4\u40b5\u40b6\u40b7\u40b8\u40b9\u40ba\u40bb\u40bc\u40bd\u40be\u40bf\u40c0\u40c1\u40c2\u40c3\u40c4\u40c5\u40c6\u40c7\u40c8\u40c9\u40ca\u40cb\u40cc\u40cd\u40ce\u40cf\u40d0\u40d1\u40d2\u40d3\u40d4\u40d5\u40d6\u40d7\u40d8\u40d9\u40da\u40db\u40dc\u40dd\u40de\u40df\u40e0\u40e1\u40e2\u40e3\u40e4\u40e5\u40e6\u40e7\u40e8\u40e9\u40ea\u40eb\u40ec\u40ed\u40ee\u40ef\u40f0\u40f1\u40f2\u40f3\u40f4\u40f5\u40f6\u40f7\u40f8\u40f9\u40fa\u40fb\u40fc\u40fd\u40fe\u40ff\u4100\u4101\u4102\u4103\u4104\u4105\u4106\u4107\u4108\u4109\u410a\u410b\u410c\u410d\u410e\u410f\u4110\u4111\u4112\u4113\u4114\u4115\u4116\u4117\u4118\u4119\u411a\u411b\u411c\u411d\u411e\u411f\u4120\u4121\u4122\u4123\u4124\u4125\u4126\u4127\u4128\u4129\u412a\u412b\u412c\u412d\u412e\u412f\u4130\u4131\u4132\u4133\u4134\u4135\u4136\u4137\u4138\u4139\u413a\u413b\u413c\u413d\u413e\u413f\u4140\u4141\u4142\u4143\u4144\u4145\u4146\u4147\u4148\u4149\u414a\u414b\u414c\u414d\u414e\u414f\u4150\u4151\u4152\u4153\u4154\u4155\u4156\u4157\u4158\u4159\u415a\u415b\u415c\u415d\u415e\u415f\u4160\u4161\u4162\u4163\u4164\u4165\u4166\u4167\u4168\u4169\u416a\u416b\u416c\u416d\u416e\u416f\u4170\u4171\u4172\u4173\u4174\u4175\u4176\u4177\u4178\u4179\u417a\u417b\u417c\u417d\u417e\u417f\u4180\u4181\u4182\u4183\u4184\u4185\u4186\u4187\u4188\u4189\u418a\u418b\u418c\u418d\u418e\u418f\u4190\u4191\u4192\u4193\u4194\u4195\u4196\u4197\u4198\u4199\u419a\u419b\u419c\u419d\u419e\u419f\u41a0\u41a1\u41a2\u41a3\u41a4\u41a5\u41a6\u41a7\u41a8\u41a9\u41aa\u41ab\u41ac\u41ad\u41ae\u41af\u41b0\u41b1\u41b2\u41b3\u41b4\u41b5\u41b6\u41b7\u41b8\u41b9\u41ba\u41bb\u41bc\u41bd\u41be\u41bf\u41c0\u41c1\u41c2\u41
c3\u41c4\u41c5\u41c6\u41c7\u41c8\u41c9\u41ca\u41cb\u41cc\u41cd\u41ce\u41cf\u41d0\u41d1\u41d2\u41d3\u41d4\u41d5\u41d6\u41d7\u41d8\u41d9\u41da\u41db\u41dc\u41dd\u41de\u41df\u41e0\u41e1\u41e2\u41e3\u41e4\u41e5\u41e6\u41e7\u41e8\u41e9\u41ea\u41eb\u41ec\u41ed\u41ee\u41ef\u41f0\u41f1\u41f2\u41f3\u41f4\u41f5\u41f6\u41f7\u41f8\u41f9\u41fa\u41fb\u41fc\u41fd\u41fe\u41ff\u4200\u4201\u4202\u4203\u4204\u4205\u4206\u4207\u4208\u4209\u420a\u420b\u420c\u420d\u420e\u420f\u4210\u4211\u4212\u4213\u4214\u4215\u4216\u4217\u4218\u4219\u421a\u421b\u421c\u421d\u421e\u421f\u4220\u4221\u4222\u4223\u4224\u4225\u4226\u4227\u4228\u4229\u422a\u422b\u422c\u422d\u422e\u422f\u4230\u4231\u4232\u4233\u4234\u4235\u4236\u4237\u4238\u4239\u423a\u423b\u423c\u423d\u423e\u423f\u4240\u4241\u4242\u4243\u4244\u4245\u4246\u4247\u4248\u4249\u424a\u424b\u424c\u424d\u424e\u424f\u4250\u4251\u4252\u4253\u4254\u4255\u4256\u4257\u4258\u4259\u425a\u425b\u425c\u425d\u425e\u425f\u4260\u4261\u4262\u4263\u4264\u4265\u4266\u4267\u4268\u4269\u426a\u426b\u426c\u426d\u426e\u426f\u4270\u4271\u4272\u4273\u4274\u4275\u4276\u4277\u4278\u4279\u427a\u427b\u427c\u427d\u427e\u427f\u4280\u4281\u4282\u4283\u4284\u4285\u4286\u4287\u4288\u4289\u428a\u428b\u428c\u428d\u428e\u428f\u4290\u4291\u4292\u4293\u4294\u4295\u4296\u4297\u4298\u4299\u429a\u429b\u429c\u429d\u429e\u429f\u42a0\u42a1\u42a2\u42a3\u42a4\u42a5\u42a6\u42a7\u42a8\u42a9\u42aa\u42ab\u42ac\u42ad\u42ae\u42af\u42b0\u42b1\u42b2\u42b3\u42b4\u42b5\u42b6\u42b7\u42b8\u42b9\u42ba\u42bb\u42bc\u42bd\u42be\u42bf\u42c0\u42c1\u42c2\u42c3\u42c4\u42c5\u42c6\u42c7\u42c8\u42c9\u42ca\u42cb\u42cc\u42cd\u42ce\u42cf\u42d0\u42d1\u42d2\u42d3\u42d4\u42d5\u42d6\u42d7\u42d8\u42d9\u42da\u42db\u42dc\u42dd\u42de\u42df\u42e0\u42e1\u42e2\u42e3\u42e4\u42e5\u42e6\u42e7\u42e8\u42e9\u42ea\u42eb\u42ec\u42ed\u42ee\u42ef\u42f0\u42f1\u42f2\u42f3\u42f4\u42f5\u42f6\u42f7\u42f8\u42f9\u42fa\u42fb\u42fc\u42fd\u42fe\u42ff\u4300\u4301\u4302\u4303\u4304\u4305\u4306\u4307\u4308\u4309\u430a\u430b\u430c\u430d\u430e\u430f\u4310
\u4311\u4312\u4313\u4314\u4315\u4316\u4317\u4318\u4319\u431a\u431b\u431c\u431d\u431e\u431f\u4320\u4321\u4322\u4323\u4324\u4325\u4326\u4327\u4328\u4329\u432a\u432b\u432c\u432d\u432e\u432f\u4330\u4331\u4332\u4333\u4334\u4335\u4336\u4337\u4338\u4339\u433a\u433b\u433c\u433d\u433e\u433f\u4340\u4341\u4342\u4343\u4344\u4345\u4346\u4347\u4348\u4349\u434a\u434b\u434c\u434d\u434e\u434f\u4350\u4351\u4352\u4353\u4354\u4355\u4356\u4357\u4358\u4359\u435a\u435b\u435c\u435d\u435e\u435f\u4360\u4361\u4362\u4363\u4364\u4365\u4366\u4367\u4368\u4369\u436a\u436b\u436c\u436d\u436e\u436f\u4370\u4371\u4372\u4373\u4374\u4375\u4376\u4377\u4378\u4379\u437a\u437b\u437c\u437d\u437e\u437f\u4380\u4381\u4382\u4383\u4384\u4385\u4386\u4387\u4388\u4389\u438a\u438b\u438c\u438d\u438e\u438f\u4390\u4391\u4392\u4393\u4394\u4395\u4396\u4397\u4398\u4399\u439a\u439b\u439c\u439d\u439e\u439f\u43a0\u43a1\u43a2\u43a3\u43a4\u43a5\u43a6\u43a7\u43a8\u43a9\u43aa\u43ab\u43ac\u43ad\u43ae\u43af\u43b0\u43b1\u43b2\u43b3\u43b4\u43b5\u43b6\u43b7\u43b8\u43b9\u43ba\u43bb\u43bc\u43bd\u43be\u43bf\u43c0\u43c1\u43c2\u43c3\u43c4\u43c5\u43c6\u43c7\u43c8\u43c9\u43ca\u43cb\u43cc\u43cd\u43ce\u43cf\u43d0\u43d1\u43d2\u43d3\u43d4\u43d5\u43d6\u43d7\u43d8\u43d9\u43da\u43db\u43dc\u43dd\u43de\u43df\u43e0\u43e1\u43e2\u43e3\u43e4\u43e5\u43e6\u43e7\u43e8\u43e9\u43ea\u43eb\u43ec\u43ed\u43ee\u43ef\u43f0\u43f1\u43f2\u43f3\u43f4\u43f5\u43f6\u43f7\u43f8\u43f9\u43fa\u43fb\u43fc\u43fd\u43fe\u43ff\u4400\u4401\u4402\u4403\u4404\u4405\u4406\u4407\u4408\u4409\u440a\u440b\u440c\u440d\u440e\u440f\u4410\u4411\u4412\u4413\u4414\u4415\u4416\u4417\u4418\u4419\u441a\u441b\u441c\u441d\u441e\u441f\u4420\u4421\u4422\u4423\u4424\u4425\u4426\u4427\u4428\u4429\u442a\u442b\u442c\u442d\u442e\u442f\u4430\u4431\u4432\u4433\u4434\u4435\u4436\u4437\u4438\u4439\u443a\u443b\u443c\u443d\u443e\u443f\u4440\u4441\u4442\u4443\u4444\u4445\u4446\u4447\u4448\u4449\u444a\u444b\u444c\u444d\u444e\u444f\u4450\u4451\u4452\u4453\u4454\u4455\u4456\u4457\u4458\u4459\u445a\u445b\u445c\u445d\u
445e\u445f\u4460\u4461\u4462\u4463\u4464\u4465\u4466\u4467\u4468\u4469\u446a\u446b\u446c\u446d\u446e\u446f\u4470\u4471\u4472\u4473\u4474\u4475\u4476\u4477\u4478\u4479\u447a\u447b\u447c\u447d\u447e\u447f\u4480\u4481\u4482\u4483\u4484\u4485\u4486\u4487\u4488\u4489\u448a\u448b\u448c\u448d\u448e\u448f\u4490\u4491\u4492\u4493\u4494\u4495\u4496\u4497\u4498\u4499\u449a\u449b\u449c\u449d\u449e\u449f\u44a0\u44a1\u44a2\u44a3\u44a4\u44a5\u44a6\u44a7\u44a8\u44a9\u44aa\u44ab\u44ac\u44ad\u44ae\u44af\u44b0\u44b1\u44b2\u44b3\u44b4\u44b5\u44b6\u44b7\u44b8\u44b9\u44ba\u44bb\u44bc\u44bd\u44be\u44bf\u44c0\u44c1\u44c2\u44c3\u44c4\u44c5\u44c6\u44c7\u44c8\u44c9\u44ca\u44cb\u44cc\u44cd\u44ce\u44cf\u44d0\u44d1\u44d2\u44d3\u44d4\u44d5\u44d6\u44d7\u44d8\u44d9\u44da\u44db\u44dc\u44dd\u44de\u44df\u44e0\u44e1\u44e2\u44e3\u44e4\u44e5\u44e6\u44e7\u44e8\u44e9\u44ea\u44eb\u44ec\u44ed\u44ee\u44ef\u44f0\u44f1\u44f2\u44f3\u44f4\u44f5\u44f6\u44f7\u44f8\u44f9\u44fa\u44fb\u44fc\u44fd\u44fe\u44ff\u4500\u4501\u4502\u4503\u4504\u4505\u4506\u4507\u4508\u4509\u450a\u450b\u450c\u450d\u450e\u450f\u4510\u4511\u4512\u4513\u4514\u4515\u4516\u4517\u4518\u4519\u451a\u451b\u451c\u451d\u451e\u451f\u4520\u4521\u4522\u4523\u4524\u4525\u4526\u4527\u4528\u4529\u452a\u452b\u452c\u452d\u452e\u452f\u4530\u4531\u4532\u4533\u4534\u4535\u4536\u4537\u4538\u4539\u453a\u453b\u453c\u453d\u453e\u453f\u4540\u4541\u4542\u4543\u4544\u4545\u4546\u4547\u4548\u4549\u454a\u454b\u454c\u454d\u454e\u454f\u4550\u4551\u4552\u4553\u4554\u4555\u4556\u4557\u4558\u4559\u455a\u455b\u455c\u455d\u455e\u455f\u4560\u4561\u4562\u4563\u4564\u4565\u4566\u4567\u4568\u4569\u456a\u456b\u456c\u456d\u456e\u456f\u4570\u4571\u4572\u4573\u4574\u4575\u4576\u4577\u4578\u4579\u457a\u457b\u457c\u457d\u457e\u457f\u4580\u4581\u4582\u4583\u4584\u4585\u4586\u4587\u4588\u4589\u458a\u458b\u458c\u458d\u458e\u458f\u4590\u4591\u4592\u4593\u4594\u4595\u4596\u4597\u4598\u4599\u459a\u459b\u459c\u459d\u459e\u459f\u45a0\u45a1\u45a2\u45a3\u45a4\u45a5\u45a6\u45a7\u45a8\u45a9\u45aa\u45
ab\u45ac\u45ad\u45ae\u45af\u45b0\u45b1\u45b2\u45b3\u45b4\u45b5\u45b6\u45b7\u45b8\u45b9\u45ba\u45bb\u45bc\u45bd\u45be\u45bf\u45c0\u45c1\u45c2\u45c3\u45c4\u45c5\u45c6\u45c7\u45c8\u45c9\u45ca\u45cb\u45cc\u45cd\u45ce\u45cf\u45d0\u45d1\u45d2\u45d3\u45d4\u45d5\u45d6\u45d7\u45d8\u45d9\u45da\u45db\u45dc\u45dd\u45de\u45df\u45e0\u45e1\u45e2\u45e3\u45e4\u45e5\u45e6\u45e7\u45e8\u45e9\u45ea\u45eb\u45ec\u45ed\u45ee\u45ef\u45f0\u45f1\u45f2\u45f3\u45f4\u45f5\u45f6\u45f7\u45f8\u45f9\u45fa\u45fb\u45fc\u45fd\u45fe\u45ff\u4600\u4601\u4602\u4603\u4604\u4605\u4606\u4607\u4608\u4609\u460a\u460b\u460c\u460d\u460e\u460f\u4610\u4611\u4612\u4613\u4614\u4615\u4616\u4617\u4618\u4619\u461a\u461b\u461c\u461d\u461e\u461f\u4620\u4621\u4622\u4623\u4624\u4625\u4626\u4627\u4628\u4629\u462a\u462b\u462c\u462d\u462e\u462f\u4630\u4631\u4632\u4633\u4634\u4635\u4636\u4637\u4638\u4639\u463a\u463b\u463c\u463d\u463e\u463f\u4640\u4641\u4642\u4643\u4644\u4645\u4646\u4647\u4648\u4649\u464a\u464b\u464c\u464d\u464e\u464f\u4650\u4651\u4652\u4653\u4654\u4655\u4656\u4657\u4658\u4659\u465a\u465b\u465c\u465d\u465e\u465f\u4660\u4661\u4662\u4663\u4664\u4665\u4666\u4667\u4668\u4669\u466a\u466b\u466c\u466d\u466e\u466f\u4670\u4671\u4672\u4673\u4674\u4675\u4676\u4677\u4678\u4679\u467a\u467b\u467c\u467d\u467e\u467f\u4680\u4681\u4682\u4683\u4684\u4685\u4686\u4687\u4688\u4689\u468a\u468b\u468c\u468d\u468e\u468f\u4690\u4691\u4692\u4693\u4694\u4695\u4696\u4697\u4698\u4699\u469a\u469b\u469c\u469d\u469e\u469f\u46a0\u46a1\u46a2\u46a3\u46a4\u46a5\u46a6\u46a7\u46a8\u46a9\u46aa\u46ab\u46ac\u46ad\u46ae\u46af\u46b0\u46b1\u46b2\u46b3\u46b4\u46b5\u46b6\u46b7\u46b8\u46b9\u46ba\u46bb\u46bc\u46bd\u46be\u46bf\u46c0\u46c1\u46c2\u46c3\u46c4\u46c5\u46c6\u46c7\u46c8\u46c9\u46ca\u46cb\u46cc\u46cd\u46ce\u46cf\u46d0\u46d1\u46d2\u46d3\u46d4\u46d5\u46d6\u46d7\u46d8\u46d9\u46da\u46db\u46dc\u46dd\u46de\u46df\u46e0\u46e1\u46e2\u46e3\u46e4\u46e5\u46e6\u46e7\u46e8\u46e9\u46ea\u46eb\u46ec\u46ed\u46ee\u46ef\u46f0\u46f1\u46f2\u46f3\u46f4\u46f5\u46f6\u46f7\u46f8
\u46f9\u46fa\u46fb\u46fc\u46fd\u46fe\u46ff\u4700\u4701\u4702\u4703\u4704\u4705\u4706\u4707\u4708\u4709\u470a\u470b\u470c\u470d\u470e\u470f\u4710\u4711\u4712\u4713\u4714\u4715\u4716\u4717\u4718\u4719\u471a\u471b\u471c\u471d\u471e\u471f\u4720\u4721\u4722\u4723\u4724\u4725\u4726\u4727\u4728\u4729\u472a\u472b\u472c\u472d\u472e\u472f\u4730\u4731\u4732\u4733\u4734\u4735\u4736\u4737\u4738\u4739\u473a\u473b\u473c\u473d\u473e\u473f\u4740\u4741\u4742\u4743\u4744\u4745\u4746\u4747\u4748\u4749\u474a\u474b\u474c\u474d\u474e\u474f\u4750\u4751\u4752\u4753\u4754\u4755\u4756\u4757\u4758\u4759\u475a\u475b\u475c\u475d\u475e\u475f\u4760\u4761\u4762\u4763\u4764\u4765\u4766\u4767\u4768\u4769\u476a\u476b\u476c\u476d\u476e\u476f\u4770\u4771\u4772\u4773\u4774\u4775\u4776\u4777\u4778\u4779\u477a\u477b\u477c\u477d\u477e\u477f\u4780\u4781\u4782\u4783\u4784\u4785\u4786\u4787\u4788\u4789\u478a\u478b\u478c\u478d\u478e\u478f\u4790\u4791\u4792\u4793\u4794\u4795\u4796\u4797\u4798\u4799\u479a\u479b\u479c\u479d\u479e\u479f\u47a0\u47a1\u47a2\u47a3\u47a4\u47a5\u47a6\u47a7\u47a8\u47a9\u47aa\u47ab\u47ac\u47ad\u47ae\u47af\u47b0\u47b1\u47b2\u47b3\u47b4\u47b5\u47b6\u47b7\u47b8\u47b9\u47ba\u47bb\u47bc\u47bd\u47be\u47bf\u47c0\u47c1\u47c2\u47c3\u47c4\u47c5\u47c6\u47c7\u47c8\u47c9\u47ca\u47cb\u47cc\u47cd\u47ce\u47cf\u47d0\u47d1\u47d2\u47d3\u47d4\u47d5\u47d6\u47d7\u47d8\u47d9\u47da\u47db\u47dc\u47dd\u47de\u47df\u47e0\u47e1\u47e2\u47e3\u47e4\u47e5\u47e6\u47e7\u47e8\u47e9\u47ea\u47eb\u47ec\u47ed\u47ee\u47ef\u47f0\u47f1\u47f2\u47f3\u47f4\u47f5\u47f6\u47f7\u47f8\u47f9\u47fa\u47fb\u47fc\u47fd\u47fe\u47ff\u4800\u4801\u4802\u4803\u4804\u4805\u4806\u4807\u4808\u4809\u480a\u480b\u480c\u480d\u480e\u480f\u4810\u4811\u4812\u4813\u4814\u4815\u4816\u4817\u4818\u4819\u481a\u481b\u481c\u481d\u481e\u481f\u4820\u4821\u4822\u4823\u4824\u4825\u4826\u4827\u4828\u4829\u482a\u482b\u482c\u482d\u482e\u482f\u4830\u4831\u4832\u4833\u4834\u4835\u4836\u4837\u4838\u4839\u483a\u483b\u483c\u483d\u483e\u483f\u4840\u4841\u4842\u4843\u4844\u4845\u
4846\u4847\u4848\u4849\u484a\u484b\u484c\u484d\u484e\u484f\u4850\u4851\u4852\u4853\u4854\u4855\u4856\u4857\u4858\u4859\u485a\u485b\u485c\u485d\u485e\u485f\u4860\u4861\u4862\u4863\u4864\u4865\u4866\u4867\u4868\u4869\u486a\u486b\u486c\u486d\u486e\u486f\u4870\u4871\u4872\u4873\u4874\u4875\u4876\u4877\u4878\u4879\u487a\u487b\u487c\u487d\u487e\u487f\u4880\u4881\u4882\u4883\u4884\u4885\u4886\u4887\u4888\u4889\u488a\u488b\u488c\u488d\u488e\u488f\u4890\u4891\u4892\u4893\u4894\u4895\u4896\u4897\u4898\u4899\u489a\u489b\u489c\u489d\u489e\u489f\u48a0\u48a1\u48a2\u48a3\u48a4\u48a5\u48a6\u48a7\u48a8\u48a9\u48aa\u48ab\u48ac\u48ad\u48ae\u48af\u48b0\u48b1\u48b2\u48b3\u48b4\u48b5\u48b6\u48b7\u48b8\u48b9\u48ba\u48bb\u48bc\u48bd\u48be\u48bf\u48c0\u48c1\u48c2\u48c3\u48c4\u48c5\u48c6\u48c7\u48c8\u48c9\u48ca\u48cb\u48cc\u48cd\u48ce\u48cf\u48d0\u48d1\u48d2\u48d3\u48d4\u48d5\u48d6\u48d7\u48d8\u48d9\u48da\u48db\u48dc\u48dd\u48de\u48df\u48e0\u48e1\u48e2\u48e3\u48e4\u48e5\u48e6\u48e7\u48e8\u48e9\u48ea\u48eb\u48ec\u48ed\u48ee\u48ef\u48f0\u48f1\u48f2\u48f3\u48f4\u48f5\u48f6\u48f7\u48f8\u48f9\u48fa\u48fb\u48fc\u48fd\u48fe\u48ff\u4900\u4901\u4902\u4903\u4904\u4905\u4906\u4907\u4908\u4909\u490a\u490b\u490c\u490d\u490e\u490f\u4910\u4911\u4912\u4913\u4914\u4915\u4916\u4917\u4918\u4919\u491a\u491b\u491c\u491d\u491e\u491f\u4920\u4921\u4922\u4923\u4924\u4925\u4926\u4927\u4928\u4929\u492a\u492b\u492c\u492d\u492e\u492f\u4930\u4931\u4932\u4933\u4934\u4935\u4936\u4937\u4938\u4939\u493a\u493b\u493c\u493d\u493e\u493f\u4940\u4941\u4942\u4943\u4944\u4945\u4946\u4947\u4948\u4949\u494a\u494b\u494c\u494d\u494e\u494f\u4950\u4951\u4952\u4953\u4954\u4955\u4956\u4957\u4958\u4959\u495a\u495b\u495c\u495d\u495e\u495f\u4960\u4961\u4962\u4963\u4964\u4965\u4966\u4967\u4968\u4969\u496a\u496b\u496c\u496d\u496e\u496f\u4970\u4971\u4972\u4973\u4974\u4975\u4976\u4977\u4978\u4979\u497a\u497b\u497c\u497d\u497e\u497f\u4980\u4981\u4982\u4983\u4984\u4985\u4986\u4987\u4988\u4989\u498a\u498b\u498c\u498d\u498e\u498f\u4990\u4991\u4992\u49
93\u4994\u4995\u4996\u4997\u4998\u4999\u499a\u499b\u499c\u499d\u499e\u499f\u49a0\u49a1\u49a2\u49a3\u49a4\u49a5\u49a6\u49a7\u49a8\u49a9\u49aa\u49ab\u49ac\u49ad\u49ae\u49af\u49b0\u49b1\u49b2\u49b3\u49b4\u49b5\u49b6\u49b7\u49b8\u49b9\u49ba\u49bb\u49bc\u49bd\u49be\u49bf\u49c0\u49c1\u49c2\u49c3\u49c4\u49c5\u49c6\u49c7\u49c8\u49c9\u49ca\u49cb\u49cc\u49cd\u49ce\u49cf\u49d0\u49d1\u49d2\u49d3\u49d4\u49d5\u49d6\u49d7\u49d8\u49d9\u49da\u49db\u49dc\u49dd\u49de\u49df\u49e0\u49e1\u49e2\u49e3\u49e4\u49e5\u49e6\u49e7\u49e8\u49e9\u49ea\u49eb\u49ec\u49ed\u49ee\u49ef\u49f0\u49f1\u49f2\u49f3\u49f4\u49f5\u49f6\u49f7\u49f8\u49f9\u49fa\u49fb\u49fc\u49fd\u49fe\u49ff\u4a00\u4a01\u4a02\u4a03\u4a04\u4a05\u4a06\u4a07\u4a08\u4a09\u4a0a\u4a0b\u4a0c\u4a0d\u4a0e\u4a0f\u4a10\u4a11\u4a12\u4a13\u4a14\u4a15\u4a16\u4a17\u4a18\u4a19\u4a1a\u4a1b\u4a1c\u4a1d\u4a1e\u4a1f\u4a20\u4a21\u4a22\u4a23\u4a24\u4a25\u4a26\u4a27\u4a28\u4a29\u4a2a\u4a2b\u4a2c\u4a2d\u4a2e\u4a2f\u4a30\u4a31\u4a32\u4a33\u4a34\u4a35\u4a36\u4a37\u4a38\u4a39\u4a3a\u4a3b\u4a3c\u4a3d\u4a3e\u4a3f\u4a40\u4a41\u4a42\u4a43\u4a44\u4a45\u4a46\u4a47\u4a48\u4a49\u4a4a\u4a4b\u4a4c\u4a4d\u4a4e\u4a4f\u4a50\u4a51\u4a52\u4a53\u4a54\u4a55\u4a56\u4a57\u4a58\u4a59\u4a5a\u4a5b\u4a5c\u4a5d\u4a5e\u4a5f\u4a60\u4a61\u4a62\u4a63\u4a64\u4a65\u4a66\u4a67\u4a68\u4a69\u4a6a\u4a6b\u4a6c\u4a6d\u4a6e\u4a6f\u4a70\u4a71\u4a72\u4a73\u4a74\u4a75\u4a76\u4a77\u4a78\u4a79\u4a7a\u4a7b\u4a7c\u4a7d\u4a7e\u4a7f\u4a80\u4a81\u4a82\u4a83\u4a84\u4a85\u4a86\u4a87\u4a88\u4a89\u4a8a\u4a8b\u4a8c\u4a8d\u4a8e\u4a8f\u4a90\u4a91\u4a92\u4a93\u4a94\u4a95\u4a96\u4a97\u4a98\u4a99\u4a9a\u4a9b\u4a9c\u4a9d\u4a9e\u4a9f\u4aa0\u4aa1\u4aa2\u4aa3\u4aa4\u4aa5\u4aa6\u4aa7\u4aa8\u4aa9\u4aaa\u4aab\u4aac\u4aad\u4aae\u4aaf\u4ab0\u4ab1\u4ab2\u4ab3\u4ab4\u4ab5\u4ab6\u4ab7\u4ab8\u4ab9\u4aba\u4abb\u4abc\u4abd\u4abe\u4abf\u4ac0\u4ac1\u4ac2\u4ac3\u4ac4\u4ac5\u4ac6\u4ac7\u4ac8\u4ac9\u4aca\u4acb\u4acc\u4acd\u4ace\u4acf\u4ad0\u4ad1\u4ad2\u4ad3\u4ad4\u4ad5\u4ad6\u4ad7\u4ad8\u4ad9\u4ada\u4adb\u4adc\u4add\u4ade\u4adf\u4ae0
\u4ae1\u4ae2\u4ae3\u4ae4\u4ae5\u4ae6\u4ae7\u4ae8\u4ae9\u4aea\u4aeb\u4aec\u4aed\u4aee\u4aef\u4af0\u4af1\u4af2\u4af3\u4af4\u4af5\u4af6\u4af7\u4af8\u4af9\u4afa\u4afb\u4afc\u4afd\u4afe\u4aff\u4b00\u4b01\u4b02\u4b03\u4b04\u4b05\u4b06\u4b07\u4b08\u4b09\u4b0a\u4b0b\u4b0c\u4b0d\u4b0e\u4b0f\u4b10\u4b11\u4b12\u4b13\u4b14\u4b15\u4b16\u4b17\u4b18\u4b19\u4b1a\u4b1b\u4b1c\u4b1d\u4b1e\u4b1f\u4b20\u4b21\u4b22\u4b23\u4b24\u4b25\u4b26\u4b27\u4b28\u4b29\u4b2a\u4b2b\u4b2c\u4b2d\u4b2e\u4b2f\u4b30\u4b31\u4b32\u4b33\u4b34\u4b35\u4b36\u4b37\u4b38\u4b39\u4b3a\u4b3b\u4b3c\u4b3d\u4b3e\u4b3f\u4b40\u4b41\u4b42\u4b43\u4b44\u4b45\u4b46\u4b47\u4b48\u4b49\u4b4a\u4b4b\u4b4c\u4b4d\u4b4e\u4b4f\u4b50\u4b51\u4b52\u4b53\u4b54\u4b55\u4b56\u4b57\u4b58\u4b59\u4b5a\u4b5b\u4b5c\u4b5d\u4b5e\u4b5f\u4b60\u4b61\u4b62\u4b63\u4b64\u4b65\u4b66\u4b67\u4b68\u4b69\u4b6a\u4b6b\u4b6c\u4b6d\u4b6e\u4b6f\u4b70\u4b71\u4b72\u4b73\u4b74\u4b75\u4b76\u4b77\u4b78\u4b79\u4b7a\u4b7b\u4b7c\u4b7d\u4b7e\u4b7f\u4b80\u4b81\u4b82\u4b83\u4b84\u4b85\u4b86\u4b87\u4b88\u4b89\u4b8a\u4b8b\u4b8c\u4b8d\u4b8e\u4b8f\u4b90\u4b91\u4b92\u4b93\u4b94\u4b95\u4b96\u4b97\u4b98\u4b99\u4b9a\u4b9b\u4b9c\u4b9d\u4b9e\u4b9f\u4ba0\u4ba1\u4ba2\u4ba3\u4ba4\u4ba5\u4ba6\u4ba7\u4ba8\u4ba9\u4baa\u4bab\u4bac\u4bad\u4bae\u4baf\u4bb0\u4bb1\u4bb2\u4bb3\u4bb4\u4bb5\u4bb6\u4bb7\u4bb8\u4bb9\u4bba\u4bbb\u4bbc\u4bbd\u4bbe\u4bbf\u4bc0\u4bc1\u4bc2\u4bc3\u4bc4\u4bc5\u4bc6\u4bc7\u4bc8\u4bc9\u4bca\u4bcb\u4bcc\u4bcd\u4bce\u4bcf\u4bd0\u4bd1\u4bd2\u4bd3\u4bd4\u4bd5\u4bd6\u4bd7\u4bd8\u4bd9\u4bda\u4bdb\u4bdc\u4bdd\u4bde\u4bdf\u4be0\u4be1\u4be2\u4be3\u4be4\u4be5\u4be6\u4be7\u4be8\u4be9\u4bea\u4beb\u4bec\u4bed\u4bee\u4bef\u4bf0\u4bf1\u4bf2\u4bf3\u4bf4\u4bf5\u4bf6\u4bf7\u4bf8\u4bf9\u4bfa\u4bfb\u4bfc\u4bfd\u4bfe\u4bff\u4c00\u4c01\u4c02\u4c03\u4c04\u4c05\u4c06\u4c07\u4c08\u4c09\u4c0a\u4c0b\u4c0c\u4c0d\u4c0e\u4c0f\u4c10\u4c11\u4c12\u4c13\u4c14\u4c15\u4c16\u4c17\u4c18\u4c19\u4c1a\u4c1b\u4c1c\u4c1d\u4c1e\u4c1f\u4c20\u4c21\u4c22\u4c23\u4c24\u4c25\u4c26\u4c27\u4c28\u4c29\u4c2a\u4c2b\u4c2c\u4c2d\u
4c2e\u4c2f\u4c30\u4c31\u4c32\u4c33\u4c34\u4c35\u4c36\u4c37\u4c38\u4c39\u4c3a\u4c3b\u4c3c\u4c3d\u4c3e\u4c3f\u4c40\u4c41\u4c42\u4c43\u4c44\u4c45\u4c46\u4c47\u4c48\u4c49\u4c4a\u4c4b\u4c4c\u4c4d\u4c4e\u4c4f\u4c50\u4c51\u4c52\u4c53\u4c54\u4c55\u4c56\u4c57\u4c58\u4c59\u4c5a\u4c5b\u4c5c\u4c5d\u4c5e\u4c5f\u4c60\u4c61\u4c62\u4c63\u4c64\u4c65\u4c66\u4c67\u4c68\u4c69\u4c6a\u4c6b\u4c6c\u4c6d\u4c6e\u4c6f\u4c70\u4c71\u4c72\u4c73\u4c74\u4c75\u4c76\u4c77\u4c78\u4c79\u4c7a\u4c7b\u4c7c\u4c7d\u4c7e\u4c7f\u4c80\u4c81\u4c82\u4c83\u4c84\u4c85\u4c86\u4c87\u4c88\u4c89\u4c8a\u4c8b\u4c8c\u4c8d\u4c8e\u4c8f\u4c90\u4c91\u4c92\u4c93\u4c94\u4c95\u4c96\u4c97\u4c98\u4c99\u4c9a\u4c9b\u4c9c\u4c9d\u4c9e\u4c9f\u4ca0\u4ca1\u4ca2\u4ca3\u4ca4\u4ca5\u4ca6\u4ca7\u4ca8\u4ca9\u4caa\u4cab\u4cac\u4cad\u4cae\u4caf\u4cb0\u4cb1\u4cb2\u4cb3\u4cb4\u4cb5\u4cb6\u4cb7\u4cb8\u4cb9\u4cba\u4cbb\u4cbc\u4cbd\u4cbe\u4cbf\u4cc0\u4cc1\u4cc2\u4cc3\u4cc4\u4cc5\u4cc6\u4cc7\u4cc8\u4cc9\u4cca\u4ccb\u4ccc\u4ccd\u4cce\u4ccf\u4cd0\u4cd1\u4cd2\u4cd3\u4cd4\u4cd5\u4cd6\u4cd7\u4cd8\u4cd9\u4cda\u4cdb\u4cdc\u4cdd\u4cde\u4cdf\u4ce0\u4ce1\u4ce2\u4ce3\u4ce4\u4ce5\u4ce6\u4ce7\u4ce8\u4ce9\u4cea\u4ceb\u4cec\u4ced\u4cee\u4cef\u4cf0\u4cf1\u4cf2\u4cf3\u4cf4\u4cf5\u4cf6\u4cf7\u4cf8\u4cf9\u4cfa\u4cfb\u4cfc\u4cfd\u4cfe\u4cff\u4d00\u4d01\u4d02\u4d03\u4d04\u4d05\u4d06\u4d07\u4d08\u4d09\u4d0a\u4d0b\u4d0c\u4d0d\u4d0e\u4d0f\u4d10\u4d11\u4d12\u4d13\u4d14\u4d15\u4d16\u4d17\u4d18\u4d19\u4d1a\u4d1b\u4d1c\u4d1d\u4d1e\u4d1f\u4d20\u4d21\u4d22\u4d23\u4d24\u4d25\u4d26\u4d27\u4d28\u4d29\u4d2a\u4d2b\u4d2c\u4d2d\u4d2e\u4d2f\u4d30\u4d31\u4d32\u4d33\u4d34\u4d35\u4d36\u4d37\u4d38\u4d39\u4d3a\u4d3b\u4d3c\u4d3d\u4d3e\u4d3f\u4d40\u4d41\u4d42\u4d43\u4d44\u4d45\u4d46\u4d47\u4d48\u4d49\u4d4a\u4d4b\u4d4c\u4d4d\u4d4e\u4d4f\u4d50\u4d51\u4d52\u4d53\u4d54\u4d55\u4d56\u4d57\u4d58\u4d59\u4d5a\u4d5b\u4d5c\u4d5d\u4d5e\u4d5f\u4d60\u4d61\u4d62\u4d63\u4d64\u4d65\u4d66\u4d67\u4d68\u4d69\u4d6a\u4d6b\u4d6c\u4d6d\u4d6e\u4d6f\u4d70\u4d71\u4d72\u4d73\u4d74\u4d75\u4d76\u4d77\u4d78\u4d79\u4d7a\u4d
7b\u4d7c\u4d7d\u4d7e\u4d7f\u4d80\u4d81\u4d82\u4d83\u4d84\u4d85\u4d86\u4d87\u4d88\u4d89\u4d8a\u4d8b\u4d8c\u4d8d\u4d8e\u4d8f\u4d90\u4d91\u4d92\u4d93\u4d94\u4d95\u4d96\u4d97\u4d98\u4d99\u4d9a\u4d9b\u4d9c\u4d9d\u4d9e\u4d9f\u4da0\u4da1\u4da2\u4da3\u4da4\u4da5\u4da6\u4da7\u4da8\u4da9\u4daa\u4dab\u4dac\u4dad\u4dae\u4daf\u4db0\u4db1\u4db2\u4db3\u4db4\u4db5\u4e00\u4e01\u4e02\u4e03\u4e04\u4e05\u4e06\u4e07\u4e08\u4e09\u4e0a\u4e0b\u4e0c\u4e0d\u4e0e\u4e0f\u4e10\u4e11\u4e12\u4e13\u4e14\u4e15\u4e16\u4e17\u4e18\u4e19\u4e1a\u4e1b\u4e1c\u4e1d\u4e1e\u4e1f\u4e20\u4e21\u4e22\u4e23\u4e24\u4e25\u4e26\u4e27\u4e28\u4e29\u4e2a\u4e2b\u4e2c\u4e2d\u4e2e\u4e2f\u4e30\u4e31\u4e32\u4e33\u4e34\u4e35\u4e36\u4e37\u4e38\u4e39\u4e3a\u4e3b\u4e3c\u4e3d\u4e3e\u4e3f\u4e40\u4e41\u4e42\u4e43\u4e44\u4e45\u4e46\u4e47\u4e48\u4e49\u4e4a\u4e4b\u4e4c\u4e4d\u4e4e\u4e4f\u4e50\u4e51\u4e52\u4e53\u4e54\u4e55\u4e56\u4e57\u4e58\u4e59\u4e5a\u4e5b\u4e5c\u4e5d\u4e5e\u4e5f\u4e60\u4e61\u4e62\u4e63\u4e64\u4e65\u4e66\u4e67\u4e68\u4e69\u4e6a\u4e6b\u4e6c\u4e6d\u4e6e\u4e6f\u4e70\u4e71\u4e72\u4e73\u4e74\u4e75\u4e76\u4e77\u4e78\u4e79\u4e7a\u4e7b\u4e7c\u4e7d\u4e7e\u4e7f\u4e80\u4e81\u4e82\u4e83\u4e84\u4e85\u4e86\u4e87\u4e88\u4e89\u4e8a\u4e8b\u4e8c\u4e8d\u4e8e\u4e8f\u4e90\u4e91\u4e92\u4e93\u4e94\u4e95\u4e96\u4e97\u4e98\u4e99\u4e9a\u4e9b\u4e9c\u4e9d\u4e9e\u4e9f\u4ea0\u4ea1\u4ea2\u4ea3\u4ea4\u4ea5\u4ea6\u4ea7\u4ea8\u4ea9\u4eaa\u4eab\u4eac\u4ead\u4eae\u4eaf\u4eb0\u4eb1\u4eb2\u4eb3\u4eb4\u4eb5\u4eb6\u4eb7\u4eb8\u4eb9\u4eba\u4ebb\u4ebc\u4ebd\u4ebe\u4ebf\u4ec0\u4ec1\u4ec2\u4ec3\u4ec4\u4ec5\u4ec6\u4ec7\u4ec8\u4ec9\u4eca\u4ecb\u4ecc\u4ecd\u4ece\u4ecf\u4ed0\u4ed1\u4ed2\u4ed3\u4ed4\u4ed5\u4ed6\u4ed7\u4ed8\u4ed9\u4eda\u4edb\u4edc\u4edd\u4ede\u4edf\u4ee0\u4ee1\u4ee2\u4ee3\u4ee4\u4ee5\u4ee6\u4ee7\u4ee8\u4ee9\u4eea\u4eeb\u4eec\u4eed\u4eee\u4eef\u4ef0\u4ef1\u4ef2\u4ef3\u4ef4\u4ef5\u4ef6\u4ef7\u4ef8\u4ef9\u4efa\u4efb\u4efc\u4efd\u4efe\u4eff\u4f00\u4f01\u4f02\u4f03\u4f04\u4f05\u4f06\u4f07\u4f08\u4f09\u4f0a\u4f0b\u4f0c\u4f0d\u4f0e\u4f0f\u4f10\u4f11\u4f12
\u4f13\u4f14\u4f15\u4f16\u4f17\u4f18\u4f19\u4f1a\u4f1b\u4f1c\u4f1d\u4f1e\u4f1f\u4f20\u4f21\u4f22\u4f23\u4f24\u4f25\u4f26\u4f27\u4f28\u4f29\u4f2a\u4f2b\u4f2c\u4f2d\u4f2e\u4f2f\u4f30\u4f31\u4f32\u4f33\u4f34\u4f35\u4f36\u4f37\u4f38\u4f39\u4f3a\u4f3b\u4f3c\u4f3d\u4f3e\u4f3f\u4f40\u4f41\u4f42\u4f43\u4f44\u4f45\u4f46\u4f47\u4f48\u4f49\u4f4a\u4f4b\u4f4c\u4f4d\u4f4e\u4f4f\u4f50\u4f51\u4f52\u4f53\u4f54\u4f55\u4f56\u4f57\u4f58\u4f59\u4f5a\u4f5b\u4f5c\u4f5d\u4f5e\u4f5f\u4f60\u4f61\u4f62\u4f63\u4f64\u4f65\u4f66\u4f67\u4f68\u4f69\u4f6a\u4f6b\u4f6c\u4f6d\u4f6e\u4f6f\u4f70\u4f71\u4f72\u4f73\u4f74\u4f75\u4f76\u4f77\u4f78\u4f79\u4f7a\u4f7b\u4f7c\u4f7d\u4f7e\u4f7f\u4f80\u4f81\u4f82\u4f83\u4f84\u4f85\u4f86\u4f87\u4f88\u4f89\u4f8a\u4f8b\u4f8c\u4f8d\u4f8e\u4f8f\u4f90\u4f91\u4f92\u4f93\u4f94\u4f95\u4f96\u4f97\u4f98\u4f99\u4f9a\u4f9b\u4f9c\u4f9d\u4f9e\u4f9f\u4fa0\u4fa1\u4fa2\u4fa3\u4fa4\u4fa5\u4fa6\u4fa7\u4fa8\u4fa9\u4faa\u4fab\u4fac\u4fad\u4fae\u4faf\u4fb0\u4fb1\u4fb2\u4fb3\u4fb4\u4fb5\u4fb6\u4fb7\u4fb8\u4fb9\u4fba\u4fbb\u4fbc\u4fbd\u4fbe\u4fbf\u4fc0\u4fc1\u4fc2\u4fc3\u4fc4\u4fc5\u4fc6\u4fc7\u4fc8\u4fc9\u4fca\u4fcb\u4fcc\u4fcd\u4fce\u4fcf\u4fd0\u4fd1\u4fd2\u4fd3\u4fd4\u4fd5\u4fd6\u4fd7\u4fd8\u4fd9\u4fda\u4fdb\u4fdc\u4fdd\u4fde\u4fdf\u4fe0\u4fe1\u4fe2\u4fe3\u4fe4\u4fe5\u4fe6\u4fe7\u4fe8\u4fe9\u4fea\u4feb\u4fec\u4fed\u4fee\u4fef\u4ff0\u4ff1\u4ff2\u4ff3\u4ff4\u4ff5\u4ff6\u4ff7\u4ff8\u4ff9\u4ffa\u4ffb\u4ffc\u4ffd\u4ffe\u4fff\u5000\u5001\u5002\u5003\u5004\u5005\u5006\u5007\u5008\u5009\u500a\u500b\u500c\u500d\u500e\u500f\u5010\u5011\u5012\u5013\u5014\u5015\u5016\u5017\u5018\u5019\u501a\u501b\u501c\u501d\u501e\u501f\u5020\u5021\u5022\u5023\u5024\u5025\u5026\u5027\u5028\u5029\u502a\u502b\u502c\u502d\u502e\u502f\u5030\u5031\u5032\u5033\u5034\u5035\u5036\u5037\u5038\u5039\u503a\u503b\u503c\u503d\u503e\u503f\u5040\u5041\u5042\u5043\u5044\u5045\u5046\u5047\u5048\u5049\u504a\u504b\u504c\u504d\u504e\u504f\u5050\u5051\u5052\u5053\u5054\u5055\u5056\u5057\u5058\u5059\u505a\u505b\u505c\u505d\u505e\u505f\u
5060\u5061\u5062\u5063\u5064\u5065\u5066\u5067\u5068\u5069\u506a\u506b\u506c\u506d\u506e\u506f\u5070\u5071\u5072\u5073\u5074\u5075\u5076\u5077\u5078\u5079\u507a\u507b\u507c\u507d\u507e\u507f\u5080\u5081\u5082\u5083\u5084\u5085\u5086\u5087\u5088\u5089\u508a\u508b\u508c\u508d\u508e\u508f\u5090\u5091\u5092\u5093\u5094\u5095\u5096\u5097\u5098\u5099\u509a\u509b\u509c\u509d\u509e\u509f\u50a0\u50a1\u50a2\u50a3\u50a4\u50a5\u50a6\u50a7\u50a8\u50a9\u50aa\u50ab\u50ac\u50ad\u50ae\u50af\u50b0\u50b1\u50b2\u50b3\u50b4\u50b5\u50b6\u50b7\u50b8\u50b9\u50ba\u50bb\u50bc\u50bd\u50be\u50bf\u50c0\u50c1\u50c2\u50c3\u50c4\u50c5\u50c6\u50c7\u50c8\u50c9\u50ca\u50cb\u50cc\u50cd\u50ce\u50cf\u50d0\u50d1\u50d2\u50d3\u50d4\u50d5\u50d6\u50d7\u50d8\u50d9\u50da\u50db\u50dc\u50dd\u50de\u50df\u50e0\u50e1\u50e2\u50e3\u50e4\u50e5\u50e6\u50e7\u50e8\u50e9\u50ea\u50eb\u50ec\u50ed\u50ee\u50ef\u50f0\u50f1\u50f2\u50f3\u50f4\u50f5\u50f6\u50f7\u50f8\u50f9\u50fa\u50fb\u50fc\u50fd\u50fe\u50ff\u5100\u5101\u5102\u5103\u5104\u5105\u5106\u5107\u5108\u5109\u510a\u510b\u510c\u510d\u510e\u510f\u5110\u5111\u5112\u5113\u5114\u5115\u5116\u5117\u5118\u5119\u511a\u511b\u511c\u511d\u511e\u511f\u5120\u5121\u5122\u5123\u5124\u5125\u5126\u5127\u5128\u5129\u512a\u512b\u512c\u512d\u512e\u512f\u5130\u5131\u5132\u5133\u5134\u5135\u5136\u5137\u5138\u5139\u513a\u513b\u513c\u513d\u513e\u513f\u5140\u5141\u5142\u5143\u5144\u5145\u5146\u5147\u5148\u5149\u514a\u514b\u514c\u514d\u514e\u514f\u5150\u5151\u5152\u5153\u5154\u5155\u5156\u5157\u5158\u5159\u515a\u515b\u515c\u515d\u515e\u515f\u5160\u5161\u5162\u5163\u5164\u5165\u5166\u5167\u5168\u5169\u516a\u516b\u516c\u516d\u516e\u516f\u5170\u5171\u5172\u5173\u5174\u5175\u5176\u5177\u5178\u5179\u517a\u517b\u517c\u517d\u517e\u517f\u5180\u5181\u5182\u5183\u5184\u5185\u5186\u5187\u5188\u5189\u518a\u518b\u518c\u518d\u518e\u518f\u5190\u5191\u5192\u5193\u5194\u5195\u5196\u5197\u5198\u5199\u519a\u519b\u519c\u519d\u519e\u519f\u51a0\u51a1\u51a2\u51a3\u51a4\u51a5\u51a6\u51a7\u51a8\u51a9\u51aa\u51ab\u51ac\u51
ad\u51ae\u51af\u51b0\u51b1\u51b2\u51b3\u51b4\u51b5\u51b6\u51b7\u51b8\u51b9\u51ba\u51bb\u51bc\u51bd\u51be\u51bf\u51c0\u51c1\u51c2\u51c3\u51c4\u51c5\u51c6\u51c7\u51c8\u51c9\u51ca\u51cb\u51cc\u51cd\u51ce\u51cf\u51d0\u51d1\u51d2\u51d3\u51d4\u51d5\u51d6\u51d7\u51d8\u51d9\u51da\u51db\u51dc\u51dd\u51de\u51df\u51e0\u51e1\u51e2\u51e3\u51e4\u51e5\u51e6\u51e7\u51e8\u51e9\u51ea\u51eb\u51ec\u51ed\u51ee\u51ef\u51f0\u51f1\u51f2\u51f3\u51f4\u51f5\u51f6\u51f7\u51f8\u51f9\u51fa\u51fb\u51fc\u51fd\u51fe\u51ff\u5200\u5201\u5202\u5203\u5204\u5205\u5206\u5207\u5208\u5209\u520a\u520b\u520c\u520d\u520e\u520f\u5210\u5211\u5212\u5213\u5214\u5215\u5216\u5217\u5218\u5219\u521a\u521b\u521c\u521d\u521e\u521f\u5220\u5221\u5222\u5223\u5224\u5225\u5226\u5227\u5228\u5229\u522a\u522b\u522c\u522d\u522e\u522f\u5230\u5231\u5232\u5233\u5234\u5235\u5236\u5237\u5238\u5239\u523a\u523b\u523c\u523d\u523e\u523f\u5240\u5241\u5242\u5243\u5244\u5245\u5246\u5247\u5248\u5249\u524a\u524b\u524c\u524d\u524e\u524f\u5250\u5251\u5252\u5253\u5254\u5255\u5256\u5257\u5258\u5259\u525a\u525b\u525c\u525d\u525e\u525f\u5260\u5261\u5262\u5263\u5264\u5265\u5266\u5267\u5268\u5269\u526a\u526b\u526c\u526d\u526e\u526f\u5270\u5271\u5272\u5273\u5274\u5275\u5276\u5277\u5278\u5279\u527a\u527b\u527c\u527d\u527e\u527f\u5280\u5281\u5282\u5283\u5284\u5285\u5286\u5287\u5288\u5289\u528a\u528b\u528c\u528d\u528e\u528f\u5290\u5291\u5292\u5293\u5294\u5295\u5296\u5297\u5298\u5299\u529a\u529b\u529c\u529d\u529e\u529f\u52a0\u52a1\u52a2\u52a3\u52a4\u52a5\u52a6\u52a7\u52a8\u52a9\u52aa\u52ab\u52ac\u52ad\u52ae\u52af\u52b0\u52b1\u52b2\u52b3\u52b4\u52b5\u52b6\u52b7\u52b8\u52b9\u52ba\u52bb\u52bc\u52bd\u52be\u52bf\u52c0\u52c1\u52c2\u52c3\u52c4\u52c5\u52c6\u52c7\u52c8\u52c9\u52ca\u52cb\u52cc\u52cd\u52ce\u52cf\u52d0\u52d1\u52d2\u52d3\u52d4\u52d5\u52d6\u52d7\u52d8\u52d9\u52da\u52db\u52dc\u52dd\u52de\u52df\u52e0\u52e1\u52e2\u52e3\u52e4\u52e5\u52e6\u52e7\u52e8\u52e9\u52ea\u52eb\u52ec\u52ed\u52ee\u52ef\u52f0\u52f1\u52f2\u52f3\u52f4\u52f5\u52f6\u52f7\u52f8\u52f9\u52fa
\u52fb\u52fc\u52fd\u52fe\u52ff\u5300\u5301\u5302\u5303\u5304\u5305\u5306\u5307\u5308\u5309\u530a\u530b\u530c\u530d\u530e\u530f\u5310\u5311\u5312\u5313\u5314\u5315\u5316\u5317\u5318\u5319\u531a\u531b\u531c\u531d\u531e\u531f\u5320\u5321\u5322\u5323\u5324\u5325\u5326\u5327\u5328\u5329\u532a\u532b\u532c\u532d\u532e\u532f\u5330\u5331\u5332\u5333\u5334\u5335\u5336\u5337\u5338\u5339\u533a\u533b\u533c\u533d\u533e\u533f\u5340\u5341\u5342\u5343\u5344\u5345\u5346\u5347\u5348\u5349\u534a\u534b\u534c\u534d\u534e\u534f\u5350\u5351\u5352\u5353\u5354\u5355\u5356\u5357\u5358\u5359\u535a\u535b\u535c\u535d\u535e\u535f\u5360\u5361\u5362\u5363\u5364\u5365\u5366\u5367\u5368\u5369\u536a\u536b\u536c\u536d\u536e\u536f\u5370\u5371\u5372\u5373\u5374\u5375\u5376\u5377\u5378\u5379\u537a\u537b\u537c\u537d\u537e\u537f\u5380\u5381\u5382\u5383\u5384\u5385\u5386\u5387\u5388\u5389\u538a\u538b\u538c\u538d\u538e\u538f\u5390\u5391\u5392\u5393\u5394\u5395\u5396\u5397\u5398\u5399\u539a\u539b\u539c\u539d\u539e\u539f\u53a0\u53a1\u53a2\u53a3\u53a4\u53a5\u53a6\u53a7\u53a8\u53a9\u53aa\u53ab\u53ac\u53ad\u53ae\u53af\u53b0\u53b1\u53b2\u53b3\u53b4\u53b5\u53b6\u53b7\u53b8\u53b9\u53ba\u53bb\u53bc\u53bd\u53be\u53bf\u53c0\u53c1\u53c2\u53c3\u53c4\u53c5\u53c6\u53c7\u53c8\u53c9\u53ca\u53cb\u53cc\u53cd\u53ce\u53cf\u53d0\u53d1\u53d2\u53d3\u53d4\u53d5\u53d6\u53d7\u53d8\u53d9\u53da\u53db\u53dc\u53dd\u53de\u53df\u53e0\u53e1\u53e2\u53e3\u53e4\u53e5\u53e6\u53e7\u53e8\u53e9\u53ea\u53eb\u53ec\u53ed\u53ee\u53ef\u53f0\u53f1\u53f2\u53f3\u53f4\u53f5\u53f6\u53f7\u53f8\u53f9\u53fa\u53fb\u53fc\u53fd\u53fe\u53ff\u5400\u5401\u5402\u5403\u5404\u5405\u5406\u5407\u5408\u5409\u540a\u540b\u540c\u540d\u540e\u540f\u5410\u5411\u5412\u5413\u5414\u5415\u5416\u5417\u5418\u5419\u541a\u541b\u541c\u541d\u541e\u541f\u5420\u5421\u5422\u5423\u5424\u5425\u5426\u5427\u5428\u5429\u542a\u542b\u542c\u542d\u542e\u542f\u5430\u5431\u5432\u5433\u5434\u5435\u5436\u5437\u5438\u5439\u543a\u543b\u543c\u543d\u543e\u543f\u5440\u5441\u5442\u5443\u5444\u5445\u5446\u5447\u
5448\u5449\u544a\u544b\u544c\u544d\u544e\u544f\u5450\u5451\u5452\u5453\u5454\u5455\u5456\u5457\u5458\u5459\u545a\u545b\u545c\u545d\u545e\u545f\u5460\u5461\u5462\u5463\u5464\u5465\u5466\u5467\u5468\u5469\u546a\u546b\u546c\u546d\u546e\u546f\u5470\u5471\u5472\u5473\u5474\u5475\u5476\u5477\u5478\u5479\u547a\u547b\u547c\u547d\u547e\u547f\u5480\u5481\u5482\u5483\u5484\u5485\u5486\u5487\u5488\u5489\u548a\u548b\u548c\u548d\u548e\u548f\u5490\u5491\u5492\u5493\u5494\u5495\u5496\u5497\u5498\u5499\u549a\u549b\u549c\u549d\u549e\u549f\u54a0\u54a1\u54a2\u54a3\u54a4\u54a5\u54a6\u54a7\u54a8\u54a9\u54aa\u54ab\u54ac\u54ad\u54ae\u54af\u54b0\u54b1\u54b2\u54b3\u54b4\u54b5\u54b6\u54b7\u54b8\u54b9\u54ba\u54bb\u54bc\u54bd\u54be\u54bf\u54c0\u54c1\u54c2\u54c3\u54c4\u54c5\u54c6\u54c7\u54c8\u54c9\u54ca\u54cb\u54cc\u54cd\u54ce\u54cf\u54d0\u54d1\u54d2\u54d3\u54d4\u54d5\u54d6\u54d7\u54d8\u54d9\u54da\u54db\u54dc\u54dd\u54de\u54df\u54e0\u54e1\u54e2\u54e3\u54e4\u54e5\u54e6\u54e7\u54e8\u54e9\u54ea\u54eb\u54ec\u54ed\u54ee\u54ef\u54f0\u54f1\u54f2\u54f3\u54f4\u54f5\u54f6\u54f7\u54f8\u54f9\u54fa\u54fb\u54fc\u54fd\u54fe\u54ff\u5500\u5501\u5502\u5503\u5504\u5505\u5506\u5507\u5508\u5509\u550a\u550b\u550c\u550d\u550e\u550f\u5510\u5511\u5512\u5513\u5514\u5515\u5516\u5517\u5518\u5519\u551a\u551b\u551c\u551d\u551e\u551f\u5520\u5521\u5522\u5523\u5524\u5525\u5526\u5527\u5528\u5529\u552a\u552b\u552c\u552d\u552e\u552f\u5530\u5531\u5532\u5533\u5534\u5535\u5536\u5537\u5538\u5539\u553a\u553b\u553c\u553d\u553e\u553f\u5540\u5541\u5542\u5543\u5544\u5545\u5546\u5547\u5548\u5549\u554a\u554b\u554c\u554d\u554e\u554f\u5550\u5551\u5552\u5553\u5554\u5555\u5556\u5557\u5558\u5559\u555a\u555b\u555c\u555d\u555e\u555f\u5560\u5561\u5562\u5563\u5564\u5565\u5566\u5567\u5568\u5569\u556a\u556b\u556c\u556d\u556e\u556f\u5570\u5571\u5572\u5573\u5574\u5575\u5576\u5577\u5578\u5579\u557a\u557b\u557c\u557d\u557e\u557f\u5580\u5581\u5582\u5583\u5584\u5585\u5586\u5587\u5588\u5589\u558a\u558b\u558c\u558d\u558e\u558f\u5590\u5591\u5592\u5593\u5594\u55
95\u5596\u5597\u5598\u5599\u559a\u559b\u559c\u559d\u559e\u559f\u55a0\u55a1\u55a2\u55a3\u55a4\u55a5\u55a6\u55a7\u55a8\u55a9\u55aa\u55ab\u55ac\u55ad\u55ae\u55af\u55b0\u55b1\u55b2\u55b3\u55b4\u55b5\u55b6\u55b7\u55b8\u55b9\u55ba\u55bb\u55bc\u55bd\u55be\u55bf\u55c0\u55c1\u55c2\u55c3\u55c4\u55c5\u55c6\u55c7\u55c8\u55c9\u55ca\u55cb\u55cc\u55cd\u55ce\u55cf\u55d0\u55d1\u55d2\u55d3\u55d4\u55d5\u55d6\u55d7\u55d8\u55d9\u55da\u55db\u55dc\u55dd\u55de\u55df\u55e0\u55e1\u55e2\u55e3\u55e4\u55e5\u55e6\u55e7\u55e8\u55e9\u55ea\u55eb\u55ec\u55ed\u55ee\u55ef\u55f0\u55f1\u55f2\u55f3\u55f4\u55f5\u55f6\u55f7\u55f8\u55f9\u55fa\u55fb\u55fc\u55fd\u55fe\u55ff\u5600\u5601\u5602\u5603\u5604\u5605\u5606\u5607\u5608\u5609\u560a\u560b\u560c\u560d\u560e\u560f\u5610\u5611\u5612\u5613\u5614\u5615\u5616\u5617\u5618\u5619\u561a\u561b\u561c\u561d\u561e\u561f\u5620\u5621\u5622\u5623\u5624\u5625\u5626\u5627\u5628\u5629\u562a\u562b\u562c\u562d\u562e\u562f\u5630\u5631\u5632\u5633\u5634\u5635\u5636\u5637\u5638\u5639\u563a\u563b\u563c\u563d\u563e\u563f\u5640\u5641\u5642\u5643\u5644\u5645\u5646\u5647\u5648\u5649\u564a\u564b\u564c\u564d\u564e\u564f\u5650\u5651\u5652\u5653\u5654\u5655\u5656\u5657\u5658\u5659\u565a\u565b\u565c\u565d\u565e\u565f\u5660\u5661\u5662\u5663\u5664\u5665\u5666\u5667\u5668\u5669\u566a\u566b\u566c\u566d\u566e\u566f\u5670\u5671\u5672\u5673\u5674\u5675\u5676\u5677\u5678\u5679\u567a\u567b\u567c\u567d\u567e\u567f\u5680\u5681\u5682\u5683\u5684\u5685\u5686\u5687\u5688\u5689\u568a\u568b\u568c\u568d\u568e\u568f\u5690\u5691\u5692\u5693\u5694\u5695\u5696\u5697\u5698\u5699\u569a\u569b\u569c\u569d\u569e\u569f\u56a0\u56a1\u56a2\u56a3\u56a4\u56a5\u56a6\u56a7\u56a8\u56a9\u56aa\u56ab\u56ac\u56ad\u56ae\u56af\u56b0\u56b1\u56b2\u56b3\u56b4\u56b5\u56b6\u56b7\u56b8\u56b9\u56ba\u56bb\u56bc\u56bd\u56be\u56bf\u56c0\u56c1\u56c2\u56c3\u56c4\u56c5\u56c6\u56c7\u56c8\u56c9\u56ca\u56cb\u56cc\u56cd\u56ce\u56cf\u56d0\u56d1\u56d2\u56d3\u56d4\u56d5\u56d6\u56d7\u56d8\u56d9\u56da\u56db\u56dc\u56dd\u56de\u56df\u56e0\u56e1\u56e2
\u56e3\u56e4\u56e5\u56e6\u56e7\u56e8\u56e9\u56ea\u56eb\u56ec\u56ed\u56ee\u56ef\u56f0\u56f1\u56f2\u56f3\u56f4\u56f5\u56f6\u56f7\u56f8\u56f9\u56fa\u56fb\u56fc\u56fd\u56fe\u56ff\u5700\u5701\u5702\u5703\u5704\u5705\u5706\u5707\u5708\u5709\u570a\u570b\u570c\u570d\u570e\u570f\u5710\u5711\u5712\u5713\u5714\u5715\u5716\u5717\u5718\u5719\u571a\u571b\u571c\u571d\u571e\u571f\u5720\u5721\u5722\u5723\u5724\u5725\u5726\u5727\u5728\u5729\u572a\u572b\u572c\u572d\u572e\u572f\u5730\u5731\u5732\u5733\u5734\u5735\u5736\u5737\u5738\u5739\u573a\u573b\u573c\u573d\u573e\u573f\u5740\u5741\u5742\u5743\u5744\u5745\u5746\u5747\u5748\u5749\u574a\u574b\u574c\u574d\u574e\u574f\u5750\u5751\u5752\u5753\u5754\u5755\u5756\u5757\u5758\u5759\u575a\u575b\u575c\u575d\u575e\u575f\u5760\u5761\u5762\u5763\u5764\u5765\u5766\u5767\u5768\u5769\u576a\u576b\u576c\u576d\u576e\u576f\u5770\u5771\u5772\u5773\u5774\u5775\u5776\u5777\u5778\u5779\u577a\u577b\u577c\u577d\u577e\u577f\u5780\u5781\u5782\u5783\u5784\u5785\u5786\u5787\u5788\u5789\u578a\u578b\u578c\u578d\u578e\u578f\u5790\u5791\u5792\u5793\u5794\u5795\u5796\u5797\u5798\u5799\u579a\u579b\u579c\u579d\u579e\u579f\u57a0\u57a1\u57a2\u57a3\u57a4\u57a5\u57a6\u57a7\u57a8\u57a9\u57aa\u57ab\u57ac\u57ad\u57ae\u57af\u57b0\u57b1\u57b2\u57b3\u57b4\u57b5\u57b6\u57b7\u57b8\u57b9\u57ba\u57bb\u57bc\u57bd\u57be\u57bf\u57c0\u57c1\u57c2\u57c3\u57c4\u57c5\u57c6\u57c7\u57c8\u57c9\u57ca\u57cb\u57cc\u57cd\u57ce\u57cf\u57d0\u57d1\u57d2\u57d3\u57d4\u57d5\u57d6\u57d7\u57d8\u57d9\u57da\u57db\u57dc\u57dd\u57de\u57df\u57e0\u57e1\u57e2\u57e3\u57e4\u57e5\u57e6\u57e7\u57e8\u57e9\u57ea\u57eb\u57ec\u57ed\u57ee\u57ef\u57f0\u57f1\u57f2\u57f3\u57f4\u57f5\u57f6\u57f7\u57f8\u57f9\u57fa\u57fb\u57fc\u57fd\u57fe\u57ff\u5800\u5801\u5802\u5803\u5804\u5805\u5806\u5807\u5808\u5809\u580a\u580b\u580c\u580d\u580e\u580f\u5810\u5811\u5812\u5813\u5814\u5815\u5816\u5817\u5818\u5819\u581a\u581b\u581c\u581d\u581e\u581f\u5820\u5821\u5822\u5823\u5824\u5825\u5826\u5827\u5828\u5829\u582a\u582b\u582c\u582d\u582e\u582f\u
5830\u5831\u5832\u5833\u5834\u5835\u5836\u5837\u5838\u5839\u583a\u583b\u583c\u583d\u583e\u583f\u5840\u5841\u5842\u5843\u5844\u5845\u5846\u5847\u5848\u5849\u584a\u584b\u584c\u584d\u584e\u584f\u5850\u5851\u5852\u5853\u5854\u5855\u5856\u5857\u5858\u5859\u585a\u585b\u585c\u585d\u585e\u585f\u5860\u5861\u5862\u5863\u5864\u5865\u5866\u5867\u5868\u5869\u586a\u586b\u586c\u586d\u586e\u586f\u5870\u5871\u5872\u5873\u5874\u5875\u5876\u5877\u5878\u5879\u587a\u587b\u587c\u587d\u587e\u587f\u5880\u5881\u5882\u5883\u5884\u5885\u5886\u5887\u5888\u5889\u588a\u588b\u588c\u588d\u588e\u588f\u5890\u5891\u5892\u5893\u5894\u5895\u5896\u5897\u5898\u5899\u589a\u589b\u589c\u589d\u589e\u589f\u58a0\u58a1\u58a2\u58a3\u58a4\u58a5\u58a6\u58a7\u58a8\u58a9\u58aa\u58ab\u58ac\u58ad\u58ae\u58af\u58b0\u58b1\u58b2\u58b3\u58b4\u58b5\u58b6\u58b7\u58b8\u58b9\u58ba\u58bb\u58bc\u58bd\u58be\u58bf\u58c0\u58c1\u58c2\u58c3\u58c4\u58c5\u58c6\u58c7\u58c8\u58c9\u58ca\u58cb\u58cc\u58cd\u58ce\u58cf\u58d0\u58d1\u58d2\u58d3\u58d4\u58d5\u58d6\u58d7\u58d8\u58d9\u58da\u58db\u58dc\u58dd\u58de\u58df\u58e0\u58e1\u58e2\u58e3\u58e4\u58e5\u58e6\u58e7\u58e8\u58e9\u58ea\u58eb\u58ec\u58ed\u58ee\u58ef\u58f0\u58f1\u58f2\u58f3\u58f4\u58f5\u58f6\u58f7\u58f8\u58f9\u58fa\u58fb\u58fc\u58fd\u58fe\u58ff\u5900\u5901\u5902\u5903\u5904\u5905\u5906\u5907\u5908\u5909\u590a\u590b\u590c\u590d\u590e\u590f\u5910\u5911\u5912\u5913\u5914\u5915\u5916\u5917\u5918\u5919\u591a\u591b\u591c\u591d\u591e\u591f\u5920\u5921\u5922\u5923\u5924\u5925\u5926\u5927\u5928\u5929\u592a\u592b\u592c\u592d\u592e\u592f\u5930\u5931\u5932\u5933\u5934\u5935\u5936\u5937\u5938\u5939\u593a\u593b\u593c\u593d\u593e\u593f\u5940\u5941\u5942\u5943\u5944\u5945\u5946\u5947\u5948\u5949\u594a\u594b\u594c\u594d\u594e\u594f\u5950\u5951\u5952\u5953\u5954\u5955\u5956\u5957\u5958\u5959\u595a\u595b\u595c\u595d\u595e\u595f\u5960\u5961\u5962\u5963\u5964\u5965\u5966\u5967\u5968\u5969\u596a\u596b\u596c\u596d\u596e\u596f\u5970\u5971\u5972\u5973\u5974\u5975\u5976\u5977\u5978\u5979\u597a\u597b\u597c\u59
7d\u597e\u597f\u5980\u5981\u5982\u5983\u5984\u5985\u5986\u5987\u5988\u5989\u598a\u598b\u598c\u598d\u598e\u598f\u5990\u5991\u5992\u5993\u5994\u5995\u5996\u5997\u5998\u5999\u599a\u599b\u599c\u599d\u599e\u599f\u59a0\u59a1\u59a2\u59a3\u59a4\u59a5\u59a6\u59a7\u59a8\u59a9\u59aa\u59ab\u59ac\u59ad\u59ae\u59af\u59b0\u59b1\u59b2\u59b3\u59b4\u59b5\u59b6\u59b7\u59b8\u59b9\u59ba\u59bb\u59bc\u59bd\u59be\u59bf\u59c0\u59c1\u59c2\u59c3\u59c4\u59c5\u59c6\u59c7\u59c8\u59c9\u59ca\u59cb\u59cc\u59cd\u59ce\u59cf\u59d0\u59d1\u59d2\u59d3\u59d4\u59d5\u59d6\u59d7\u59d8\u59d9\u59da\u59db\u59dc\u59dd\u59de\u59df\u59e0\u59e1\u59e2\u59e3\u59e4\u59e5\u59e6\u59e7\u59e8\u59e9\u59ea\u59eb\u59ec\u59ed\u59ee\u59ef\u59f0\u59f1\u59f2\u59f3\u59f4\u59f5\u59f6\u59f7\u59f8\u59f9\u59fa\u59fb\u59fc\u59fd\u59fe\u59ff\u5a00\u5a01\u5a02\u5a03\u5a04\u5a05\u5a06\u5a07\u5a08\u5a09\u5a0a\u5a0b\u5a0c\u5a0d\u5a0e\u5a0f\u5a10\u5a11\u5a12\u5a13\u5a14\u5a15\u5a16\u5a17\u5a18\u5a19\u5a1a\u5a1b\u5a1c\u5a1d\u5a1e\u5a1f\u5a20\u5a21\u5a22\u5a23\u5a24\u5a25\u5a26\u5a27\u5a28\u5a29\u5a2a\u5a2b\u5a2c\u5a2d\u5a2e\u5a2f\u5a30\u5a31\u5a32\u5a33\u5a34\u5a35\u5a36\u5a37\u5a38\u5a39\u5a3a\u5a3b\u5a3c\u5a3d\u5a3e\u5a3f\u5a40\u5a41\u5a42\u5a43\u5a44\u5a45\u5a46\u5a47\u5a48\u5a49\u5a4a\u5a4b\u5a4c\u5a4d\u5a4e\u5a4f\u5a50\u5a51\u5a52\u5a53\u5a54\u5a55\u5a56\u5a57\u5a58\u5a59\u5a5a\u5a5b\u5a5c\u5a5d\u5a5e\u5a5f\u5a60\u5a61\u5a62\u5a63\u5a64\u5a65\u5a66\u5a67\u5a68\u5a69\u5a6a\u5a6b\u5a6c\u5a6d\u5a6e\u5a6f\u5a70\u5a71\u5a72\u5a73\u5a74\u5a75\u5a76\u5a77\u5a78\u5a79\u5a7a\u5a7b\u5a7c\u5a7d\u5a7e\u5a7f\u5a80\u5a81\u5a82\u5a83\u5a84\u5a85\u5a86\u5a87\u5a88\u5a89\u5a8a\u5a8b\u5a8c\u5a8d\u5a8e\u5a8f\u5a90\u5a91\u5a92\u5a93\u5a94\u5a95\u5a96\u5a97\u5a98\u5a99\u5a9a\u5a9b\u5a9c\u5a9d\u5a9e\u5a9f\u5aa0\u5aa1\u5aa2\u5aa3\u5aa4\u5aa5\u5aa6\u5aa7\u5aa8\u5aa9\u5aaa\u5aab\u5aac\u5aad\u5aae\u5aaf\u5ab0\u5ab1\u5ab2\u5ab3\u5ab4\u5ab5\u5ab6\u5ab7\u5ab8\u5ab9\u5aba\u5abb\u5abc\u5abd\u5abe\u5abf\u5ac0\u5ac1\u5ac2\u5ac3\u5ac4\u5ac5\u5ac6\u5ac7\u5ac8\u5ac9\u5aca
\u5acb\u5acc\u5acd\u5ace\u5acf\u5ad0\u5ad1\u5ad2\u5ad3\u5ad4\u5ad5\u5ad6\u5ad7\u5ad8\u5ad9\u5ada\u5adb\u5adc\u5add\u5ade\u5adf\u5ae0\u5ae1\u5ae2\u5ae3\u5ae4\u5ae5\u5ae6\u5ae7\u5ae8\u5ae9\u5aea\u5aeb\u5aec\u5aed\u5aee\u5aef\u5af0\u5af1\u5af2\u5af3\u5af4\u5af5\u5af6\u5af7\u5af8\u5af9\u5afa\u5afb\u5afc\u5afd\u5afe\u5aff\u5b00\u5b01\u5b02\u5b03\u5b04\u5b05\u5b06\u5b07\u5b08\u5b09\u5b0a\u5b0b\u5b0c\u5b0d\u5b0e\u5b0f\u5b10\u5b11\u5b12\u5b13\u5b14\u5b15\u5b16\u5b17\u5b18\u5b19\u5b1a\u5b1b\u5b1c\u5b1d\u5b1e\u5b1f\u5b20\u5b21\u5b22\u5b23\u5b24\u5b25\u5b26\u5b27\u5b28\u5b29\u5b2a\u5b2b\u5b2c\u5b2d\u5b2e\u5b2f\u5b30\u5b31\u5b32\u5b33\u5b34\u5b35\u5b36\u5b37\u5b38\u5b39\u5b3a\u5b3b\u5b3c\u5b3d\u5b3e\u5b3f\u5b40\u5b41\u5b42\u5b43\u5b44\u5b45\u5b46\u5b47\u5b48\u5b49\u5b4a\u5b4b\u5b4c\u5b4d\u5b4e\u5b4f\u5b50\u5b51\u5b52\u5b53\u5b54\u5b55\u5b56\u5b57\u5b58\u5b59\u5b5a\u5b5b\u5b5c\u5b5d\u5b5e\u5b5f\u5b60\u5b61\u5b62\u5b63\u5b64\u5b65\u5b66\u5b67\u5b68\u5b69\u5b6a\u5b6b\u5b6c\u5b6d\u5b6e\u5b6f\u5b70\u5b71\u5b72\u5b73\u5b74\u5b75\u5b76\u5b77\u5b78\u5b79\u5b7a\u5b7b\u5b7c\u5b7d\u5b7e\u5b7f\u5b80\u5b81\u5b82\u5b83\u5b84\u5b85\u5b86\u5b87\u5b88\u5b89\u5b8a\u5b8b\u5b8c\u5b8d\u5b8e\u5b8f\u5b90\u5b91\u5b92\u5b93\u5b94\u5b95\u5b96\u5b97\u5b98\u5b99\u5b9a\u5b9b\u5b9c\u5b9d\u5b9e\u5b9f\u5ba0\u5ba1\u5ba2\u5ba3\u5ba4\u5ba5\u5ba6\u5ba7\u5ba8\u5ba9\u5baa\u5bab\u5bac\u5bad\u5bae\u5baf\u5bb0\u5bb1\u5bb2\u5bb3\u5bb4\u5bb5\u5bb6\u5bb7\u5bb8\u5bb9\u5bba\u5bbb\u5bbc\u5bbd\u5bbe\u5bbf\u5bc0\u5bc1\u5bc2\u5bc3\u5bc4\u5bc5\u5bc6\u5bc7\u5bc8\u5bc9\u5bca\u5bcb\u5bcc\u5bcd\u5bce\u5bcf\u5bd0\u5bd1\u5bd2\u5bd3\u5bd4\u5bd5\u5bd6\u5bd7\u5bd8\u5bd9\u5bda\u5bdb\u5bdc\u5bdd\u5bde\u5bdf\u5be0\u5be1\u5be2\u5be3\u5be4\u5be5\u5be6\u5be7\u5be8\u5be9\u5bea\u5beb\u5bec\u5bed\u5bee\u5bef\u5bf0\u5bf1\u5bf2\u5bf3\u5bf4\u5bf5\u5bf6\u5bf7\u5bf8\u5bf9\u5bfa\u5bfb\u5bfc\u5bfd\u5bfe\u5bff\u5c00\u5c01\u5c02\u5c03\u5c04\u5c05\u5c06\u5c07\u5c08\u5c09\u5c0a\u5c0b\u5c0c\u5c0d\u5c0e\u5c0f\u5c10\u5c11\u5c12\u5c13\u5c14\u5c15\u5c16\u5c17\u
5c18\u5c19\u5c1a\u5c1b\u5c1c\u5c1d\u5c1e\u5c1f\u5c20\u5c21\u5c22\u5c23\u5c24\u5c25\u5c26\u5c27\u5c28\u5c29\u5c2a\u5c2b\u5c2c\u5c2d\u5c2e\u5c2f\u5c30\u5c31\u5c32\u5c33\u5c34\u5c35\u5c36\u5c37\u5c38\u5c39\u5c3a\u5c3b\u5c3c\u5c3d\u5c3e\u5c3f\u5c40\u5c41\u5c42\u5c43\u5c44\u5c45\u5c46\u5c47\u5c48\u5c49\u5c4a\u5c4b\u5c4c\u5c4d\u5c4e\u5c4f\u5c50\u5c51\u5c52\u5c53\u5c54\u5c55\u5c56\u5c57\u5c58\u5c59\u5c5a\u5c5b\u5c5c\u5c5d\u5c5e\u5c5f\u5c60\u5c61\u5c62\u5c63\u5c64\u5c65\u5c66\u5c67\u5c68\u5c69\u5c6a\u5c6b\u5c6c\u5c6d\u5c6e\u5c6f\u5c70\u5c71\u5c72\u5c73\u5c74\u5c75\u5c76\u5c77\u5c78\u5c79\u5c7a\u5c7b\u5c7c\u5c7d\u5c7e\u5c7f\u5c80\u5c81\u5c82\u5c83\u5c84\u5c85\u5c86\u5c87\u5c88\u5c89\u5c8a\u5c8b\u5c8c\u5c8d\u5c8e\u5c8f\u5c90\u5c91\u5c92\u5c93\u5c94\u5c95\u5c96\u5c97\u5c98\u5c99\u5c9a\u5c9b\u5c9c\u5c9d\u5c9e\u5c9f\u5ca0\u5ca1\u5ca2\u5ca3\u5ca4\u5ca5\u5ca6\u5ca7\u5ca8\u5ca9\u5caa\u5cab\u5cac\u5cad\u5cae\u5caf\u5cb0\u5cb1\u5cb2\u5cb3\u5cb4\u5cb5\u5cb6\u5cb7\u5cb8\u5cb9\u5cba\u5cbb\u5cbc\u5cbd\u5cbe\u5cbf\u5cc0\u5cc1\u5cc2\u5cc3\u5cc4\u5cc5\u5cc6\u5cc7\u5cc8\u5cc9\u5cca\u5ccb\u5ccc\u5ccd\u5cce\u5ccf\u5cd0\u5cd1\u5cd2\u5cd3\u5cd4\u5cd5\u5cd6\u5cd7\u5cd8\u5cd9\u5cda\u5cdb\u5cdc\u5cdd\u5cde\u5cdf\u5ce0\u5ce1\u5ce2\u5ce3\u5ce4\u5ce5\u5ce6\u5ce7\u5ce8\u5ce9\u5cea\u5ceb\u5cec\u5ced\u5cee\u5cef\u5cf0\u5cf1\u5cf2\u5cf3\u5cf4\u5cf5\u5cf6\u5cf7\u5cf8\u5cf9\u5cfa\u5cfb\u5cfc\u5cfd\u5cfe\u5cff\u5d00\u5d01\u5d02\u5d03\u5d04\u5d05\u5d06\u5d07\u5d08\u5d09\u5d0a\u5d0b\u5d0c\u5d0d\u5d0e\u5d0f\u5d10\u5d11\u5d12\u5d13\u5d14\u5d15\u5d16\u5d17\u5d18\u5d19\u5d1a\u5d1b\u5d1c\u5d1d\u5d1e\u5d1f\u5d20\u5d21\u5d22\u5d23\u5d24\u5d25\u5d26\u5d27\u5d28\u5d29\u5d2a\u5d2b\u5d2c\u5d2d\u5d2e\u5d2f\u5d30\u5d31\u5d32\u5d33\u5d34\u5d35\u5d36\u5d37\u5d38\u5d39\u5d3a\u5d3b\u5d3c\u5d3d\u5d3e\u5d3f\u5d40\u5d41\u5d42\u5d43\u5d44\u5d45\u5d46\u5d47\u5d48\u5d49\u5d4a\u5d4b\u5d4c\u5d4d\u5d4e\u5d4f\u5d50\u5d51\u5d52\u5d53\u5d54\u5d55\u5d56\u5d57\u5d58\u5d59\u5d5a\u5d5b\u5d5c\u5d5d\u5d5e\u5d5f\u5d60\u5d61\u5d62\u5d63\u5d64\u5d
65\u5d66\u5d67\u5d68\u5d69\u5d6a\u5d6b\u5d6c\u5d6d\u5d6e\u5d6f\u5d70\u5d71\u5d72\u5d73\u5d74\u5d75\u5d76\u5d77\u5d78\u5d79\u5d7a\u5d7b\u5d7c\u5d7d\u5d7e\u5d7f\u5d80\u5d81\u5d82\u5d83\u5d84\u5d85\u5d86\u5d87\u5d88\u5d89\u5d8a\u5d8b\u5d8c\u5d8d\u5d8e\u5d8f\u5d90\u5d91\u5d92\u5d93\u5d94\u5d95\u5d96\u5d97\u5d98\u5d99\u5d9a\u5d9b\u5d9c\u5d9d\u5d9e\u5d9f\u5da0\u5da1\u5da2\u5da3\u5da4\u5da5\u5da6\u5da7\u5da8\u5da9\u5daa\u5dab\u5dac\u5dad\u5dae\u5daf\u5db0\u5db1\u5db2\u5db3\u5db4\u5db5\u5db6\u5db7\u5db8\u5db9\u5dba\u5dbb\u5dbc\u5dbd\u5dbe\u5dbf\u5dc0\u5dc1\u5dc2\u5dc3\u5dc4\u5dc5\u5dc6\u5dc7\u5dc8\u5dc9\u5dca\u5dcb\u5dcc\u5dcd\u5dce\u5dcf\u5dd0\u5dd1\u5dd2\u5dd3\u5dd4\u5dd5\u5dd6\u5dd7\u5dd8\u5dd9\u5dda\u5ddb\u5ddc\u5ddd\u5dde\u5ddf\u5de0\u5de1\u5de2\u5de3\u5de4\u5de5\u5de6\u5de7\u5de8\u5de9\u5dea\u5deb\u5dec\u5ded\u5dee\u5def\u5df0\u5df1\u5df2\u5df3\u5df4\u5df5\u5df6\u5df7\u5df8\u5df9\u5dfa\u5dfb\u5dfc\u5dfd\u5dfe\u5dff\u5e00\u5e01\u5e02\u5e03\u5e04\u5e05\u5e06\u5e07\u5e08\u5e09\u5e0a\u5e0b\u5e0c\u5e0d\u5e0e\u5e0f\u5e10\u5e11\u5e12\u5e13\u5e14\u5e15\u5e16\u5e17\u5e18\u5e19\u5e1a\u5e1b\u5e1c\u5e1d\u5e1e\u5e1f\u5e20\u5e21\u5e22\u5e23\u5e24\u5e25\u5e26\u5e27\u5e28\u5e29\u5e2a\u5e2b\u5e2c\u5e2d\u5e2e\u5e2f\u5e30\u5e31\u5e32\u5e33\u5e34\u5e35\u5e36\u5e37\u5e38\u5e39\u5e3a\u5e3b\u5e3c\u5e3d\u5e3e\u5e3f\u5e40\u5e41\u5e42\u5e43\u5e44\u5e45\u5e46\u5e47\u5e48\u5e49\u5e4a\u5e4b\u5e4c\u5e4d\u5e4e\u5e4f\u5e50\u5e51\u5e52\u5e53\u5e54\u5e55\u5e56\u5e57\u5e58\u5e59\u5e5a\u5e5b\u5e5c\u5e5d\u5e5e\u5e5f\u5e60\u5e61\u5e62\u5e63\u5e64\u5e65\u5e66\u5e67\u5e68\u5e69\u5e6a\u5e6b\u5e6c\u5e6d\u5e6e\u5e6f\u5e70\u5e71\u5e72\u5e73\u5e74\u5e75\u5e76\u5e77\u5e78\u5e79\u5e7a\u5e7b\u5e7c\u5e7d\u5e7e\u5e7f\u5e80\u5e81\u5e82\u5e83\u5e84\u5e85\u5e86\u5e87\u5e88\u5e89\u5e8a\u5e8b\u5e8c\u5e8d\u5e8e\u5e8f\u5e90\u5e91\u5e92\u5e93\u5e94\u5e95\u5e96\u5e97\u5e98\u5e99\u5e9a\u5e9b\u5e9c\u5e9d\u5e9e\u5e9f\u5ea0\u5ea1\u5ea2\u5ea3\u5ea4\u5ea5\u5ea6\u5ea7\u5ea8\u5ea9\u5eaa\u5eab\u5eac\u5ead\u5eae\u5eaf\u5eb0\u5eb1\u5eb2
\u5eb3\u5eb4\u5eb5\u5eb6\u5eb7\u5eb8\u5eb9\u5eba\u5ebb\u5ebc\u5ebd\u5ebe\u5ebf\u5ec0\u5ec1\u5ec2\u5ec3\u5ec4\u5ec5\u5ec6\u5ec7\u5ec8\u5ec9\u5eca\u5ecb\u5ecc\u5ecd\u5ece\u5ecf\u5ed0\u5ed1\u5ed2\u5ed3\u5ed4\u5ed5\u5ed6\u5ed7\u5ed8\u5ed9\u5eda\u5edb\u5edc\u5edd\u5ede\u5edf\u5ee0\u5ee1\u5ee2\u5ee3\u5ee4\u5ee5\u5ee6\u5ee7\u5ee8\u5ee9\u5eea\u5eeb\u5eec\u5eed\u5eee\u5eef\u5ef0\u5ef1\u5ef2\u5ef3\u5ef4\u5ef5\u5ef6\u5ef7\u5ef8\u5ef9\u5efa\u5efb\u5efc\u5efd\u5efe\u5eff\u5f00\u5f01\u5f02\u5f03\u5f04\u5f05\u5f06\u5f07\u5f08\u5f09\u5f0a\u5f0b\u5f0c\u5f0d\u5f0e\u5f0f\u5f10\u5f11\u5f12\u5f13\u5f14\u5f15\u5f16\u5f17\u5f18\u5f19\u5f1a\u5f1b\u5f1c\u5f1d\u5f1e\u5f1f\u5f20\u5f21\u5f22\u5f23\u5f24\u5f25\u5f26\u5f27\u5f28\u5f29\u5f2a\u5f2b\u5f2c\u5f2d\u5f2e\u5f2f\u5f30\u5f31\u5f32\u5f33\u5f34\u5f35\u5f36\u5f37\u5f38\u5f39\u5f3a\u5f3b\u5f3c\u5f3d\u5f3e\u5f3f\u5f40\u5f41\u5f42\u5f43\u5f44\u5f45\u5f46\u5f47\u5f48\u5f49\u5f4a\u5f4b\u5f4c\u5f4d\u5f4e\u5f4f\u5f50\u5f51\u5f52\u5f53\u5f54\u5f55\u5f56\u5f57\u5f58\u5f59\u5f5a\u5f5b\u5f5c\u5f5d\u5f5e\u5f5f\u5f60\u5f61\u5f62\u5f63\u5f64\u5f65\u5f66\u5f67\u5f68\u5f69\u5f6a\u5f6b\u5f6c\u5f6d\u5f6e\u5f6f\u5f70\u5f71\u5f72\u5f73\u5f74\u5f75\u5f76\u5f77\u5f78\u5f79\u5f7a\u5f7b\u5f7c\u5f7d\u5f7e\u5f7f\u5f80\u5f81\u5f82\u5f83\u5f84\u5f85\u5f86\u5f87\u5f88\u5f89\u5f8a\u5f8b\u5f8c\u5f8d\u5f8e\u5f8f\u5f90\u5f91\u5f92\u5f93\u5f94\u5f95\u5f96\u5f97\u5f98\u5f99\u5f9a\u5f9b\u5f9c\u5f9d\u5f9e\u5f9f\u5fa0\u5fa1\u5fa2\u5fa3\u5fa4\u5fa5\u5fa6\u5fa7\u5fa8\u5fa9\u5faa\u5fab\u5fac\u5fad\u5fae\u5faf\u5fb0\u5fb1\u5fb2\u5fb3\u5fb4\u5fb5\u5fb6\u5fb7\u5fb8\u5fb9\u5fba\u5fbb\u5fbc\u5fbd\u5fbe\u5fbf\u5fc0\u5fc1\u5fc2\u5fc3\u5fc4\u5fc5\u5fc6\u5fc7\u5fc8\u5fc9\u5fca\u5fcb\u5fcc\u5fcd\u5fce\u5fcf\u5fd0\u5fd1\u5fd2\u5fd3\u5fd4\u5fd5\u5fd6\u5fd7\u5fd8\u5fd9\u5fda\u5fdb\u5fdc\u5fdd\u5fde\u5fdf\u5fe0\u5fe1\u5fe2\u5fe3\u5fe4\u5fe5\u5fe6\u5fe7\u5fe8\u5fe9\u5fea\u5feb\u5fec\u5fed\u5fee\u5fef\u5ff0\u5ff1\u5ff2\u5ff3\u5ff4\u5ff5\u5ff6\u5ff7\u5ff8\u5ff9\u5ffa\u5ffb\u5ffc\u5ffd\u5ffe\u5fff\u
6000\u6001\u6002\u6003\u6004\u6005\u6006\u6007\u6008\u6009\u600a\u600b\u600c\u600d\u600e\u600f\u6010\u6011\u6012\u6013\u6014\u6015\u6016\u6017\u6018\u6019\u601a\u601b\u601c\u601d\u601e\u601f\u6020\u6021\u6022\u6023\u6024\u6025\u6026\u6027\u6028\u6029\u602a\u602b\u602c\u602d\u602e\u602f\u6030\u6031\u6032\u6033\u6034\u6035\u6036\u6037\u6038\u6039\u603a\u603b\u603c\u603d\u603e\u603f\u6040\u6041\u6042\u6043\u6044\u6045\u6046\u6047\u6048\u6049\u604a\u604b\u604c\u604d\u604e\u604f\u6050\u6051\u6052\u6053\u6054\u6055\u6056\u6057\u6058\u6059\u605a\u605b\u605c\u605d\u605e\u605f\u6060\u6061\u6062\u6063\u6064\u6065\u6066\u6067\u6068\u6069\u606a\u606b\u606c\u606d\u606e\u606f\u6070\u6071\u6072\u6073\u6074\u6075\u6076\u6077\u6078\u6079\u607a\u607b\u607c\u607d\u607e\u607f\u6080\u6081\u6082\u6083\u6084\u6085\u6086\u6087\u6088\u6089\u608a\u608b\u608c\u608d\u608e\u608f\u6090\u6091\u6092\u6093\u6094\u6095\u6096\u6097\u6098\u6099\u609a\u609b\u609c\u609d\u609e\u609f\u60a0\u60a1\u60a2\u60a3\u60a4\u60a5\u60a6\u60a7\u60a8\u60a9\u60aa\u60ab\u60ac\u60ad\u60ae\u60af\u60b0\u60b1\u60b2\u60b3\u60b4\u60b5\u60b6\u60b7\u60b8\u60b9\u60ba\u60bb\u60bc\u60bd\u60be\u60bf\u60c0\u60c1\u60c2\u60c3\u60c4\u60c5\u60c6\u60c7\u60c8\u60c9\u60ca\u60cb\u60cc\u60cd\u60ce\u60cf\u60d0\u60d1\u60d2\u60d3\u60d4\u60d5\u60d6\u60d7\u60d8\u60d9\u60da\u60db\u60dc\u60dd\u60de\u60df\u60e0\u60e1\u60e2\u60e3\u60e4\u60e5\u60e6\u60e7\u60e8\u60e9\u60ea\u60eb\u60ec\u60ed\u60ee\u60ef\u60f0\u60f1\u60f2\u60f3\u60f4\u60f5\u60f6\u60f7\u60f8\u60f9\u60fa\u60fb\u60fc\u60fd\u60fe\u60ff\u6100\u6101\u6102\u6103\u6104\u6105\u6106\u6107\u6108\u6109\u610a\u610b\u610c\u610d\u610e\u610f\u6110\u6111\u6112\u6113\u6114\u6115\u6116\u6117\u6118\u6119\u611a\u611b\u611c\u611d\u611e\u611f\u6120\u6121\u6122\u6123\u6124\u6125\u6126\u6127\u6128\u6129\u612a\u612b\u612c\u612d\u612e\u612f\u6130\u6131\u6132\u6133\u6134\u6135\u6136\u6137\u6138\u6139\u613a\u613b\u613c\u613d\u613e\u613f\u6140\u6141\u6142\u6143\u6144\u6145\u6146\u6147\u6148\u6149\u614a\u614b\u614c\u61
4d\u614e\u614f\u6150\u6151\u6152\u6153\u6154\u6155\u6156\u6157\u6158\u6159\u615a\u615b\u615c\u615d\u615e\u615f\u6160\u6161\u6162\u6163\u6164\u6165\u6166\u6167\u6168\u6169\u616a\u616b\u616c\u616d\u616e\u616f\u6170\u6171\u6172\u6173\u6174\u6175\u6176\u6177\u6178\u6179\u617a\u617b\u617c\u617d\u617e\u617f\u6180\u6181\u6182\u6183\u6184\u6185\u6186\u6187\u6188\u6189\u618a\u618b\u618c\u618d\u618e\u618f\u6190\u6191\u6192\u6193\u6194\u6195\u6196\u6197\u6198\u6199\u619a\u619b\u619c\u619d\u619e\u619f\u61a0\u61a1\u61a2\u61a3\u61a4\u61a5\u61a6\u61a7\u61a8\u61a9\u61aa\u61ab\u61ac\u61ad\u61ae\u61af\u61b0\u61b1\u61b2\u61b3\u61b4\u61b5\u61b6\u61b7\u61b8\u61b9\u61ba\u61bb\u61bc\u61bd\u61be\u61bf\u61c0\u61c1\u61c2\u61c3\u61c4\u61c5\u61c6\u61c7\u61c8\u61c9\u61ca\u61cb\u61cc\u61cd\u61ce\u61cf\u61d0\u61d1\u61d2\u61d3\u61d4\u61d5\u61d6\u61d7\u61d8\u61d9\u61da\u61db\u61dc\u61dd\u61de\u61df\u61e0\u61e1\u61e2\u61e3\u61e4\u61e5\u61e6\u61e7\u61e8\u61e9\u61ea\u61eb\u61ec\u61ed\u61ee\u61ef\u61f0\u61f1\u61f2\u61f3\u61f4\u61f5\u61f6\u61f7\u61f8\u61f9\u61fa\u61fb\u61fc\u61fd\u61fe\u61ff\u6200\u6201\u6202\u6203\u6204\u6205\u6206\u6207\u6208\u6209\u620a\u620b\u620c\u620d\u620e\u620f\u6210\u6211\u6212\u6213\u6214\u6215\u6216\u6217\u6218\u6219\u621a\u621b\u621c\u621d\u621e\u621f\u6220\u6221\u6222\u6223\u6224\u6225\u6226\u6227\u6228\u6229\u622a\u622b\u622c\u622d\u622e\u622f\u6230\u6231\u6232\u6233\u6234\u6235\u6236\u6237\u6238\u6239\u623a\u623b\u623c\u623d\u623e\u623f\u6240\u6241\u6242\u6243\u6244\u6245\u6246\u6247\u6248\u6249\u624a\u624b\u624c\u624d\u624e\u624f\u6250\u6251\u6252\u6253\u6254\u6255\u6256\u6257\u6258\u6259\u625a\u625b\u625c\u625d\u625e\u625f\u6260\u6261\u6262\u6263\u6264\u6265\u6266\u6267\u6268\u6269\u626a\u626b\u626c\u626d\u626e\u626f\u6270\u6271\u6272\u6273\u6274\u6275\u6276\u6277\u6278\u6279\u627a\u627b\u627c\u627d\u627e\u627f\u6280\u6281\u6282\u6283\u6284\u6285\u6286\u6287\u6288\u6289\u628a\u628b\u628c\u628d\u628e\u628f\u6290\u6291\u6292\u6293\u6294\u6295\u6296\u6297\u6298\u6299\u629a
\u629b\u629c\u629d\u629e\u629f\u62a0\u62a1\u62a2\u62a3\u62a4\u62a5\u62a6\u62a7\u62a8\u62a9\u62aa\u62ab\u62ac\u62ad\u62ae\u62af\u62b0\u62b1\u62b2\u62b3\u62b4\u62b5\u62b6\u62b7\u62b8\u62b9\u62ba\u62bb\u62bc\u62bd\u62be\u62bf\u62c0\u62c1\u62c2\u62c3\u62c4\u62c5\u62c6\u62c7\u62c8\u62c9\u62ca\u62cb\u62cc\u62cd\u62ce\u62cf\u62d0\u62d1\u62d2\u62d3\u62d4\u62d5\u62d6\u62d7\u62d8\u62d9\u62da\u62db\u62dc\u62dd\u62de\u62df\u62e0\u62e1\u62e2\u62e3\u62e4\u62e5\u62e6\u62e7\u62e8\u62e9\u62ea\u62eb\u62ec\u62ed\u62ee\u62ef\u62f0\u62f1\u62f2\u62f3\u62f4\u62f5\u62f6\u62f7\u62f8\u62f9\u62fa\u62fb\u62fc\u62fd\u62fe\u62ff\u6300\u6301\u6302\u6303\u6304\u6305\u6306\u6307\u6308\u6309\u630a\u630b\u630c\u630d\u630e\u630f\u6310\u6311\u6312\u6313\u6314\u6315\u6316\u6317\u6318\u6319\u631a\u631b\u631c\u631d\u631e\u631f\u6320\u6321\u6322\u6323\u6324\u6325\u6326\u6327\u6328\u6329\u632a\u632b\u632c\u632d\u632e\u632f\u6330\u6331\u6332\u6333\u6334\u6335\u6336\u6337\u6338\u6339\u633a\u633b\u633c\u633d\u633e\u633f\u6340\u6341\u6342\u6343\u6344\u6345\u6346\u6347\u6348\u6349\u634a\u634b\u634c\u634d\u634e\u634f\u6350\u6351\u6352\u6353\u6354\u6355\u6356\u6357\u6358\u6359\u635a\u635b\u635c\u635d\u635e\u635f\u6360\u6361\u6362\u6363\u6364\u6365\u6366\u6367\u6368\u6369\u636a\u636b\u636c\u636d\u636e\u636f\u6370\u6371\u6372\u6373\u6374\u6375\u6376\u6377\u6378\u6379\u637a\u637b\u637c\u637d\u637e\u637f\u6380\u6381\u6382\u6383\u6384\u6385\u6386\u6387\u6388\u6389\u638a\u638b\u638c\u638d\u638e\u638f\u6390\u6391\u6392\u6393\u6394\u6395\u6396\u6397\u6398\u6399\u639a\u639b\u639c\u639d\u639e\u639f\u63a0\u63a1\u63a2\u63a3\u63a4\u63a5\u63a6\u63a7\u63a8\u63a9\u63aa\u63ab\u63ac\u63ad\u63ae\u63af\u63b0\u63b1\u63b2\u63b3\u63b4\u63b5\u63b6\u63b7\u63b8\u63b9\u63ba\u63bb\u63bc\u63bd\u63be\u63bf\u63c0\u63c1\u63c2\u63c3\u63c4\u63c5\u63c6\u63c7\u63c8\u63c9\u63ca\u63cb\u63cc\u63cd\u63ce\u63cf\u63d0\u63d1\u63d2\u63d3\u63d4\u63d5\u63d6\u63d7\u63d8\u63d9\u63da\u63db\u63dc\u63dd\u63de\u63df\u63e0\u63e1\u63e2\u63e3\u63e4\u63e5\u63e6\u63e7\u
63e8\u63e9\u63ea\u63eb\u63ec\u63ed\u63ee\u63ef\u63f0\u63f1\u63f2\u63f3\u63f4\u63f5\u63f6\u63f7\u63f8\u63f9\u63fa\u63fb\u63fc\u63fd\u63fe\u63ff\u6400\u6401\u6402\u6403\u6404\u6405\u6406\u6407\u6408\u6409\u640a\u640b\u640c\u640d\u640e\u640f\u6410\u6411\u6412\u6413\u6414\u6415\u6416\u6417\u6418\u6419\u641a\u641b\u641c\u641d\u641e\u641f\u6420\u6421\u6422\u6423\u6424\u6425\u6426\u6427\u6428\u6429\u642a\u642b\u642c\u642d\u642e\u642f\u6430\u6431\u6432\u6433\u6434\u6435\u6436\u6437\u6438\u6439\u643a\u643b\u643c\u643d\u643e\u643f\u6440\u6441\u6442\u6443\u6444\u6445\u6446\u6447\u6448\u6449\u644a\u644b\u644c\u644d\u644e\u644f\u6450\u6451\u6452\u6453\u6454\u6455\u6456\u6457\u6458\u6459\u645a\u645b\u645c\u645d\u645e\u645f\u6460\u6461\u6462\u6463\u6464\u6465\u6466\u6467\u6468\u6469\u646a\u646b\u646c\u646d\u646e\u646f\u6470\u6471\u6472\u6473\u6474\u6475\u6476\u6477\u6478\u6479\u647a\u647b\u647c\u647d\u647e\u647f\u6480\u6481\u6482\u6483\u6484\u6485\u6486\u6487\u6488\u6489\u648a\u648b\u648c\u648d\u648e\u648f\u6490\u6491\u6492\u6493\u6494\u6495\u6496\u6497\u6498\u6499\u649a\u649b\u649c\u649d\u649e\u649f\u64a0\u64a1\u64a2\u64a3\u64a4\u64a5\u64a6\u64a7\u64a8\u64a9\u64aa\u64ab\u64ac\u64ad\u64ae\u64af\u64b0\u64b1\u64b2\u64b3\u64b4\u64b5\u64b6\u64b7\u64b8\u64b9\u64ba\u64bb\u64bc\u64bd\u64be\u64bf\u64c0\u64c1\u64c2\u64c3\u64c4\u64c5\u64c6\u64c7\u64c8\u64c9\u64ca\u64cb\u64cc\u64cd\u64ce\u64cf\u64d0\u64d1\u64d2\u64d3\u64d4\u64d5\u64d6\u64d7\u64d8\u64d9\u64da\u64db\u64dc\u64dd\u64de\u64df\u64e0\u64e1\u64e2\u64e3\u64e4\u64e5\u64e6\u64e7\u64e8\u64e9\u64ea\u64eb\u64ec\u64ed\u64ee\u64ef\u64f0\u64f1\u64f2\u64f3\u64f4\u64f5\u64f6\u64f7\u64f8\u64f9\u64fa\u64fb\u64fc\u64fd\u64fe\u64ff\u6500\u6501\u6502\u6503\u6504\u6505\u6506\u6507\u6508\u6509\u650a\u650b\u650c\u650d\u650e\u650f\u6510\u6511\u6512\u6513\u6514\u6515\u6516\u6517\u6518\u6519\u651a\u651b\u651c\u651d\u651e\u651f\u6520\u6521\u6522\u6523\u6524\u6525\u6526\u6527\u6528\u6529\u652a\u652b\u652c\u652d\u652e\u652f\u6530\u6531\u6532\u6533\u6534\u65
35\u6536\u6537\u6538\u6539\u653a\u653b\u653c\u653d\u653e\u653f\u6540\u6541\u6542\u6543\u6544\u6545\u6546\u6547\u6548\u6549\u654a\u654b\u654c\u654d\u654e\u654f\u6550\u6551\u6552\u6553\u6554\u6555\u6556\u6557\u6558\u6559\u655a\u655b\u655c\u655d\u655e\u655f\u6560\u6561\u6562\u6563\u6564\u6565\u6566\u6567\u6568\u6569\u656a\u656b\u656c\u656d\u656e\u656f\u6570\u6571\u6572\u6573\u6574\u6575\u6576\u6577\u6578\u6579\u657a\u657b\u657c\u657d\u657e\u657f\u6580\u6581\u6582\u6583\u6584\u6585\u6586\u6587\u6588\u6589\u658a\u658b\u658c\u658d\u658e\u658f\u6590\u6591\u6592\u6593\u6594\u6595\u6596\u6597\u6598\u6599\u659a\u659b\u659c\u659d\u659e\u659f\u65a0\u65a1\u65a2\u65a3\u65a4\u65a5\u65a6\u65a7\u65a8\u65a9\u65aa\u65ab\u65ac\u65ad\u65ae\u65af\u65b0\u65b1\u65b2\u65b3\u65b4\u65b5\u65b6\u65b7\u65b8\u65b9\u65ba\u65bb\u65bc\u65bd\u65be\u65bf\u65c0\u65c1\u65c2\u65c3\u65c4\u65c5\u65c6\u65c7\u65c8\u65c9\u65ca\u65cb\u65cc\u65cd\u65ce\u65cf\u65d0\u65d1\u65d2\u65d3\u65d4\u65d5\u65d6\u65d7\u65d8\u65d9\u65da\u65db\u65dc\u65dd\u65de\u65df\u65e0\u65e1\u65e2\u65e3\u65e4\u65e5\u65e6\u65e7\u65e8\u65e9\u65ea\u65eb\u65ec\u65ed\u65ee\u65ef\u65f0\u65f1\u65f2\u65f3\u65f4\u65f5\u65f6\u65f7\u65f8\u65f9\u65fa\u65fb\u65fc\u65fd\u65fe\u65ff\u6600\u6601\u6602\u6603\u6604\u6605\u6606\u6607\u6608\u6609\u660a\u660b\u660c\u660d\u660e\u660f\u6610\u6611\u6612\u6613\u6614\u6615\u6616\u6617\u6618\u6619\u661a\u661b\u661c\u661d\u661e\u661f\u6620\u6621\u6622\u6623\u6624\u6625\u6626\u6627\u6628\u6629\u662a\u662b\u662c\u662d\u662e\u662f\u6630\u6631\u6632\u6633\u6634\u6635\u6636\u6637\u6638\u6639\u663a\u663b\u663c\u663d\u663e\u663f\u6640\u6641\u6642\u6643\u6644\u6645\u6646\u6647\u6648\u6649\u664a\u664b\u664c\u664d\u664e\u664f\u6650\u6651\u6652\u6653\u6654\u6655\u6656\u6657\u6658\u6659\u665a\u665b\u665c\u665d\u665e\u665f\u6660\u6661\u6662\u6663\u6664\u6665\u6666\u6667\u6668\u6669\u666a\u666b\u666c\u666d\u666e\u666f\u6670\u6671\u6672\u6673\u6674\u6675\u6676\u6677\u6678\u6679\u667a\u667b\u667c\u667d\u667e\u667f\u6680\u6681\u6682
\u6683\u6684\u6685\u6686\u6687\u6688\u6689\u668a\u668b\u668c\u668d\u668e\u668f\u6690\u6691\u6692\u6693\u6694\u6695\u6696\u6697\u6698\u6699\u669a\u669b\u669c\u669d\u669e\u669f\u66a0\u66a1\u66a2\u66a3\u66a4\u66a5\u66a6\u66a7\u66a8\u66a9\u66aa\u66ab\u66ac\u66ad\u66ae\u66af\u66b0\u66b1\u66b2\u66b3\u66b4\u66b5\u66b6\u66b7\u66b8\u66b9\u66ba\u66bb\u66bc\u66bd\u66be\u66bf\u66c0\u66c1\u66c2\u66c3\u66c4\u66c5\u66c6\u66c7\u66c8\u66c9\u66ca\u66cb\u66cc\u66cd\u66ce\u66cf\u66d0\u66d1\u66d2\u66d3\u66d4\u66d5\u66d6\u66d7\u66d8\u66d9\u66da\u66db\u66dc\u66dd\u66de\u66df\u66e0\u66e1\u66e2\u66e3\u66e4\u66e5\u66e6\u66e7\u66e8\u66e9\u66ea\u66eb\u66ec\u66ed\u66ee\u66ef\u66f0\u66f1\u66f2\u66f3\u66f4\u66f5\u66f6\u66f7\u66f8\u66f9\u66fa\u66fb\u66fc\u66fd\u66fe\u66ff\u6700\u6701\u6702\u6703\u6704\u6705\u6706\u6707\u6708\u6709\u670a\u670b\u670c\u670d\u670e\u670f\u6710\u6711\u6712\u6713\u6714\u6715\u6716\u6717\u6718\u6719\u671a\u671b\u671c\u671d\u671e\u671f\u6720\u6721\u6722\u6723\u6724\u6725\u6726\u6727\u6728\u6729\u672a\u672b\u672c\u672d\u672e\u672f\u6730\u6731\u6732\u6733\u6734\u6735\u6736\u6737\u6738\u6739\u673a\u673b\u673c\u673d\u673e\u673f\u6740\u6741\u6742\u6743\u6744\u6745\u6746\u6747\u6748\u6749\u674a\u674b\u674c\u674d\u674e\u674f\u6750\u6751\u6752\u6753\u6754\u6755\u6756\u6757\u6758\u6759\u675a\u675b\u675c\u675d\u675e\u675f\u6760\u6761\u6762\u6763\u6764\u6765\u6766\u6767\u6768\u6769\u676a\u676b\u676c\u676d\u676e\u676f\u6770\u6771\u6772\u6773\u6774\u6775\u6776\u6777\u6778\u6779\u677a\u677b\u677c\u677d\u677e\u677f\u6780\u6781\u6782\u6783\u6784\u6785\u6786\u6787\u6788\u6789\u678a\u678b\u678c\u678d\u678e\u678f\u6790\u6791\u6792\u6793\u6794\u6795\u6796\u6797\u6798\u6799\u679a\u679b\u679c\u679d\u679e\u679f\u67a0\u67a1\u67a2\u67a3\u67a4\u67a5\u67a6\u67a7\u67a8\u67a9\u67aa\u67ab\u67ac\u67ad\u67ae\u67af\u67b0\u67b1\u67b2\u67b3\u67b4\u67b5\u67b6\u67b7\u67b8\u67b9\u67ba\u67bb\u67bc\u67bd\u67be\u67bf\u67c0\u67c1\u67c2\u67c3\u67c4\u67c5\u67c6\u67c7\u67c8\u67c9\u67ca\u67cb\u67cc\u67cd\u67ce\u67cf\u
67d0\u67d1\u67d2\u67d3\u67d4\u67d5\u67d6\u67d7\u67d8\u67d9\u67da\u67db\u67dc\u67dd\u67de\u67df\u67e0\u67e1\u67e2\u67e3\u67e4\u67e5\u67e6\u67e7\u67e8\u67e9\u67ea\u67eb\u67ec\u67ed\u67ee\u67ef\u67f0\u67f1\u67f2\u67f3\u67f4\u67f5\u67f6\u67f7\u67f8\u67f9\u67fa\u67fb\u67fc\u67fd\u67fe\u67ff\u6800\u6801\u6802\u6803\u6804\u6805\u6806\u6807\u6808\u6809\u680a\u680b\u680c\u680d\u680e\u680f\u6810\u6811\u6812\u6813\u6814\u6815\u6816\u6817\u6818\u6819\u681a\u681b\u681c\u681d\u681e\u681f\u6820\u6821\u6822\u6823\u6824\u6825\u6826\u6827\u6828\u6829\u682a\u682b\u682c\u682d\u682e\u682f\u6830\u6831\u6832\u6833\u6834\u6835\u6836\u6837\u6838\u6839\u683a\u683b\u683c\u683d\u683e\u683f\u6840\u6841\u6842\u6843\u6844\u6845\u6846\u6847\u6848\u6849\u684a\u684b\u684c\u684d\u684e\u684f\u6850\u6851\u6852\u6853\u6854\u6855\u6856\u6857\u6858\u6859\u685a\u685b\u685c\u685d\u685e\u685f\u6860\u6861\u6862\u6863\u6864\u6865\u6866\u6867\u6868\u6869\u686a\u686b\u686c\u686d\u686e\u686f\u6870\u6871\u6872\u6873\u6874\u6875\u6876\u6877\u6878\u6879\u687a\u687b\u687c\u687d\u687e\u687f\u6880\u6881\u6882\u6883\u6884\u6885\u6886\u6887\u6888\u6889\u688a\u688b\u688c\u688d\u688e\u688f\u6890\u6891\u6892\u6893\u6894\u6895\u6896\u6897\u6898\u6899\u689a\u689b\u689c\u689d\u689e\u689f\u68a0\u68a1\u68a2\u68a3\u68a4\u68a5\u68a6\u68a7\u68a8\u68a9\u68aa\u68ab\u68ac\u68ad\u68ae\u68af\u68b0\u68b1\u68b2\u68b3\u68b4\u68b5\u68b6\u68b7\u68b8\u68b9\u68ba\u68bb\u68bc\u68bd\u68be\u68bf\u68c0\u68c1\u68c2\u68c3\u68c4\u68c5\u68c6\u68c7\u68c8\u68c9\u68ca\u68cb\u68cc\u68cd\u68ce\u68cf\u68d0\u68d1\u68d2\u68d3\u68d4\u68d5\u68d6\u68d7\u68d8\u68d9\u68da\u68db\u68dc\u68dd\u68de\u68df\u68e0\u68e1\u68e2\u68e3\u68e4\u68e5\u68e6\u68e7\u68e8\u68e9\u68ea\u68eb\u68ec\u68ed\u68ee\u68ef\u68f0\u68f1\u68f2\u68f3\u68f4\u68f5\u68f6\u68f7\u68f8\u68f9\u68fa\u68fb\u68fc\u68fd\u68fe\u68ff\u6900\u6901\u6902\u6903\u6904\u6905\u6906\u6907\u6908\u6909\u690a\u690b\u690c\u690d\u690e\u690f\u6910\u6911\u6912\u6913\u6914\u6915\u6916\u6917\u6918\u6919\u691a\u691b\u691c\u69
1d\u691e\u691f\u6920\u6921\u6922\u6923\u6924\u6925\u6926\u6927\u6928\u6929\u692a\u692b\u692c\u692d\u692e\u692f\u6930\u6931\u6932\u6933\u6934\u6935\u6936\u6937\u6938\u6939\u693a\u693b\u693c\u693d\u693e\u693f\u6940\u6941\u6942\u6943\u6944\u6945\u6946\u6947\u6948\u6949\u694a\u694b\u694c\u694d\u694e\u694f\u6950\u6951\u6952\u6953\u6954\u6955\u6956\u6957\u6958\u6959\u695a\u695b\u695c\u695d\u695e\u695f\u6960\u6961\u6962\u6963\u6964\u6965\u6966\u6967\u6968\u6969\u696a\u696b\u696c\u696d\u696e\u696f\u6970\u6971\u6972\u6973\u6974\u6975\u6976\u6977\u6978\u6979\u697a\u697b\u697c\u697d\u697e\u697f\u6980\u6981\u6982\u6983\u6984\u6985\u6986\u6987\u6988\u6989\u698a\u698b\u698c\u698d\u698e\u698f\u6990\u6991\u6992\u6993\u6994\u6995\u6996\u6997\u6998\u6999\u699a\u699b\u699c\u699d\u699e\u699f\u69a0\u69a1\u69a2\u69a3\u69a4\u69a5\u69a6\u69a7\u69a8\u69a9\u69aa\u69ab\u69ac\u69ad\u69ae\u69af\u69b0\u69b1\u69b2\u69b3\u69b4\u69b5\u69b6\u69b7\u69b8\u69b9\u69ba\u69bb\u69bc\u69bd\u69be\u69bf\u69c0\u69c1\u69c2\u69c3\u69c4\u69c5\u69c6\u69c7\u69c8\u69c9\u69ca\u69cb\u69cc\u69cd\u69ce\u69cf\u69d0\u69d1\u69d2\u69d3\u69d4\u69d5\u69d6\u69d7\u69d8\u69d9\u69da\u69db\u69dc\u69dd\u69de\u69df\u69e0\u69e1\u69e2\u69e3\u69e4\u69e5\u69e6\u69e7\u69e8\u69e9\u69ea\u69eb\u69ec\u69ed\u69ee\u69ef\u69f0\u69f1\u69f2\u69f3\u69f4\u69f5\u69f6\u69f7\u69f8\u69f9\u69fa\u69fb\u69fc\u69fd\u69fe\u69ff\u6a00\u6a01\u6a02\u6a03\u6a04\u6a05\u6a06\u6a07\u6a08\u6a09\u6a0a\u6a0b\u6a0c\u6a0d\u6a0e\u6a0f\u6a10\u6a11\u6a12\u6a13\u6a14\u6a15\u6a16\u6a17\u6a18\u6a19\u6a1a\u6a1b\u6a1c\u6a1d\u6a1e\u6a1f\u6a20\u6a21\u6a22\u6a23\u6a24\u6a25\u6a26\u6a27\u6a28\u6a29\u6a2a\u6a2b\u6a2c\u6a2d\u6a2e\u6a2f\u6a30\u6a31\u6a32\u6a33\u6a34\u6a35\u6a36\u6a37\u6a38\u6a39\u6a3a\u6a3b\u6a3c\u6a3d\u6a3e\u6a3f\u6a40\u6a41\u6a42\u6a43\u6a44\u6a45\u6a46\u6a47\u6a48\u6a49\u6a4a\u6a4b\u6a4c\u6a4d\u6a4e\u6a4f\u6a50\u6a51\u6a52\u6a53\u6a54\u6a55\u6a56\u6a57\u6a58\u6a59\u6a5a\u6a5b\u6a5c\u6a5d\u6a5e\u6a5f\u6a60\u6a61\u6a62\u6a63\u6a64\u6a65\u6a66\u6a67\u6a68\u6a69\u6a6a
\u6a6b\u6a6c\u6a6d\u6a6e\u6a6f\u6a70\u6a71\u6a72\u6a73\u6a74\u6a75\u6a76\u6a77\u6a78\u6a79\u6a7a\u6a7b\u6a7c\u6a7d\u6a7e\u6a7f\u6a80\u6a81\u6a82\u6a83\u6a84\u6a85\u6a86\u6a87\u6a88\u6a89\u6a8a\u6a8b\u6a8c\u6a8d\u6a8e\u6a8f\u6a90\u6a91\u6a92\u6a93\u6a94\u6a95\u6a96\u6a97\u6a98\u6a99\u6a9a\u6a9b\u6a9c\u6a9d\u6a9e\u6a9f\u6aa0\u6aa1\u6aa2\u6aa3\u6aa4\u6aa5\u6aa6\u6aa7\u6aa8\u6aa9\u6aaa\u6aab\u6aac\u6aad\u6aae\u6aaf\u6ab0\u6ab1\u6ab2\u6ab3\u6ab4\u6ab5\u6ab6\u6ab7\u6ab8\u6ab9\u6aba\u6abb\u6abc\u6abd\u6abe\u6abf\u6ac0\u6ac1\u6ac2\u6ac3\u6ac4\u6ac5\u6ac6\u6ac7\u6ac8\u6ac9\u6aca\u6acb\u6acc\u6acd\u6ace\u6acf\u6ad0\u6ad1\u6ad2\u6ad3\u6ad4\u6ad5\u6ad6\u6ad7\u6ad8\u6ad9\u6ada\u6adb\u6adc\u6add\u6ade\u6adf\u6ae0\u6ae1\u6ae2\u6ae3\u6ae4\u6ae5\u6ae6\u6ae7\u6ae8\u6ae9\u6aea\u6aeb\u6aec\u6aed\u6aee\u6aef\u6af0\u6af1\u6af2\u6af3\u6af4\u6af5\u6af6\u6af7\u6af8\u6af9\u6afa\u6afb\u6afc\u6afd\u6afe\u6aff\u6b00\u6b01\u6b02\u6b03\u6b04\u6b05\u6b06\u6b07\u6b08\u6b09\u6b0a\u6b0b\u6b0c\u6b0d\u6b0e\u6b0f\u6b10\u6b11\u6b12\u6b13\u6b14\u6b15\u6b16\u6b17\u6b18\u6b19\u6b1a\u6b1b\u6b1c\u6b1d\u6b1e\u6b1f\u6b20\u6b21\u6b22\u6b23\u6b24\u6b25\u6b26\u6b27\u6b28\u6b29\u6b2a\u6b2b\u6b2c\u6b2d\u6b2e\u6b2f\u6b30\u6b31\u6b32\u6b33\u6b34\u6b35\u6b36\u6b37\u6b38\u6b39\u6b3a\u6b3b\u6b3c\u6b3d\u6b3e\u6b3f\u6b40\u6b41\u6b42\u6b43\u6b44\u6b45\u6b46\u6b47\u6b48\u6b49\u6b4a\u6b4b\u6b4c\u6b4d\u6b4e\u6b4f\u6b50\u6b51\u6b52\u6b53\u6b54\u6b55\u6b56\u6b57\u6b58\u6b59\u6b5a\u6b5b\u6b5c\u6b5d\u6b5e\u6b5f\u6b60\u6b61\u6b62\u6b63\u6b64\u6b65\u6b66\u6b67\u6b68\u6b69\u6b6a\u6b6b\u6b6c\u6b6d\u6b6e\u6b6f\u6b70\u6b71\u6b72\u6b73\u6b74\u6b75\u6b76\u6b77\u6b78\u6b79\u6b7a\u6b7b\u6b7c\u6b7d\u6b7e\u6b7f\u6b80\u6b81\u6b82\u6b83\u6b84\u6b85\u6b86\u6b87\u6b88\u6b89\u6b8a\u6b8b\u6b8c\u6b8d\u6b8e\u6b8f\u6b90\u6b91\u6b92\u6b93\u6b94\u6b95\u6b96\u6b97\u6b98\u6b99\u6b9a\u6b9b\u6b9c\u6b9d\u6b9e\u6b9f\u6ba0\u6ba1\u6ba2\u6ba3\u6ba4\u6ba5\u6ba6\u6ba7\u6ba8\u6ba9\u6baa\u6bab\u6bac\u6bad\u6bae\u6baf\u6bb0\u6bb1\u6bb2\u6bb3\u6bb4\u6bb5\u6bb6\u6bb7\u
6bb8\u6bb9\u6bba\u6bbb\u6bbc\u6bbd\u6bbe\u6bbf\u6bc0\u6bc1\u6bc2\u6bc3\u6bc4\u6bc5\u6bc6\u6bc7\u6bc8\u6bc9\u6bca\u6bcb\u6bcc\u6bcd\u6bce\u6bcf\u6bd0\u6bd1\u6bd2\u6bd3\u6bd4\u6bd5\u6bd6\u6bd7\u6bd8\u6bd9\u6bda\u6bdb\u6bdc\u6bdd\u6bde\u6bdf\u6be0\u6be1\u6be2\u6be3\u6be4\u6be5\u6be6\u6be7\u6be8\u6be9\u6bea\u6beb\u6bec\u6bed\u6bee\u6bef\u6bf0\u6bf1\u6bf2\u6bf3\u6bf4\u6bf5\u6bf6\u6bf7\u6bf8\u6bf9\u6bfa\u6bfb\u6bfc\u6bfd\u6bfe\u6bff\u6c00\u6c01\u6c02\u6c03\u6c04\u6c05\u6c06\u6c07\u6c08\u6c09\u6c0a\u6c0b\u6c0c\u6c0d\u6c0e\u6c0f\u6c10\u6c11\u6c12\u6c13\u6c14\u6c15\u6c16\u6c17\u6c18\u6c19\u6c1a\u6c1b\u6c1c\u6c1d\u6c1e\u6c1f\u6c20\u6c21\u6c22\u6c23\u6c24\u6c25\u6c26\u6c27\u6c28\u6c29\u6c2a\u6c2b\u6c2c\u6c2d\u6c2e\u6c2f\u6c30\u6c31\u6c32\u6c33\u6c34\u6c35\u6c36\u6c37\u6c38\u6c39\u6c3a\u6c3b\u6c3c\u6c3d\u6c3e\u6c3f\u6c40\u6c41\u6c42\u6c43\u6c44\u6c45\u6c46\u6c47\u6c48\u6c49\u6c4a\u6c4b\u6c4c\u6c4d\u6c4e\u6c4f\u6c50\u6c51\u6c52\u6c53\u6c54\u6c55\u6c56\u6c57\u6c58\u6c59\u6c5a\u6c5b\u6c5c\u6c5d\u6c5e\u6c5f\u6c60\u6c61\u6c62\u6c63\u6c64\u6c65\u6c66\u6c67\u6c68\u6c69\u6c6a\u6c6b\u6c6c\u6c6d\u6c6e\u6c6f\u6c70\u6c71\u6c72\u6c73\u6c74\u6c75\u6c76\u6c77\u6c78\u6c79\u6c7a\u6c7b\u6c7c\u6c7d\u6c7e\u6c7f\u6c80\u6c81\u6c82\u6c83\u6c84\u6c85\u6c86\u6c87\u6c88\u6c89\u6c8a\u6c8b\u6c8c\u6c8d\u6c8e\u6c8f\u6c90\u6c91\u6c92\u6c93\u6c94\u6c95\u6c96\u6c97\u6c98\u6c99\u6c9a\u6c9b\u6c9c\u6c9d\u6c9e\u6c9f\u6ca0\u6ca1\u6ca2\u6ca3\u6ca4\u6ca5\u6ca6\u6ca7\u6ca8\u6ca9\u6caa\u6cab\u6cac\u6cad\u6cae\u6caf\u6cb0\u6cb1\u6cb2\u6cb3\u6cb4\u6cb5\u6cb6\u6cb7\u6cb8\u6cb9\u6cba\u6cbb\u6cbc\u6cbd\u6cbe\u6cbf\u6cc0\u6cc1\u6cc2\u6cc3\u6cc4\u6cc5\u6cc6\u6cc7\u6cc8\u6cc9\u6cca\u6ccb\u6ccc\u6ccd\u6cce\u6ccf\u6cd0\u6cd1\u6cd2\u6cd3\u6cd4\u6cd5\u6cd6\u6cd7\u6cd8\u6cd9\u6cda\u6cdb\u6cdc\u6cdd\u6cde\u6cdf\u6ce0\u6ce1\u6ce2\u6ce3\u6ce4\u6ce5\u6ce6\u6ce7\u6ce8\u6ce9\u6cea\u6ceb\u6cec\u6ced\u6cee\u6cef\u6cf0\u6cf1\u6cf2\u6cf3\u6cf4\u6cf5\u6cf6\u6cf7\u6cf8\u6cf9\u6cfa\u6cfb\u6cfc\u6cfd\u6cfe\u6cff\u6d00\u6d01\u6d02\u6d03\u6d04\u6d
05\u6d06\u6d07\u6d08\u6d09\u6d0a\u6d0b\u6d0c\u6d0d\u6d0e\u6d0f\u6d10\u6d11\u6d12\u6d13\u6d14\u6d15\u6d16\u6d17\u6d18\u6d19\u6d1a\u6d1b\u6d1c\u6d1d\u6d1e\u6d1f\u6d20\u6d21\u6d22\u6d23\u6d24\u6d25\u6d26\u6d27\u6d28\u6d29\u6d2a\u6d2b\u6d2c\u6d2d\u6d2e\u6d2f\u6d30\u6d31\u6d32\u6d33\u6d34\u6d35\u6d36\u6d37\u6d38\u6d39\u6d3a\u6d3b\u6d3c\u6d3d\u6d3e\u6d3f\u6d40\u6d41\u6d42\u6d43\u6d44\u6d45\u6d46\u6d47\u6d48\u6d49\u6d4a\u6d4b\u6d4c\u6d4d\u6d4e\u6d4f\u6d50\u6d51\u6d52\u6d53\u6d54\u6d55\u6d56\u6d57\u6d58\u6d59\u6d5a\u6d5b\u6d5c\u6d5d\u6d5e\u6d5f\u6d60\u6d61\u6d62\u6d63\u6d64\u6d65\u6d66\u6d67\u6d68\u6d69\u6d6a\u6d6b\u6d6c\u6d6d\u6d6e\u6d6f\u6d70\u6d71\u6d72\u6d73\u6d74\u6d75\u6d76\u6d77\u6d78\u6d79\u6d7a\u6d7b\u6d7c\u6d7d\u6d7e\u6d7f\u6d80\u6d81\u6d82\u6d83\u6d84\u6d85\u6d86\u6d87\u6d88\u6d89\u6d8a\u6d8b\u6d8c\u6d8d\u6d8e\u6d8f\u6d90\u6d91\u6d92\u6d93\u6d94\u6d95\u6d96\u6d97\u6d98\u6d99\u6d9a\u6d9b\u6d9c\u6d9d\u6d9e\u6d9f\u6da0\u6da1\u6da2\u6da3\u6da4\u6da5\u6da6\u6da7\u6da8\u6da9\u6daa\u6dab\u6dac\u6dad\u6dae\u6daf\u6db0\u6db1\u6db2\u6db3\u6db4\u6db5\u6db6\u6db7\u6db8\u6db9\u6dba\u6dbb\u6dbc\u6dbd\u6dbe\u6dbf\u6dc0\u6dc1\u6dc2\u6dc3\u6dc4\u6dc5\u6dc6\u6dc7\u6dc8\u6dc9\u6dca\u6dcb\u6dcc\u6dcd\u6dce\u6dcf\u6dd0\u6dd1\u6dd2\u6dd3\u6dd4\u6dd5\u6dd6\u6dd7\u6dd8\u6dd9\u6dda\u6ddb\u6ddc\u6ddd\u6dde\u6ddf\u6de0\u6de1\u6de2\u6de3\u6de4\u6de5\u6de6\u6de7\u6de8\u6de9\u6dea\u6deb\u6dec\u6ded\u6dee\u6def\u6df0\u6df1\u6df2\u6df3\u6df4\u6df5\u6df6\u6df7\u6df8\u6df9\u6dfa\u6dfb\u6dfc\u6dfd\u6dfe\u6dff\u6e00\u6e01\u6e02\u6e03\u6e04\u6e05\u6e06\u6e07\u6e08\u6e09\u6e0a\u6e0b\u6e0c\u6e0d\u6e0e\u6e0f\u6e10\u6e11\u6e12\u6e13\u6e14\u6e15\u6e16\u6e17\u6e18\u6e19\u6e1a\u6e1b\u6e1c\u6e1d\u6e1e\u6e1f\u6e20\u6e21\u6e22\u6e23\u6e24\u6e25\u6e26\u6e27\u6e28\u6e29\u6e2a\u6e2b\u6e2c\u6e2d\u6e2e\u6e2f\u6e30\u6e31\u6e32\u6e33\u6e34\u6e35\u6e36\u6e37\u6e38\u6e39\u6e3a\u6e3b\u6e3c\u6e3d\u6e3e\u6e3f\u6e40\u6e41\u6e42\u6e43\u6e44\u6e45\u6e46\u6e47\u6e48\u6e49\u6e4a\u6e4b\u6e4c\u6e4d\u6e4e\u6e4f\u6e50\u6e51\u6e52
\u6e53\u6e54\u6e55\u6e56\u6e57\u6e58\u6e59\u6e5a\u6e5b\u6e5c\u6e5d\u6e5e\u6e5f\u6e60\u6e61\u6e62\u6e63\u6e64\u6e65\u6e66\u6e67\u6e68\u6e69\u6e6a\u6e6b\u6e6c\u6e6d\u6e6e\u6e6f\u6e70\u6e71\u6e72\u6e73\u6e74\u6e75\u6e76\u6e77\u6e78\u6e79\u6e7a\u6e7b\u6e7c\u6e7d\u6e7e\u6e7f\u6e80\u6e81\u6e82\u6e83\u6e84\u6e85\u6e86\u6e87\u6e88\u6e89\u6e8a\u6e8b\u6e8c\u6e8d\u6e8e\u6e8f\u6e90\u6e91\u6e92\u6e93\u6e94\u6e95\u6e96\u6e97\u6e98\u6e99\u6e9a\u6e9b\u6e9c\u6e9d\u6e9e\u6e9f\u6ea0\u6ea1\u6ea2\u6ea3\u6ea4\u6ea5\u6ea6\u6ea7\u6ea8\u6ea9\u6eaa\u6eab\u6eac\u6ead\u6eae\u6eaf\u6eb0\u6eb1\u6eb2\u6eb3\u6eb4\u6eb5\u6eb6\u6eb7\u6eb8\u6eb9\u6eba\u6ebb\u6ebc\u6ebd\u6ebe\u6ebf\u6ec0\u6ec1\u6ec2\u6ec3\u6ec4\u6ec5\u6ec6\u6ec7\u6ec8\u6ec9\u6eca\u6ecb\u6ecc\u6ecd\u6ece\u6ecf\u6ed0\u6ed1\u6ed2\u6ed3\u6ed4\u6ed5\u6ed6\u6ed7\u6ed8\u6ed9\u6eda\u6edb\u6edc\u6edd\u6ede\u6edf\u6ee0\u6ee1\u6ee2\u6ee3\u6ee4\u6ee5\u6ee6\u6ee7\u6ee8\u6ee9\u6eea\u6eeb\u6eec\u6eed\u6eee\u6eef\u6ef0\u6ef1\u6ef2\u6ef3\u6ef4\u6ef5\u6ef6\u6ef7\u6ef8\u6ef9\u6efa\u6efb\u6efc\u6efd\u6efe\u6eff\u6f00\u6f01\u6f02\u6f03\u6f04\u6f05\u6f06\u6f07\u6f08\u6f09\u6f0a\u6f0b\u6f0c\u6f0d\u6f0e\u6f0f\u6f10\u6f11\u6f12\u6f13\u6f14\u6f15\u6f16\u6f17\u6f18\u6f19\u6f1a\u6f1b\u6f1c\u6f1d\u6f1e\u6f1f\u6f20\u6f21\u6f22\u6f23\u6f24\u6f25\u6f26\u6f27\u6f28\u6f29\u6f2a\u6f2b\u6f2c\u6f2d\u6f2e\u6f2f\u6f30\u6f31\u6f32\u6f33\u6f34\u6f35\u6f36\u6f37\u6f38\u6f39\u6f3a\u6f3b\u6f3c\u6f3d\u6f3e\u6f3f\u6f40\u6f41\u6f42\u6f43\u6f44\u6f45\u6f46\u6f47\u6f48\u6f49\u6f4a\u6f4b\u6f4c\u6f4d\u6f4e\u6f4f\u6f50\u6f51\u6f52\u6f53\u6f54\u6f55\u6f56\u6f57\u6f58\u6f59\u6f5a\u6f5b\u6f5c\u6f5d\u6f5e\u6f5f\u6f60\u6f61\u6f62\u6f63\u6f64\u6f65\u6f66\u6f67\u6f68\u6f69\u6f6a\u6f6b\u6f6c\u6f6d\u6f6e\u6f6f\u6f70\u6f71\u6f72\u6f73\u6f74\u6f75\u6f76\u6f77\u6f78\u6f79\u6f7a\u6f7b\u6f7c\u6f7d\u6f7e\u6f7f\u6f80\u6f81\u6f82\u6f83\u6f84\u6f85\u6f86\u6f87\u6f88\u6f89\u6f8a\u6f8b\u6f8c\u6f8d\u6f8e\u6f8f\u6f90\u6f91\u6f92\u6f93\u6f94\u6f95\u6f96\u6f97\u6f98\u6f99\u6f9a\u6f9b\u6f9c\u6f9d\u6f9e\u6f9f\u
6fa0\u6fa1\u6fa2\u6fa3\u6fa4\u6fa5\u6fa6\u6fa7\u6fa8\u6fa9\u6faa\u6fab\u6fac\u6fad\u6fae\u6faf\u6fb0\u6fb1\u6fb2\u6fb3\u6fb4\u6fb5\u6fb6\u6fb7\u6fb8\u6fb9\u6fba\u6fbb\u6fbc\u6fbd\u6fbe\u6fbf\u6fc0\u6fc1\u6fc2\u6fc3\u6fc4\u6fc5\u6fc6\u6fc7\u6fc8\u6fc9\u6fca\u6fcb\u6fcc\u6fcd\u6fce\u6fcf\u6fd0\u6fd1\u6fd2\u6fd3\u6fd4\u6fd5\u6fd6\u6fd7\u6fd8\u6fd9\u6fda\u6fdb\u6fdc\u6fdd\u6fde\u6fdf\u6fe0\u6fe1\u6fe2\u6fe3\u6fe4\u6fe5\u6fe6\u6fe7\u6fe8\u6fe9\u6fea\u6feb\u6fec\u6fed\u6fee\u6fef\u6ff0\u6ff1\u6ff2\u6ff3\u6ff4\u6ff5\u6ff6\u6ff7\u6ff8\u6ff9\u6ffa\u6ffb\u6ffc\u6ffd\u6ffe\u6fff\u7000\u7001\u7002\u7003\u7004\u7005\u7006\u7007\u7008\u7009\u700a\u700b\u700c\u700d\u700e\u700f\u7010\u7011\u7012\u7013\u7014\u7015\u7016\u7017\u7018\u7019\u701a\u701b\u701c\u701d\u701e\u701f\u7020\u7021\u7022\u7023\u7024\u7025\u7026\u7027\u7028\u7029\u702a\u702b\u702c\u702d\u702e\u702f\u7030\u7031\u7032\u7033\u7034\u7035\u7036\u7037\u7038\u7039\u703a\u703b\u703c\u703d\u703e\u703f\u7040\u7041\u7042\u7043\u7044\u7045\u7046\u7047\u7048\u7049\u704a\u704b\u704c\u704d\u704e\u704f\u7050\u7051\u7052\u7053\u7054\u7055\u7056\u7057\u7058\u7059\u705a\u705b\u705c\u705d\u705e\u705f\u7060\u7061\u7062\u7063\u7064\u7065\u7066\u7067\u7068\u7069\u706a\u706b\u706c\u706d\u706e\u706f\u7070\u7071\u7072\u7073\u7074\u7075\u7076\u7077\u7078\u7079\u707a\u707b\u707c\u707d\u707e\u707f\u7080\u7081\u7082\u7083\u7084\u7085\u7086\u7087\u7088\u7089\u708a\u708b\u708c\u708d\u708e\u708f\u7090\u7091\u7092\u7093\u7094\u7095\u7096\u7097\u7098\u7099\u709a\u709b\u709c\u709d\u709e\u709f\u70a0\u70a1\u70a2\u70a3\u70a4\u70a5\u70a6\u70a7\u70a8\u70a9\u70aa\u70ab\u70ac\u70ad\u70ae\u70af\u70b0\u70b1\u70b2\u70b3\u70b4\u70b5\u70b6\u70b7\u70b8\u70b9\u70ba\u70bb\u70bc\u70bd\u70be\u70bf\u70c0\u70c1\u70c2\u70c3\u70c4\u70c5\u70c6\u70c7\u70c8\u70c9\u70ca\u70cb\u70cc\u70cd\u70ce\u70cf\u70d0\u70d1\u70d2\u70d3\u70d4\u70d5\u70d6\u70d7\u70d8\u70d9\u70da\u70db\u70dc\u70dd\u70de\u70df\u70e0\u70e1\u70e2\u70e3\u70e4\u70e5\u70e6\u70e7\u70e8\u70e9\u70ea\u70eb\u70ec\u70
ed\u70ee\u70ef\u70f0\u70f1\u70f2\u70f3\u70f4\u70f5\u70f6\u70f7\u70f8\u70f9\u70fa\u70fb\u70fc\u70fd\u70fe\u70ff\u7100\u7101\u7102\u7103\u7104\u7105\u7106\u7107\u7108\u7109\u710a\u710b\u710c\u710d\u710e\u710f\u7110\u7111\u7112\u7113\u7114\u7115\u7116\u7117\u7118\u7119\u711a\u711b\u711c\u711d\u711e\u711f\u7120\u7121\u7122\u7123\u7124\u7125\u7126\u7127\u7128\u7129\u712a\u712b\u712c\u712d\u712e\u712f\u7130\u7131\u7132\u7133\u7134\u7135\u7136\u7137\u7138\u7139\u713a\u713b\u713c\u713d\u713e\u713f\u7140\u7141\u7142\u7143\u7144\u7145\u7146\u7147\u7148\u7149\u714a\u714b\u714c\u714d\u714e\u714f\u7150\u7151\u7152\u7153\u7154\u7155\u7156\u7157\u7158\u7159\u715a\u715b\u715c\u715d\u715e\u715f\u7160\u7161\u7162\u7163\u7164\u7165\u7166\u7167\u7168\u7169\u716a\u716b\u716c\u716d\u716e\u716f\u7170\u7171\u7172\u7173\u7174\u7175\u7176\u7177\u7178\u7179\u717a\u717b\u717c\u717d\u717e\u717f\u7180\u7181\u7182\u7183\u7184\u7185\u7186\u7187\u7188\u7189\u718a\u718b\u718c\u718d\u718e\u718f\u7190\u7191\u7192\u7193\u7194\u7195\u7196\u7197\u7198\u7199\u719a\u719b\u719c\u719d\u719e\u719f\u71a0\u71a1\u71a2\u71a3\u71a4\u71a5\u71a6\u71a7\u71a8\u71a9\u71aa\u71ab\u71ac\u71ad\u71ae\u71af\u71b0\u71b1\u71b2\u71b3\u71b4\u71b5\u71b6\u71b7\u71b8\u71b9\u71ba\u71bb\u71bc\u71bd\u71be\u71bf\u71c0\u71c1\u71c2\u71c3\u71c4\u71c5\u71c6\u71c7\u71c8\u71c9\u71ca\u71cb\u71cc\u71cd\u71ce\u71cf\u71d0\u71d1\u71d2\u71d3\u71d4\u71d5\u71d6\u71d7\u71d8\u71d9\u71da\u71db\u71dc\u71dd\u71de\u71df\u71e0\u71e1\u71e2\u71e3\u71e4\u71e5\u71e6\u71e7\u71e8\u71e9\u71ea\u71eb\u71ec\u71ed\u71ee\u71ef\u71f0\u71f1\u71f2\u71f3\u71f4\u71f5\u71f6\u71f7\u71f8\u71f9\u71fa\u71fb\u71fc\u71fd\u71fe\u71ff\u7200\u7201\u7202\u7203\u7204\u7205\u7206\u7207\u7208\u7209\u720a\u720b\u720c\u720d\u720e\u720f\u7210\u7211\u7212\u7213\u7214\u7215\u7216\u7217\u7218\u7219\u721a\u721b\u721c\u721d\u721e\u721f\u7220\u7221\u7222\u7223\u7224\u7225\u7226\u7227\u7228\u7229\u722a\u722b\u722c\u722d\u722e\u722f\u7230\u7231\u7232\u7233\u7234\u7235\u7236\u7237\u7238\u7239\u723a
\u723b\u723c\u723d\u723e\u723f\u7240\u7241\u7242\u7243\u7244\u7245\u7246\u7247\u7248\u7249\u724a\u724b\u724c\u724d\u724e\u724f\u7250\u7251\u7252\u7253\u7254\u7255\u7256\u7257\u7258\u7259\u725a\u725b\u725c\u725d\u725e\u725f\u7260\u7261\u7262\u7263\u7264\u7265\u7266\u7267\u7268\u7269\u726a\u726b\u726c\u726d\u726e\u726f\u7270\u7271\u7272\u7273\u7274\u7275\u7276\u7277\u7278\u7279\u727a\u727b\u727c\u727d\u727e\u727f\u7280\u7281\u7282\u7283\u7284\u7285\u7286\u7287\u7288\u7289\u728a\u728b\u728c\u728d\u728e\u728f\u7290\u7291\u7292\u7293\u7294\u7295\u7296\u7297\u7298\u7299\u729a\u729b\u729c\u729d\u729e\u729f\u72a0\u72a1\u72a2\u72a3\u72a4\u72a5\u72a6\u72a7\u72a8\u72a9\u72aa\u72ab\u72ac\u72ad\u72ae\u72af\u72b0\u72b1\u72b2\u72b3\u72b4\u72b5\u72b6\u72b7\u72b8\u72b9\u72ba\u72bb\u72bc\u72bd\u72be\u72bf\u72c0\u72c1\u72c2\u72c3\u72c4\u72c5\u72c6\u72c7\u72c8\u72c9\u72ca\u72cb\u72cc\u72cd\u72ce\u72cf\u72d0\u72d1\u72d2\u72d3\u72d4\u72d5\u72d6\u72d7\u72d8\u72d9\u72da\u72db\u72dc\u72dd\u72de\u72df\u72e0\u72e1\u72e2\u72e3\u72e4\u72e5\u72e6\u72e7\u72e8\u72e9\u72ea\u72eb\u72ec\u72ed\u72ee\u72ef\u72f0\u72f1\u72f2\u72f3\u72f4\u72f5\u72f6\u72f7\u72f8\u72f9\u72fa\u72fb\u72fc\u72fd\u72fe\u72ff\u7300\u7301\u7302\u7303\u7304\u7305\u7306\u7307\u7308\u7309\u730a\u730b\u730c\u730d\u730e\u730f\u7310\u7311\u7312\u7313\u7314\u7315\u7316\u7317\u7318\u7319\u731a\u731b\u731c\u731d\u731e\u731f\u7320\u7321\u7322\u7323\u7324\u7325\u7326\u7327\u7328\u7329\u732a\u732b\u732c\u732d\u732e\u732f\u7330\u7331\u7332\u7333\u7334\u7335\u7336\u7337\u7338\u7339\u733a\u733b\u733c\u733d\u733e\u733f\u7340\u7341\u7342\u7343\u7344\u7345\u7346\u7347\u7348\u7349\u734a\u734b\u734c\u734d\u734e\u734f\u7350\u7351\u7352\u7353\u7354\u7355\u7356\u7357\u7358\u7359\u735a\u735b\u735c\u735d\u735e\u735f\u7360\u7361\u7362\u7363\u7364\u7365\u7366\u7367\u7368\u7369\u736a\u736b\u736c\u736d\u736e\u736f\u7370\u7371\u7372\u7373\u7374\u7375\u7376\u7377\u7378\u7379\u737a\u737b\u737c\u737d\u737e\u737f\u7380\u7381\u7382\u7383\u7384\u7385\u7386\u7387\u
7388\u7389\u738a\u738b\u738c\u738d\u738e\u738f\u7390\u7391\u7392\u7393\u7394\u7395\u7396\u7397\u7398\u7399\u739a\u739b\u739c\u739d\u739e\u739f\u73a0\u73a1\u73a2\u73a3\u73a4\u73a5\u73a6\u73a7\u73a8\u73a9\u73aa\u73ab\u73ac\u73ad\u73ae\u73af\u73b0\u73b1\u73b2\u73b3\u73b4\u73b5\u73b6\u73b7\u73b8\u73b9\u73ba\u73bb\u73bc\u73bd\u73be\u73bf\u73c0\u73c1\u73c2\u73c3\u73c4\u73c5\u73c6\u73c7\u73c8\u73c9\u73ca\u73cb\u73cc\u73cd\u73ce\u73cf\u73d0\u73d1\u73d2\u73d3\u73d4\u73d5\u73d6\u73d7\u73d8\u73d9\u73da\u73db\u73dc\u73dd\u73de\u73df\u73e0\u73e1\u73e2\u73e3\u73e4\u73e5\u73e6\u73e7\u73e8\u73e9\u73ea\u73eb\u73ec\u73ed\u73ee\u73ef\u73f0\u73f1\u73f2\u73f3\u73f4\u73f5\u73f6\u73f7\u73f8\u73f9\u73fa\u73fb\u73fc\u73fd\u73fe\u73ff\u7400\u7401\u7402\u7403\u7404\u7405\u7406\u7407\u7408\u7409\u740a\u740b\u740c\u740d\u740e\u740f\u7410\u7411\u7412\u7413\u7414\u7415\u7416\u7417\u7418\u7419\u741a\u741b\u741c\u741d\u741e\u741f\u7420\u7421\u7422\u7423\u7424\u7425\u7426\u7427\u7428\u7429\u742a\u742b\u742c\u742d\u742e\u742f\u7430\u7431\u7432\u7433\u7434\u7435\u7436\u7437\u7438\u7439\u743a\u743b\u743c\u743d\u743e\u743f\u7440\u7441\u7442\u7443\u7444\u7445\u7446\u7447\u7448\u7449\u744a\u744b\u744c\u744d\u744e\u744f\u7450\u7451\u7452\u7453\u7454\u7455\u7456\u7457\u7458\u7459\u745a\u745b\u745c\u745d\u745e\u745f\u7460\u7461\u7462\u7463\u7464\u7465\u7466\u7467\u7468\u7469\u746a\u746b\u746c\u746d\u746e\u746f\u7470\u7471\u7472\u7473\u7474\u7475\u7476\u7477\u7478\u7479\u747a\u747b\u747c\u747d\u747e\u747f\u7480\u7481\u7482\u7483\u7484\u7485\u7486\u7487\u7488\u7489\u748a\u748b\u748c\u748d\u748e\u748f\u7490\u7491\u7492\u7493\u7494\u7495\u7496\u7497\u7498\u7499\u749a\u749b\u749c\u749d\u749e\u749f\u74a0\u74a1\u74a2\u74a3\u74a4\u74a5\u74a6\u74a7\u74a8\u74a9\u74aa\u74ab\u74ac\u74ad\u74ae\u74af\u74b0\u74b1\u74b2\u74b3\u74b4\u74b5\u74b6\u74b7\u74b8\u74b9\u74ba\u74bb\u74bc\u74bd\u74be\u74bf\u74c0\u74c1\u74c2\u74c3\u74c4\u74c5\u74c6\u74c7\u74c8\u74c9\u74ca\u74cb\u74cc\u74cd\u74ce\u74cf\u74d0\u74d1\u74d2\u74d3\u74d4\u74
d5\u74d6\u74d7\u74d8\u74d9\u74da\u74db\u74dc\u74dd\u74de\u74df\u74e0\u74e1\u74e2\u74e3\u74e4\u74e5\u74e6\u74e7\u74e8\u74e9\u74ea\u74eb\u74ec\u74ed\u74ee\u74ef\u74f0\u74f1\u74f2\u74f3\u74f4\u74f5\u74f6\u74f7\u74f8\u74f9\u74fa\u74fb\u74fc\u74fd\u74fe\u74ff\u7500\u7501\u7502\u7503\u7504\u7505\u7506\u7507\u7508\u7509\u750a\u750b\u750c\u750d\u750e\u750f\u7510\u7511\u7512\u7513\u7514\u7515\u7516\u7517\u7518\u7519\u751a\u751b\u751c\u751d\u751e\u751f\u7520\u7521\u7522\u7523\u7524\u7525\u7526\u7527\u7528\u7529\u752a\u752b\u752c\u752d\u752e\u752f\u7530\u7531\u7532\u7533\u7534\u7535\u7536\u7537\u7538\u7539\u753a\u753b\u753c\u753d\u753e\u753f\u7540\u7541\u7542\u7543\u7544\u7545\u7546\u7547\u7548\u7549\u754a\u754b\u754c\u754d\u754e\u754f\u7550\u7551\u7552\u7553\u7554\u7555\u7556\u7557\u7558\u7559\u755a\u755b\u755c\u755d\u755e\u755f\u7560\u7561\u7562\u7563\u7564\u7565\u7566\u7567\u7568\u7569\u756a\u756b\u756c\u756d\u756e\u756f\u7570\u7571\u7572\u7573\u7574\u7575\u7576\u7577\u7578\u7579\u757a\u757b\u757c\u757d\u757e\u757f\u7580\u7581\u7582\u7583\u7584\u7585\u7586\u7587\u7588\u7589\u758a\u758b\u758c\u758d\u758e\u758f\u7590\u7591\u7592\u7593\u7594\u7595\u7596\u7597\u7598\u7599\u759a\u759b\u759c\u759d\u759e\u759f\u75a0\u75a1\u75a2\u75a3\u75a4\u75a5\u75a6\u75a7\u75a8\u75a9\u75aa\u75ab\u75ac\u75ad\u75ae\u75af\u75b0\u75b1\u75b2\u75b3\u75b4\u75b5\u75b6\u75b7\u75b8\u75b9\u75ba\u75bb\u75bc\u75bd\u75be\u75bf\u75c0\u75c1\u75c2\u75c3\u75c4\u75c5\u75c6\u75c7\u75c8\u75c9\u75ca\u75cb\u75cc\u75cd\u75ce\u75cf\u75d0\u75d1\u75d2\u75d3\u75d4\u75d5\u75d6\u75d7\u75d8\u75d9\u75da\u75db\u75dc\u75dd\u75de\u75df\u75e0\u75e1\u75e2\u75e3\u75e4\u75e5\u75e6\u75e7\u75e8\u75e9\u75ea\u75eb\u75ec\u75ed\u75ee\u75ef\u75f0\u75f1\u75f2\u75f3\u75f4\u75f5\u75f6\u75f7\u75f8\u75f9\u75fa\u75fb\u75fc\u75fd\u75fe\u75ff\u7600\u7601\u7602\u7603\u7604\u7605\u7606\u7607\u7608\u7609\u760a\u760b\u760c\u760d\u760e\u760f\u7610\u7611\u7612\u7613\u7614\u7615\u7616\u7617\u7618\u7619\u761a\u761b\u761c\u761d\u761e\u761f\u7620\u7621\u7622
\u7623\u7624\u7625\u7626\u7627\u7628\u7629\u762a\u762b\u762c\u762d\u762e\u762f\u7630\u7631\u7632\u7633\u7634\u7635\u7636\u7637\u7638\u7639\u763a\u763b\u763c\u763d\u763e\u763f\u7640\u7641\u7642\u7643\u7644\u7645\u7646\u7647\u7648\u7649\u764a\u764b\u764c\u764d\u764e\u764f\u7650\u7651\u7652\u7653\u7654\u7655\u7656\u7657\u7658\u7659\u765a\u765b\u765c\u765d\u765e\u765f\u7660\u7661\u7662\u7663\u7664\u7665\u7666\u7667\u7668\u7669\u766a\u766b\u766c\u766d\u766e\u766f\u7670\u7671\u7672\u7673\u7674\u7675\u7676\u7677\u7678\u7679\u767a\u767b\u767c\u767d\u767e\u767f\u7680\u7681\u7682\u7683\u7684\u7685\u7686\u7687\u7688\u7689\u768a\u768b\u768c\u768d\u768e\u768f\u7690\u7691\u7692\u7693\u7694\u7695\u7696\u7697\u7698\u7699\u769a\u769b\u769c\u769d\u769e\u769f\u76a0\u76a1\u76a2\u76a3\u76a4\u76a5\u76a6\u76a7\u76a8\u76a9\u76aa\u76ab\u76ac\u76ad\u76ae\u76af\u76b0\u76b1\u76b2\u76b3\u76b4\u76b5\u76b6\u76b7\u76b8\u76b9\u76ba\u76bb\u76bc\u76bd\u76be\u76bf\u76c0\u76c1\u76c2\u76c3\u76c4\u76c5\u76c6\u76c7\u76c8\u76c9\u76ca\u76cb\u76cc\u76cd\u76ce\u76cf\u76d0\u76d1\u76d2\u76d3\u76d4\u76d5\u76d6\u76d7\u76d8\u76d9\u76da\u76db\u76dc\u76dd\u76de\u76df\u76e0\u76e1\u76e2\u76e3\u76e4\u76e5\u76e6\u76e7\u76e8\u76e9\u76ea\u76eb\u76ec\u76ed\u76ee\u76ef\u76f0\u76f1\u76f2\u76f3\u76f4\u76f5\u76f6\u76f7\u76f8\u76f9\u76fa\u76fb\u76fc\u76fd\u76fe\u76ff\u7700\u7701\u7702\u7703\u7704\u7705\u7706\u7707\u7708\u7709\u770a\u770b\u770c\u770d\u770e\u770f\u7710\u7711\u7712\u7713\u7714\u7715\u7716\u7717\u7718\u7719\u771a\u771b\u771c\u771d\u771e\u771f\u7720\u7721\u7722\u7723\u7724\u7725\u7726\u7727\u7728\u7729\u772a\u772b\u772c\u772d\u772e\u772f\u7730\u7731\u7732\u7733\u7734\u7735\u7736\u7737\u7738\u7739\u773a\u773b\u773c\u773d\u773e\u773f\u7740\u7741\u7742\u7743\u7744\u7745\u7746\u7747\u7748\u7749\u774a\u774b\u774c\u774d\u774e\u774f\u7750\u7751\u7752\u7753\u7754\u7755\u7756\u7757\u7758\u7759\u775a\u775b\u775c\u775d\u775e\u775f\u7760\u7761\u7762\u7763\u7764\u7765\u7766\u7767\u7768\u7769\u776a\u776b\u776c\u776d\u776e\u776f\u
7770\u7771\u7772\u7773\u7774\u7775\u7776\u7777\u7778\u7779\u777a\u777b\u777c\u777d\u777e\u777f\u7780\u7781\u7782\u7783\u7784\u7785\u7786\u7787\u7788\u7789\u778a\u778b\u778c\u778d\u778e\u778f\u7790\u7791\u7792\u7793\u7794\u7795\u7796\u7797\u7798\u7799\u779a\u779b\u779c\u779d\u779e\u779f\u77a0\u77a1\u77a2\u77a3\u77a4\u77a5\u77a6\u77a7\u77a8\u77a9\u77aa\u77ab\u77ac\u77ad\u77ae\u77af\u77b0\u77b1\u77b2\u77b3\u77b4\u77b5\u77b6\u77b7\u77b8\u77b9\u77ba\u77bb\u77bc\u77bd\u77be\u77bf\u77c0\u77c1\u77c2\u77c3\u77c4\u77c5\u77c6\u77c7\u77c8\u77c9\u77ca\u77cb\u77cc\u77cd\u77ce\u77cf\u77d0\u77d1\u77d2\u77d3\u77d4\u77d5\u77d6\u77d7\u77d8\u77d9\u77da\u77db\u77dc\u77dd\u77de\u77df\u77e0\u77e1\u77e2\u77e3\u77e4\u77e5\u77e6\u77e7\u77e8\u77e9\u77ea\u77eb\u77ec\u77ed\u77ee\u77ef\u77f0\u77f1\u77f2\u77f3\u77f4\u77f5\u77f6\u77f7\u77f8\u77f9\u77fa\u77fb\u77fc\u77fd\u77fe\u77ff\u7800\u7801\u7802\u7803\u7804\u7805\u7806\u7807\u7808\u7809\u780a\u780b\u780c\u780d\u780e\u780f\u7810\u7811\u7812\u7813\u7814\u7815\u7816\u7817\u7818\u7819\u781a\u781b\u781c\u781d\u781e\u781f\u7820\u7821\u7822\u7823\u7824\u7825\u7826\u7827\u7828\u7829\u782a\u782b\u782c\u782d\u782e\u782f\u7830\u7831\u7832\u7833\u7834\u7835\u7836\u7837\u7838\u7839\u783a\u783b\u783c\u783d\u783e\u783f\u7840\u7841\u7842\u7843\u7844\u7845\u7846\u7847\u7848\u7849\u784a\u784b\u784c\u784d\u784e\u784f\u7850\u7851\u7852\u7853\u7854\u7855\u7856\u7857\u7858\u7859\u785a\u785b\u785c\u785d\u785e\u785f\u7860\u7861\u7862\u7863\u7864\u7865\u7866\u7867\u7868\u7869\u786a\u786b\u786c\u786d\u786e\u786f\u7870\u7871\u7872\u7873\u7874\u7875\u7876\u7877\u7878\u7879\u787a\u787b\u787c\u787d\u787e\u787f\u7880\u7881\u7882\u7883\u7884\u7885\u7886\u7887\u7888\u7889\u788a\u788b\u788c\u788d\u788e\u788f\u7890\u7891\u7892\u7893\u7894\u7895\u7896\u7897\u7898\u7899\u789a\u789b\u789c\u789d\u789e\u789f\u78a0\u78a1\u78a2\u78a3\u78a4\u78a5\u78a6\u78a7\u78a8\u78a9\u78aa\u78ab\u78ac\u78ad\u78ae\u78af\u78b0\u78b1\u78b2\u78b3\u78b4\u78b5\u78b6\u78b7\u78b8\u78b9\u78ba\u78bb\u78bc\u78
bd\u78be\u78bf\u78c0\u78c1\u78c2\u78c3\u78c4\u78c5\u78c6\u78c7\u78c8\u78c9\u78ca\u78cb\u78cc\u78cd\u78ce\u78cf\u78d0\u78d1\u78d2\u78d3\u78d4\u78d5\u78d6\u78d7\u78d8\u78d9\u78da\u78db\u78dc\u78dd\u78de\u78df\u78e0\u78e1\u78e2\u78e3\u78e4\u78e5\u78e6\u78e7\u78e8\u78e9\u78ea\u78eb\u78ec\u78ed\u78ee\u78ef\u78f0\u78f1\u78f2\u78f3\u78f4\u78f5\u78f6\u78f7\u78f8\u78f9\u78fa\u78fb\u78fc\u78fd\u78fe\u78ff\u7900\u7901\u7902\u7903\u7904\u7905\u7906\u7907\u7908\u7909\u790a\u790b\u790c\u790d\u790e\u790f\u7910\u7911\u7912\u7913\u7914\u7915\u7916\u7917\u7918\u7919\u791a\u791b\u791c\u791d\u791e\u791f\u7920\u7921\u7922\u7923\u7924\u7925\u7926\u7927\u7928\u7929\u792a\u792b\u792c\u792d\u792e\u792f\u7930\u7931\u7932\u7933\u7934\u7935\u7936\u7937\u7938\u7939\u793a\u793b\u793c\u793d\u793e\u793f\u7940\u7941\u7942\u7943\u7944\u7945\u7946\u7947\u7948\u7949\u794a\u794b\u794c\u794d\u794e\u794f\u7950\u7951\u7952\u7953\u7954\u7955\u7956\u7957\u7958\u7959\u795a\u795b\u795c\u795d\u795e\u795f\u7960\u7961\u7962\u7963\u7964\u7965\u7966\u7967\u7968\u7969\u796a\u796b\u796c\u796d\u796e\u796f\u7970\u7971\u7972\u7973\u7974\u7975\u7976\u7977\u7978\u7979\u797a\u797b\u797c\u797d\u797e\u797f\u7980\u7981\u7982\u7983\u7984\u7985\u7986\u7987\u7988\u7989\u798a\u798b\u798c\u798d\u798e\u798f\u7990\u7991\u7992\u7993\u7994\u7995\u7996\u7997\u7998\u7999\u799a\u799b\u799c\u799d\u799e\u799f\u79a0\u79a1\u79a2\u79a3\u79a4\u79a5\u79a6\u79a7\u79a8\u79a9\u79aa\u79ab\u79ac\u79ad\u79ae\u79af\u79b0\u79b1\u79b2\u79b3\u79b4\u79b5\u79b6\u79b7\u79b8\u79b9\u79ba\u79bb\u79bc\u79bd\u79be\u79bf\u79c0\u79c1\u79c2\u79c3\u79c4\u79c5\u79c6\u79c7\u79c8\u79c9\u79ca\u79cb\u79cc\u79cd\u79ce\u79cf\u79d0\u79d1\u79d2\u79d3\u79d4\u79d5\u79d6\u79d7\u79d8\u79d9\u79da\u79db\u79dc\u79dd\u79de\u79df\u79e0\u79e1\u79e2\u79e3\u79e4\u79e5\u79e6\u79e7\u79e8\u79e9\u79ea\u79eb\u79ec\u79ed\u79ee\u79ef\u79f0\u79f1\u79f2\u79f3\u79f4\u79f5\u79f6\u79f7\u79f8\u79f9\u79fa\u79fb\u79fc\u79fd\u79fe\u79ff\u7a00\u7a01\u7a02\u7a03\u7a04\u7a05\u7a06\u7a07\u7a08\u7a09\u7a0a
\u7a0b\u7a0c\u7a0d\u7a0e\u7a0f\u7a10\u7a11\u7a12\u7a13\u7a14\u7a15\u7a16\u7a17\u7a18\u7a19\u7a1a\u7a1b\u7a1c\u7a1d\u7a1e\u7a1f\u7a20\u7a21\u7a22\u7a23\u7a24\u7a25\u7a26\u7a27\u7a28\u7a29\u7a2a\u7a2b\u7a2c\u7a2d\u7a2e\u7a2f\u7a30\u7a31\u7a32\u7a33\u7a34\u7a35\u7a36\u7a37\u7a38\u7a39\u7a3a\u7a3b\u7a3c\u7a3d\u7a3e\u7a3f\u7a40\u7a41\u7a42\u7a43\u7a44\u7a45\u7a46\u7a47\u7a48\u7a49\u7a4a\u7a4b\u7a4c\u7a4d\u7a4e\u7a4f\u7a50\u7a51\u7a52\u7a53\u7a54\u7a55\u7a56\u7a57\u7a58\u7a59\u7a5a\u7a5b\u7a5c\u7a5d\u7a5e\u7a5f\u7a60\u7a61\u7a62\u7a63\u7a64\u7a65\u7a66\u7a67\u7a68\u7a69\u7a6a\u7a6b\u7a6c\u7a6d\u7a6e\u7a6f\u7a70\u7a71\u7a72\u7a73\u7a74\u7a75\u7a76\u7a77\u7a78\u7a79\u7a7a\u7a7b\u7a7c\u7a7d\u7a7e\u7a7f\u7a80\u7a81\u7a82\u7a83\u7a84\u7a85\u7a86\u7a87\u7a88\u7a89\u7a8a\u7a8b\u7a8c\u7a8d\u7a8e\u7a8f\u7a90\u7a91\u7a92\u7a93\u7a94\u7a95\u7a96\u7a97\u7a98\u7a99\u7a9a\u7a9b\u7a9c\u7a9d\u7a9e\u7a9f\u7aa0\u7aa1\u7aa2\u7aa3\u7aa4\u7aa5\u7aa6\u7aa7\u7aa8\u7aa9\u7aaa\u7aab\u7aac\u7aad\u7aae\u7aaf\u7ab0\u7ab1\u7ab2\u7ab3\u7ab4\u7ab5\u7ab6\u7ab7\u7ab8\u7ab9\u7aba\u7abb\u7abc\u7abd\u7abe\u7abf\u7ac0\u7ac1\u7ac2\u7ac3\u7ac4\u7ac5\u7ac6\u7ac7\u7ac8\u7ac9\u7aca\u7acb\u7acc\u7acd\u7ace\u7acf\u7ad0\u7ad1\u7ad2\u7ad3\u7ad4\u7ad5\u7ad6\u7ad7\u7ad8\u7ad9\u7ada\u7adb\u7adc\u7add\u7ade\u7adf\u7ae0\u7ae1\u7ae2\u7ae3\u7ae4\u7ae5\u7ae6\u7ae7\u7ae8\u7ae9\u7aea\u7aeb\u7aec\u7aed\u7aee\u7aef\u7af0\u7af1\u7af2\u7af3\u7af4\u7af5\u7af6\u7af7\u7af8\u7af9\u7afa\u7afb\u7afc\u7afd\u7afe\u7aff\u7b00\u7b01\u7b02\u7b03\u7b04\u7b05\u7b06\u7b07\u7b08\u7b09\u7b0a\u7b0b\u7b0c\u7b0d\u7b0e\u7b0f\u7b10\u7b11\u7b12\u7b13\u7b14\u7b15\u7b16\u7b17\u7b18\u7b19\u7b1a\u7b1b\u7b1c\u7b1d\u7b1e\u7b1f\u7b20\u7b21\u7b22\u7b23\u7b24\u7b25\u7b26\u7b27\u7b28\u7b29\u7b2a\u7b2b\u7b2c\u7b2d\u7b2e\u7b2f\u7b30\u7b31\u7b32\u7b33\u7b34\u7b35\u7b36\u7b37\u7b38\u7b39\u7b3a\u7b3b\u7b3c\u7b3d\u7b3e\u7b3f\u7b40\u7b41\u7b42\u7b43\u7b44\u7b45\u7b46\u7b47\u7b48\u7b49\u7b4a\u7b4b\u7b4c\u7b4d\u7b4e\u7b4f\u7b50\u7b51\u7b52\u7b53\u7b54\u7b55\u7b56\u7b57\u
7b58\u7b59\u7b5a\u7b5b\u7b5c\u7b5d\u7b5e\u7b5f\u7b60\u7b61\u7b62\u7b63\u7b64\u7b65\u7b66\u7b67\u7b68\u7b69\u7b6a\u7b6b\u7b6c\u7b6d\u7b6e\u7b6f\u7b70\u7b71\u7b72\u7b73\u7b74\u7b75\u7b76\u7b77\u7b78\u7b79\u7b7a\u7b7b\u7b7c\u7b7d\u7b7e\u7b7f\u7b80\u7b81\u7b82\u7b83\u7b84\u7b85\u7b86\u7b87\u7b88\u7b89\u7b8a\u7b8b\u7b8c\u7b8d\u7b8e\u7b8f\u7b90\u7b91\u7b92\u7b93\u7b94\u7b95\u7b96\u7b97\u7b98\u7b99\u7b9a\u7b9b\u7b9c\u7b9d\u7b9e\u7b9f\u7ba0\u7ba1\u7ba2\u7ba3\u7ba4\u7ba5\u7ba6\u7ba7\u7ba8\u7ba9\u7baa\u7bab\u7bac\u7bad\u7bae\u7baf\u7bb0\u7bb1\u7bb2\u7bb3\u7bb4\u7bb5\u7bb6\u7bb7\u7bb8\u7bb9\u7bba\u7bbb\u7bbc\u7bbd\u7bbe\u7bbf\u7bc0\u7bc1\u7bc2\u7bc3\u7bc4\u7bc5\u7bc6\u7bc7\u7bc8\u7bc9\u7bca\u7bcb\u7bcc\u7bcd\u7bce\u7bcf\u7bd0\u7bd1\u7bd2\u7bd3\u7bd4\u7bd5\u7bd6\u7bd7\u7bd8\u7bd9\u7bda\u7bdb\u7bdc\u7bdd\u7bde\u7bdf\u7be0\u7be1\u7be2\u7be3\u7be4\u7be5\u7be6\u7be7\u7be8\u7be9\u7bea\u7beb\u7bec\u7bed\u7bee\u7bef\u7bf0\u7bf1\u7bf2\u7bf3\u7bf4\u7bf5\u7bf6\u7bf7\u7bf8\u7bf9\u7bfa\u7bfb\u7bfc\u7bfd\u7bfe\u7bff\u7c00\u7c01\u7c02\u7c03\u7c04\u7c05\u7c06\u7c07\u7c08\u7c09\u7c0a\u7c0b\u7c0c\u7c0d\u7c0e\u7c0f\u7c10\u7c11\u7c12\u7c13\u7c14\u7c15\u7c16\u7c17\u7c18\u7c19\u7c1a\u7c1b\u7c1c\u7c1d\u7c1e\u7c1f\u7c20\u7c21\u7c22\u7c23\u7c24\u7c25\u7c26\u7c27\u7c28\u7c29\u7c2a\u7c2b\u7c2c\u7c2d\u7c2e\u7c2f\u7c30\u7c31\u7c32\u7c33\u7c34\u7c35\u7c36\u7c37\u7c38\u7c39\u7c3a\u7c3b\u7c3c\u7c3d\u7c3e\u7c3f\u7c40\u7c41\u7c42\u7c43\u7c44\u7c45\u7c46\u7c47\u7c48\u7c49\u7c4a\u7c4b\u7c4c\u7c4d\u7c4e\u7c4f\u7c50\u7c51\u7c52\u7c53\u7c54\u7c55\u7c56\u7c57\u7c58\u7c59\u7c5a\u7c5b\u7c5c\u7c5d\u7c5e\u7c5f\u7c60\u7c61\u7c62\u7c63\u7c64\u7c65\u7c66\u7c67\u7c68\u7c69\u7c6a\u7c6b\u7c6c\u7c6d\u7c6e\u7c6f\u7c70\u7c71\u7c72\u7c73\u7c74\u7c75\u7c76\u7c77\u7c78\u7c79\u7c7a\u7c7b\u7c7c\u7c7d\u7c7e\u7c7f\u7c80\u7c81\u7c82\u7c83\u7c84\u7c85\u7c86\u7c87\u7c88\u7c89\u7c8a\u7c8b\u7c8c\u7c8d\u7c8e\u7c8f\u7c90\u7c91\u7c92\u7c93\u7c94\u7c95\u7c96\u7c97\u7c98\u7c99\u7c9a\u7c9b\u7c9c\u7c9d\u7c9e\u7c9f\u7ca0\u7ca1\u7ca2\u7ca3\u7ca4\u7c
a5\u7ca6\u7ca7\u7ca8\u7ca9\u7caa\u7cab\u7cac\u7cad\u7cae\u7caf\u7cb0\u7cb1\u7cb2\u7cb3\u7cb4\u7cb5\u7cb6\u7cb7\u7cb8\u7cb9\u7cba\u7cbb\u7cbc\u7cbd\u7cbe\u7cbf\u7cc0\u7cc1\u7cc2\u7cc3\u7cc4\u7cc5\u7cc6\u7cc7\u7cc8\u7cc9\u7cca\u7ccb\u7ccc\u7ccd\u7cce\u7ccf\u7cd0\u7cd1\u7cd2\u7cd3\u7cd4\u7cd5\u7cd6\u7cd7\u7cd8\u7cd9\u7cda\u7cdb\u7cdc\u7cdd\u7cde\u7cdf\u7ce0\u7ce1\u7ce2\u7ce3\u7ce4\u7ce5\u7ce6\u7ce7\u7ce8\u7ce9\u7cea\u7ceb\u7cec\u7ced\u7cee\u7cef\u7cf0\u7cf1\u7cf2\u7cf3\u7cf4\u7cf5\u7cf6\u7cf7\u7cf8\u7cf9\u7cfa\u7cfb\u7cfc\u7cfd\u7cfe\u7cff\u7d00\u7d01\u7d02\u7d03\u7d04\u7d05\u7d06\u7d07\u7d08\u7d09\u7d0a\u7d0b\u7d0c\u7d0d\u7d0e\u7d0f\u7d10\u7d11\u7d12\u7d13\u7d14\u7d15\u7d16\u7d17\u7d18\u7d19\u7d1a\u7d1b\u7d1c\u7d1d\u7d1e\u7d1f\u7d20\u7d21\u7d22\u7d23\u7d24\u7d25\u7d26\u7d27\u7d28\u7d29\u7d2a\u7d2b\u7d2c\u7d2d\u7d2e\u7d2f\u7d30\u7d31\u7d32\u7d33\u7d34\u7d35\u7d36\u7d37\u7d38\u7d39\u7d3a\u7d3b\u7d3c\u7d3d\u7d3e\u7d3f\u7d40\u7d41\u7d42\u7d43\u7d44\u7d45\u7d46\u7d47\u7d48\u7d49\u7d4a\u7d4b\u7d4c\u7d4d\u7d4e\u7d4f\u7d50\u7d51\u7d52\u7d53\u7d54\u7d55\u7d56\u7d57\u7d58\u7d59\u7d5a\u7d5b\u7d5c\u7d5d\u7d5e\u7d5f\u7d60\u7d61\u7d62\u7d63\u7d64\u7d65\u7d66\u7d67\u7d68\u7d69\u7d6a\u7d6b\u7d6c\u7d6d\u7d6e\u7d6f\u7d70\u7d71\u7d72\u7d73\u7d74\u7d75\u7d76\u7d77\u7d78\u7d79\u7d7a\u7d7b\u7d7c\u7d7d\u7d7e\u7d7f\u7d80\u7d81\u7d82\u7d83\u7d84\u7d85\u7d86\u7d87\u7d88\u7d89\u7d8a\u7d8b\u7d8c\u7d8d\u7d8e\u7d8f\u7d90\u7d91\u7d92\u7d93\u7d94\u7d95\u7d96\u7d97\u7d98\u7d99\u7d9a\u7d9b\u7d9c\u7d9d\u7d9e\u7d9f\u7da0\u7da1\u7da2\u7da3\u7da4\u7da5\u7da6\u7da7\u7da8\u7da9\u7daa\u7dab\u7dac\u7dad\u7dae\u7daf\u7db0\u7db1\u7db2\u7db3\u7db4\u7db5\u7db6\u7db7\u7db8\u7db9\u7dba\u7dbb\u7dbc\u7dbd\u7dbe\u7dbf\u7dc0\u7dc1\u7dc2\u7dc3\u7dc4\u7dc5\u7dc6\u7dc7\u7dc8\u7dc9\u7dca\u7dcb\u7dcc\u7dcd\u7dce\u7dcf\u7dd0\u7dd1\u7dd2\u7dd3\u7dd4\u7dd5\u7dd6\u7dd7\u7dd8\u7dd9\u7dda\u7ddb\u7ddc\u7ddd\u7dde\u7ddf\u7de0\u7de1\u7de2\u7de3\u7de4\u7de5\u7de6\u7de7\u7de8\u7de9\u7dea\u7deb\u7dec\u7ded\u7dee\u7def\u7df0\u7df1\u7df2
\u7df3\u7df4\u7df5\u7df6\u7df7\u7df8\u7df9\u7dfa\u7dfb\u7dfc\u7dfd\u7dfe\u7dff\u7e00\u7e01\u7e02\u7e03\u7e04\u7e05\u7e06\u7e07\u7e08\u7e09\u7e0a\u7e0b\u7e0c\u7e0d\u7e0e\u7e0f\u7e10\u7e11\u7e12\u7e13\u7e14\u7e15\u7e16\u7e17\u7e18\u7e19\u7e1a\u7e1b\u7e1c\u7e1d\u7e1e\u7e1f\u7e20\u7e21\u7e22\u7e23\u7e24\u7e25\u7e26\u7e27\u7e28\u7e29\u7e2a\u7e2b\u7e2c\u7e2d\u7e2e\u7e2f\u7e30\u7e31\u7e32\u7e33\u7e34\u7e35\u7e36\u7e37\u7e38\u7e39\u7e3a\u7e3b\u7e3c\u7e3d\u7e3e\u7e3f\u7e40\u7e41\u7e42\u7e43\u7e44\u7e45\u7e46\u7e47\u7e48\u7e49\u7e4a\u7e4b\u7e4c\u7e4d\u7e4e\u7e4f\u7e50\u7e51\u7e52\u7e53\u7e54\u7e55\u7e56\u7e57\u7e58\u7e59\u7e5a\u7e5b\u7e5c\u7e5d\u7e5e\u7e5f\u7e60\u7e61\u7e62\u7e63\u7e64\u7e65\u7e66\u7e67\u7e68\u7e69\u7e6a\u7e6b\u7e6c\u7e6d\u7e6e\u7e6f\u7e70\u7e71\u7e72\u7e73\u7e74\u7e75\u7e76\u7e77\u7e78\u7e79\u7e7a\u7e7b\u7e7c\u7e7d\u7e7e\u7e7f\u7e80\u7e81\u7e82\u7e83\u7e84\u7e85\u7e86\u7e87\u7e88\u7e89\u7e8a\u7e8b\u7e8c\u7e8d\u7e8e\u7e8f\u7e90\u7e91\u7e92\u7e93\u7e94\u7e95\u7e96\u7e97\u7e98\u7e99\u7e9a\u7e9b\u7e9c\u7e9d\u7e9e\u7e9f\u7ea0\u7ea1\u7ea2\u7ea3\u7ea4\u7ea5\u7ea6\u7ea7\u7ea8\u7ea9\u7eaa\u7eab\u7eac\u7ead\u7eae\u7eaf\u7eb0\u7eb1\u7eb2\u7eb3\u7eb4\u7eb5\u7eb6\u7eb7\u7eb8\u7eb9\u7eba\u7ebb\u7ebc\u7ebd\u7ebe\u7ebf\u7ec0\u7ec1\u7ec2\u7ec3\u7ec4\u7ec5\u7ec6\u7ec7\u7ec8\u7ec9\u7eca\u7ecb\u7ecc\u7ecd\u7ece\u7ecf\u7ed0\u7ed1\u7ed2\u7ed3\u7ed4\u7ed5\u7ed6\u7ed7\u7ed8\u7ed9\u7eda\u7edb\u7edc\u7edd\u7ede\u7edf\u7ee0\u7ee1\u7ee2\u7ee3\u7ee4\u7ee5\u7ee6\u7ee7\u7ee8\u7ee9\u7eea\u7eeb\u7eec\u7eed\u7eee\u7eef\u7ef0\u7ef1\u7ef2\u7ef3\u7ef4\u7ef5\u7ef6\u7ef7\u7ef8\u7ef9\u7efa\u7efb\u7efc\u7efd\u7efe\u7eff\u7f00\u7f01\u7f02\u7f03\u7f04\u7f05\u7f06\u7f07\u7f08\u7f09\u7f0a\u7f0b\u7f0c\u7f0d\u7f0e\u7f0f\u7f10\u7f11\u7f12\u7f13\u7f14\u7f15\u7f16\u7f17\u7f18\u7f19\u7f1a\u7f1b\u7f1c\u7f1d\u7f1e\u7f1f\u7f20\u7f21\u7f22\u7f23\u7f24\u7f25\u7f26\u7f27\u7f28\u7f29\u7f2a\u7f2b\u7f2c\u7f2d\u7f2e\u7f2f\u7f30\u7f31\u7f32\u7f33\u7f34\u7f35\u7f36\u7f37\u7f38\u7f39\u7f3a\u7f3b\u7f3c\u7f3d\u7f3e\u7f3f\u
7f40\u7f41\u7f42\u7f43\u7f44\u7f45\u7f46\u7f47\u7f48\u7f49\u7f4a\u7f4b\u7f4c\u7f4d\u7f4e\u7f4f\u7f50\u7f51\u7f52\u7f53\u7f54\u7f55\u7f56\u7f57\u7f58\u7f59\u7f5a\u7f5b\u7f5c\u7f5d\u7f5e\u7f5f\u7f60\u7f61\u7f62\u7f63\u7f64\u7f65\u7f66\u7f67\u7f68\u7f69\u7f6a\u7f6b\u7f6c\u7f6d\u7f6e\u7f6f\u7f70\u7f71\u7f72\u7f73\u7f74\u7f75\u7f76\u7f77\u7f78\u7f79\u7f7a\u7f7b\u7f7c\u7f7d\u7f7e\u7f7f\u7f80\u7f81\u7f82\u7f83\u7f84\u7f85\u7f86\u7f87\u7f88\u7f89\u7f8a\u7f8b\u7f8c\u7f8d\u7f8e\u7f8f\u7f90\u7f91\u7f92\u7f93\u7f94\u7f95\u7f96\u7f97\u7f98\u7f99\u7f9a\u7f9b\u7f9c\u7f9d\u7f9e\u7f9f\u7fa0\u7fa1\u7fa2\u7fa3\u7fa4\u7fa5\u7fa6\u7fa7\u7fa8\u7fa9\u7faa\u7fab\u7fac\u7fad\u7fae\u7faf\u7fb0\u7fb1\u7fb2\u7fb3\u7fb4\u7fb5\u7fb6\u7fb7\u7fb8\u7fb9\u7fba\u7fbb\u7fbc\u7fbd\u7fbe\u7fbf\u7fc0\u7fc1\u7fc2\u7fc3\u7fc4\u7fc5\u7fc6\u7fc7\u7fc8\u7fc9\u7fca\u7fcb\u7fcc\u7fcd\u7fce\u7fcf\u7fd0\u7fd1\u7fd2\u7fd3\u7fd4\u7fd5\u7fd6\u7fd7\u7fd8\u7fd9\u7fda\u7fdb\u7fdc\u7fdd\u7fde\u7fdf\u7fe0\u7fe1\u7fe2\u7fe3\u7fe4\u7fe5\u7fe6\u7fe7\u7fe8\u7fe9\u7fea\u7feb\u7fec\u7fed\u7fee\u7fef\u7ff0\u7ff1\u7ff2\u7ff3\u7ff4\u7ff5\u7ff6\u7ff7\u7ff8\u7ff9\u7ffa\u7ffb\u7ffc\u7ffd\u7ffe\u7fff\u8000\u8001\u8002\u8003\u8004\u8005\u8006\u8007\u8008\u8009\u800a\u800b\u800c\u800d\u800e\u800f\u8010\u8011\u8012\u8013\u8014\u8015\u8016\u8017\u8018\u8019\u801a\u801b\u801c\u801d\u801e\u801f\u8020\u8021\u8022\u8023\u8024\u8025\u8026\u8027\u8028\u8029\u802a\u802b\u802c\u802d\u802e\u802f\u8030\u8031\u8032\u8033\u8034\u8035\u8036\u8037\u8038\u8039\u803a\u803b\u803c\u803d\u803e\u803f\u8040\u8041\u8042\u8043\u8044\u8045\u8046\u8047\u8048\u8049\u804a\u804b\u804c\u804d\u804e\u804f\u8050\u8051\u8052\u8053\u8054\u8055\u8056\u8057\u8058\u8059\u805a\u805b\u805c\u805d\u805e\u805f\u8060\u8061\u8062\u8063\u8064\u8065\u8066\u8067\u8068\u8069\u806a\u806b\u806c\u806d\u806e\u806f\u8070\u8071\u8072\u8073\u8074\u8075\u8076\u8077\u8078\u8079\u807a\u807b\u807c\u807d\u807e\u807f\u8080\u8081\u8082\u8083\u8084\u8085\u8086\u8087\u8088\u8089\u808a\u808b\u808c\u80
8d\u808e\u808f\u8090\u8091\u8092\u8093\u8094\u8095\u8096\u8097\u8098\u8099\u809a\u809b\u809c\u809d\u809e\u809f\u80a0\u80a1\u80a2\u80a3\u80a4\u80a5\u80a6\u80a7\u80a8\u80a9\u80aa\u80ab\u80ac\u80ad\u80ae\u80af\u80b0\u80b1\u80b2\u80b3\u80b4\u80b5\u80b6\u80b7\u80b8\u80b9\u80ba\u80bb\u80bc\u80bd\u80be\u80bf\u80c0\u80c1\u80c2\u80c3\u80c4\u80c5\u80c6\u80c7\u80c8\u80c9\u80ca\u80cb\u80cc\u80cd\u80ce\u80cf\u80d0\u80d1\u80d2\u80d3\u80d4\u80d5\u80d6\u80d7\u80d8\u80d9\u80da\u80db\u80dc\u80dd\u80de\u80df\u80e0\u80e1\u80e2\u80e3\u80e4\u80e5\u80e6\u80e7\u80e8\u80e9\u80ea\u80eb\u80ec\u80ed\u80ee\u80ef\u80f0\u80f1\u80f2\u80f3\u80f4\u80f5\u80f6\u80f7\u80f8\u80f9\u80fa\u80fb\u80fc\u80fd\u80fe\u80ff\u8100\u8101\u8102\u8103\u8104\u8105\u8106\u8107\u8108\u8109\u810a\u810b\u810c\u810d\u810e\u810f\u8110\u8111\u8112\u8113\u8114\u8115\u8116\u8117\u8118\u8119\u811a\u811b\u811c\u811d\u811e\u811f\u8120\u8121\u8122\u8123\u8124\u8125\u8126\u8127\u8128\u8129\u812a\u812b\u812c\u812d\u812e\u812f\u8130\u8131\u8132\u8133\u8134\u8135\u8136\u8137\u8138\u8139\u813a\u813b\u813c\u813d\u813e\u813f\u8140\u8141\u8142\u8143\u8144\u8145\u8146\u8147\u8148\u8149\u814a\u814b\u814c\u814d\u814e\u814f\u8150\u8151\u8152\u8153\u8154\u8155\u8156\u8157\u8158\u8159\u815a\u815b\u815c\u815d\u815e\u815f\u8160\u8161\u8162\u8163\u8164\u8165\u8166\u8167\u8168\u8169\u816a\u816b\u816c\u816d\u816e\u816f\u8170\u8171\u8172\u8173\u8174\u8175\u8176\u8177\u8178\u8179\u817a\u817b\u817c\u817d\u817e\u817f\u8180\u8181\u8182\u8183\u8184\u8185\u8186\u8187\u8188\u8189\u818a\u818b\u818c\u818d\u818e\u818f\u8190\u8191\u8192\u8193\u8194\u8195\u8196\u8197\u8198\u8199\u819a\u819b\u819c\u819d\u819e\u819f\u81a0\u81a1\u81a2\u81a3\u81a4\u81a5\u81a6\u81a7\u81a8\u81a9\u81aa\u81ab\u81ac\u81ad\u81ae\u81af\u81b0\u81b1\u81b2\u81b3\u81b4\u81b5\u81b6\u81b7\u81b8\u81b9\u81ba\u81bb\u81bc\u81bd\u81be\u81bf\u81c0\u81c1\u81c2\u81c3\u81c4\u81c5\u81c6\u81c7\u81c8\u81c9\u81ca\u81cb\u81cc\u81cd\u81ce\u81cf\u81d0\u81d1\u81d2\u81d3\u81d4\u81d5\u81d6\u81d7\u81d8\u81d9\u81da
\u81db\u81dc\u81dd\u81de\u81df\u81e0\u81e1\u81e2\u81e3\u81e4\u81e5\u81e6\u81e7\u81e8\u81e9\u81ea\u81eb\u81ec\u81ed\u81ee\u81ef\u81f0\u81f1\u81f2\u81f3\u81f4\u81f5\u81f6\u81f7\u81f8\u81f9\u81fa\u81fb\u81fc\u81fd\u81fe\u81ff\u8200\u8201\u8202\u8203\u8204\u8205\u8206\u8207\u8208\u8209\u820a\u820b\u820c\u820d\u820e\u820f\u8210\u8211\u8212\u8213\u8214\u8215\u8216\u8217\u8218\u8219\u821a\u821b\u821c\u821d\u821e\u821f\u8220\u8221\u8222\u8223\u8224\u8225\u8226\u8227\u8228\u8229\u822a\u822b\u822c\u822d\u822e\u822f\u8230\u8231\u8232\u8233\u8234\u8235\u8236\u8237\u8238\u8239\u823a\u823b\u823c\u823d\u823e\u823f\u8240\u8241\u8242\u8243\u8244\u8245\u8246\u8247\u8248\u8249\u824a\u824b\u824c\u824d\u824e\u824f\u8250\u8251\u8252\u8253\u8254\u8255\u8256\u8257\u8258\u8259\u825a\u825b\u825c\u825d\u825e\u825f\u8260\u8261\u8262\u8263\u8264\u8265\u8266\u8267\u8268\u8269\u826a\u826b\u826c\u826d\u826e\u826f\u8270\u8271\u8272\u8273\u8274\u8275\u8276\u8277\u8278\u8279\u827a\u827b\u827c\u827d\u827e\u827f\u8280\u8281\u8282\u8283\u8284\u8285\u8286\u8287\u8288\u8289\u828a\u828b\u828c\u828d\u828e\u828f\u8290\u8291\u8292\u8293\u8294\u8295\u8296\u8297\u8298\u8299\u829a\u829b\u829c\u829d\u829e\u829f\u82a0\u82a1\u82a2\u82a3\u82a4\u82a5\u82a6\u82a7\u82a8\u82a9\u82aa\u82ab\u82ac\u82ad\u82ae\u82af\u82b0\u82b1\u82b2\u82b3\u82b4\u82b5\u82b6\u82b7\u82b8\u82b9\u82ba\u82bb\u82bc\u82bd\u82be\u82bf\u82c0\u82c1\u82c2\u82c3\u82c4\u82c5\u82c6\u82c7\u82c8\u82c9\u82ca\u82cb\u82cc\u82cd\u82ce\u82cf\u82d0\u82d1\u82d2\u82d3\u82d4\u82d5\u82d6\u82d7\u82d8\u82d9\u82da\u82db\u82dc\u82dd\u82de\u82df\u82e0\u82e1\u82e2\u82e3\u82e4\u82e5\u82e6\u82e7\u82e8\u82e9\u82ea\u82eb\u82ec\u82ed\u82ee\u82ef\u82f0\u82f1\u82f2\u82f3\u82f4\u82f5\u82f6\u82f7\u82f8\u82f9\u82fa\u82fb\u82fc\u82fd\u82fe\u82ff\u8300\u8301\u8302\u8303\u8304\u8305\u8306\u8307\u8308\u8309\u830a\u830b\u830c\u830d\u830e\u830f\u8310\u8311\u8312\u8313\u8314\u8315\u8316\u8317\u8318\u8319\u831a\u831b\u831c\u831d\u831e\u831f\u8320\u8321\u8322\u8323\u8324\u8325\u8326\u8327\u
8328\u8329\u832a\u832b\u832c\u832d\u832e\u832f\u8330\u8331\u8332\u8333\u8334\u8335\u8336\u8337\u8338\u8339\u833a\u833b\u833c\u833d\u833e\u833f\u8340\u8341\u8342\u8343\u8344\u8345\u8346\u8347\u8348\u8349\u834a\u834b\u834c\u834d\u834e\u834f\u8350\u8351\u8352\u8353\u8354\u8355\u8356\u8357\u8358\u8359\u835a\u835b\u835c\u835d\u835e\u835f\u8360\u8361\u8362\u8363\u8364\u8365\u8366\u8367\u8368\u8369\u836a\u836b\u836c\u836d\u836e\u836f\u8370\u8371\u8372\u8373\u8374\u8375\u8376\u8377\u8378\u8379\u837a\u837b\u837c\u837d\u837e\u837f\u8380\u8381\u8382\u8383\u8384\u8385\u8386\u8387\u8388\u8389\u838a\u838b\u838c\u838d\u838e\u838f\u8390\u8391\u8392\u8393\u8394\u8395\u8396\u8397\u8398\u8399\u839a\u839b\u839c\u839d\u839e\u839f\u83a0\u83a1\u83a2\u83a3\u83a4\u83a5\u83a6\u83a7\u83a8\u83a9\u83aa\u83ab\u83ac\u83ad\u83ae\u83af\u83b0\u83b1\u83b2\u83b3\u83b4\u83b5\u83b6\u83b7\u83b8\u83b9\u83ba\u83bb\u83bc\u83bd\u83be\u83bf\u83c0\u83c1\u83c2\u83c3\u83c4\u83c5\u83c6\u83c7\u83c8\u83c9\u83ca\u83cb\u83cc\u83cd\u83ce\u83cf\u83d0\u83d1\u83d2\u83d3\u83d4\u83d5\u83d6\u83d7\u83d8\u83d9\u83da\u83db\u83dc\u83dd\u83de\u83df\u83e0\u83e1\u83e2\u83e3\u83e4\u83e5\u83e6\u83e7\u83e8\u83e9\u83ea\u83eb\u83ec\u83ed\u83ee\u83ef\u83f0\u83f1\u83f2\u83f3\u83f4\u83f5\u83f6\u83f7\u83f8\u83f9\u83fa\u83fb\u83fc\u83fd\u83fe\u83ff\u8400\u8401\u8402\u8403\u8404\u8405\u8406\u8407\u8408\u8409\u840a\u840b\u840c\u840d\u840e\u840f\u8410\u8411\u8412\u8413\u8414\u8415\u8416\u8417\u8418\u8419\u841a\u841b\u841c\u841d\u841e\u841f\u8420\u8421\u8422\u8423\u8424\u8425\u8426\u8427\u8428\u8429\u842a\u842b\u842c\u842d\u842e\u842f\u8430\u8431\u8432\u8433\u8434\u8435\u8436\u8437\u8438\u8439\u843a\u843b\u843c\u843d\u843e\u843f\u8440\u8441\u8442\u8443\u8444\u8445\u8446\u8447\u8448\u8449\u844a\u844b\u844c\u844d\u844e\u844f\u8450\u8451\u8452\u8453\u8454\u8455\u8456\u8457\u8458\u8459\u845a\u845b\u845c\u845d\u845e\u845f\u8460\u8461\u8462\u8463\u8464\u8465\u8466\u8467\u8468\u8469\u846a\u846b\u846c\u846d\u846e\u846f\u8470\u8471\u8472\u8473\u8474\u84
75\u8476\u8477\u8478\u8479\u847a\u847b\u847c\u847d\u847e\u847f\u8480\u8481\u8482\u8483\u8484\u8485\u8486\u8487\u8488\u8489\u848a\u848b\u848c\u848d\u848e\u848f\u8490\u8491\u8492\u8493\u8494\u8495\u8496\u8497\u8498\u8499\u849a\u849b\u849c\u849d\u849e\u849f\u84a0\u84a1\u84a2\u84a3\u84a4\u84a5\u84a6\u84a7\u84a8\u84a9\u84aa\u84ab\u84ac\u84ad\u84ae\u84af\u84b0\u84b1\u84b2\u84b3\u84b4\u84b5\u84b6\u84b7\u84b8\u84b9\u84ba\u84bb\u84bc\u84bd\u84be\u84bf\u84c0\u84c1\u84c2\u84c3\u84c4\u84c5\u84c6\u84c7\u84c8\u84c9\u84ca\u84cb\u84cc\u84cd\u84ce\u84cf\u84d0\u84d1\u84d2\u84d3\u84d4\u84d5\u84d6\u84d7\u84d8\u84d9\u84da\u84db\u84dc\u84dd\u84de\u84df\u84e0\u84e1\u84e2\u84e3\u84e4\u84e5\u84e6\u84e7\u84e8\u84e9\u84ea\u84eb\u84ec\u84ed\u84ee\u84ef\u84f0\u84f1\u84f2\u84f3\u84f4\u84f5\u84f6\u84f7\u84f8\u84f9\u84fa\u84fb\u84fc\u84fd\u84fe\u84ff\u8500\u8501\u8502\u8503\u8504\u8505\u8506\u8507\u8508\u8509\u850a\u850b\u850c\u850d\u850e\u850f\u8510\u8511\u8512\u8513\u8514\u8515\u8516\u8517\u8518\u8519\u851a\u851b\u851c\u851d\u851e\u851f\u8520\u8521\u8522\u8523\u8524\u8525\u8526\u8527\u8528\u8529\u852a\u852b\u852c\u852d\u852e\u852f\u8530\u8531\u8532\u8533\u8534\u8535\u8536\u8537\u8538\u8539\u853a\u853b\u853c\u853d\u853e\u853f\u8540\u8541\u8542\u8543\u8544\u8545\u8546\u8547\u8548\u8549\u854a\u854b\u854c\u854d\u854e\u854f\u8550\u8551\u8552\u8553\u8554\u8555\u8556\u8557\u8558\u8559\u855a\u855b\u855c\u855d\u855e\u855f\u8560\u8561\u8562\u8563\u8564\u8565\u8566\u8567\u8568\u8569\u856a\u856b\u856c\u856d\u856e\u856f\u8570\u8571\u8572\u8573\u8574\u8575\u8576\u8577\u8578\u8579\u857a\u857b\u857c\u857d\u857e\u857f\u8580\u8581\u8582\u8583\u8584\u8585\u8586\u8587\u8588\u8589\u858a\u858b\u858c\u858d\u858e\u858f\u8590\u8591\u8592\u8593\u8594\u8595\u8596\u8597\u8598\u8599\u859a\u859b\u859c\u859d\u859e\u859f\u85a0\u85a1\u85a2\u85a3\u85a4\u85a5\u85a6\u85a7\u85a8\u85a9\u85aa\u85ab\u85ac\u85ad\u85ae\u85af\u85b0\u85b1\u85b2\u85b3\u85b4\u85b5\u85b6\u85b7\u85b8\u85b9\u85ba\u85bb\u85bc\u85bd\u85be\u85bf\u85c0\u85c1\u85c2
\u85c3\u85c4\u85c5\u85c6\u85c7\u85c8\u85c9\u85ca\u85cb\u85cc\u85cd\u85ce\u85cf\u85d0\u85d1\u85d2\u85d3\u85d4\u85d5\u85d6\u85d7\u85d8\u85d9\u85da\u85db\u85dc\u85dd\u85de\u85df\u85e0\u85e1\u85e2\u85e3\u85e4\u85e5\u85e6\u85e7\u85e8\u85e9\u85ea\u85eb\u85ec\u85ed\u85ee\u85ef\u85f0\u85f1\u85f2\u85f3\u85f4\u85f5\u85f6\u85f7\u85f8\u85f9\u85fa\u85fb\u85fc\u85fd\u85fe\u85ff\u8600\u8601\u8602\u8603\u8604\u8605\u8606\u8607\u8608\u8609\u860a\u860b\u860c\u860d\u860e\u860f\u8610\u8611\u8612\u8613\u8614\u8615\u8616\u8617\u8618\u8619\u861a\u861b\u861c\u861d\u861e\u861f\u8620\u8621\u8622\u8623\u8624\u8625\u8626\u8627\u8628\u8629\u862a\u862b\u862c\u862d\u862e\u862f\u8630\u8631\u8632\u8633\u8634\u8635\u8636\u8637\u8638\u8639\u863a\u863b\u863c\u863d\u863e\u863f\u8640\u8641\u8642\u8643\u8644\u8645\u8646\u8647\u8648\u8649\u864a\u864b\u864c\u864d\u864e\u864f\u8650\u8651\u8652\u8653\u8654\u8655\u8656\u8657\u8658\u8659\u865a\u865b\u865c\u865d\u865e\u865f\u8660\u8661\u8662\u8663\u8664\u8665\u8666\u8667\u8668\u8669\u866a\u866b\u866c\u866d\u866e\u866f\u8670\u8671\u8672\u8673\u8674\u8675\u8676\u8677\u8678\u8679\u867a\u867b\u867c\u867d\u867e\u867f\u8680\u8681\u8682\u8683\u8684\u8685\u8686\u8687\u8688\u8689\u868a\u868b\u868c\u868d\u868e\u868f\u8690\u8691\u8692\u8693\u8694\u8695\u8696\u8697\u8698\u8699\u869a\u869b\u869c\u869d\u869e\u869f\u86a0\u86a1\u86a2\u86a3\u86a4\u86a5\u86a6\u86a7\u86a8\u86a9\u86aa\u86ab\u86ac\u86ad\u86ae\u86af\u86b0\u86b1\u86b2\u86b3\u86b4\u86b5\u86b6\u86b7\u86b8\u86b9\u86ba\u86bb\u86bc\u86bd\u86be\u86bf\u86c0\u86c1\u86c2\u86c3\u86c4\u86c5\u86c6\u86c7\u86c8\u86c9\u86ca\u86cb\u86cc\u86cd\u86ce\u86cf\u86d0\u86d1\u86d2\u86d3\u86d4\u86d5\u86d6\u86d7\u86d8\u86d9\u86da\u86db\u86dc\u86dd\u86de\u86df\u86e0\u86e1\u86e2\u86e3\u86e4\u86e5\u86e6\u86e7\u86e8\u86e9\u86ea\u86eb\u86ec\u86ed\u86ee\u86ef\u86f0\u86f1\u86f2\u86f3\u86f4\u86f5\u86f6\u86f7\u86f8\u86f9\u86fa\u86fb\u86fc\u86fd\u86fe\u86ff\u8700\u8701\u8702\u8703\u8704\u8705\u8706\u8707\u8708\u8709\u870a\u870b\u870c\u870d\u870e\u870f\u
8710\u8711\u8712\u8713\u8714\u8715\u8716\u8717\u8718\u8719\u871a\u871b\u871c\u871d\u871e\u871f\u8720\u8721\u8722\u8723\u8724\u8725\u8726\u8727\u8728\u8729\u872a\u872b\u872c\u872d\u872e\u872f\u8730\u8731\u8732\u8733\u8734\u8735\u8736\u8737\u8738\u8739\u873a\u873b\u873c\u873d\u873e\u873f\u8740\u8741\u8742\u8743\u8744\u8745\u8746\u8747\u8748\u8749\u874a\u874b\u874c\u874d\u874e\u874f\u8750\u8751\u8752\u8753\u8754\u8755\u8756\u8757\u8758\u8759\u875a\u875b\u875c\u875d\u875e\u875f\u8760\u8761\u8762\u8763\u8764\u8765\u8766\u8767\u8768\u8769\u876a\u876b\u876c\u876d\u876e\u876f\u8770\u8771\u8772\u8773\u8774\u8775\u8776\u8777\u8778\u8779\u877a\u877b\u877c\u877d\u877e\u877f\u8780\u8781\u8782\u8783\u8784\u8785\u8786\u8787\u8788\u8789\u878a\u878b\u878c\u878d\u878e\u878f\u8790\u8791\u8792\u8793\u8794\u8795\u8796\u8797\u8798\u8799\u879a\u879b\u879c\u879d\u879e\u879f\u87a0\u87a1\u87a2\u87a3\u87a4\u87a5\u87a6\u87a7\u87a8\u87a9\u87aa\u87ab\u87ac\u87ad\u87ae\u87af\u87b0\u87b1\u87b2\u87b3\u87b4\u87b5\u87b6\u87b7\u87b8\u87b9\u87ba\u87bb\u87bc\u87bd\u87be\u87bf\u87c0\u87c1\u87c2\u87c3\u87c4\u87c5\u87c6\u87c7\u87c8\u87c9\u87ca\u87cb\u87cc\u87cd\u87ce\u87cf\u87d0\u87d1\u87d2\u87d3\u87d4\u87d5\u87d6\u87d7\u87d8\u87d9\u87da\u87db\u87dc\u87dd\u87de\u87df\u87e0\u87e1\u87e2\u87e3\u87e4\u87e5\u87e6\u87e7\u87e8\u87e9\u87ea\u87eb\u87ec\u87ed\u87ee\u87ef\u87f0\u87f1\u87f2\u87f3\u87f4\u87f5\u87f6\u87f7\u87f8\u87f9\u87fa\u87fb\u87fc\u87fd\u87fe\u87ff\u8800\u8801\u8802\u8803\u8804\u8805\u8806\u8807\u8808\u8809\u880a\u880b\u880c\u880d\u880e\u880f\u8810\u8811\u8812\u8813\u8814\u8815\u8816\u8817\u8818\u8819\u881a\u881b\u881c\u881d\u881e\u881f\u8820\u8821\u8822\u8823\u8824\u8825\u8826\u8827\u8828\u8829\u882a\u882b\u882c\u882d\u882e\u882f\u8830\u8831\u8832\u8833\u8834\u8835\u8836\u8837\u8838\u8839\u883a\u883b\u883c\u883d\u883e\u883f\u8840\u8841\u8842\u8843\u8844\u8845\u8846\u8847\u8848\u8849\u884a\u884b\u884c\u884d\u884e\u884f\u8850\u8851\u8852\u8853\u8854\u8855\u8856\u8857\u8858\u8859\u885a\u885b\u885c\u88
5d\u885e\u885f\u8860\u8861\u8862\u8863\u8864\u8865\u8866\u8867\u8868\u8869\u886a\u886b\u886c\u886d\u886e\u886f\u8870\u8871\u8872\u8873\u8874\u8875\u8876\u8877\u8878\u8879\u887a\u887b\u887c\u887d\u887e\u887f\u8880\u8881\u8882\u8883\u8884\u8885\u8886\u8887\u8888\u8889\u888a\u888b\u888c\u888d\u888e\u888f\u8890\u8891\u8892\u8893\u8894\u8895\u8896\u8897\u8898\u8899\u889a\u889b\u889c\u889d\u889e\u889f\u88a0\u88a1\u88a2\u88a3\u88a4\u88a5\u88a6\u88a7\u88a8\u88a9\u88aa\u88ab\u88ac\u88ad\u88ae\u88af\u88b0\u88b1\u88b2\u88b3\u88b4\u88b5\u88b6\u88b7\u88b8\u88b9\u88ba\u88bb\u88bc\u88bd\u88be\u88bf\u88c0\u88c1\u88c2\u88c3\u88c4\u88c5\u88c6\u88c7\u88c8\u88c9\u88ca\u88cb\u88cc\u88cd\u88ce\u88cf\u88d0\u88d1\u88d2\u88d3\u88d4\u88d5\u88d6\u88d7\u88d8\u88d9\u88da\u88db\u88dc\u88dd\u88de\u88df\u88e0\u88e1\u88e2\u88e3\u88e4\u88e5\u88e6\u88e7\u88e8\u88e9\u88ea\u88eb\u88ec\u88ed\u88ee\u88ef\u88f0\u88f1\u88f2\u88f3\u88f4\u88f5\u88f6\u88f7\u88f8\u88f9\u88fa\u88fb\u88fc\u88fd\u88fe\u88ff\u8900\u8901\u8902\u8903\u8904\u8905\u8906\u8907\u8908\u8909\u890a\u890b\u890c\u890d\u890e\u890f\u8910\u8911\u8912\u8913\u8914\u8915\u8916\u8917\u8918\u8919\u891a\u891b\u891c\u891d\u891e\u891f\u8920\u8921\u8922\u8923\u8924\u8925\u8926\u8927\u8928\u8929\u892a\u892b\u892c\u892d\u892e\u892f\u8930\u8931\u8932\u8933\u8934\u8935\u8936\u8937\u8938\u8939\u893a\u893b\u893c\u893d\u893e\u893f\u8940\u8941\u8942\u8943\u8944\u8945\u8946\u8947\u8948\u8949\u894a\u894b\u894c\u894d\u894e\u894f\u8950\u8951\u8952\u8953\u8954\u8955\u8956\u8957\u8958\u8959\u895a\u895b\u895c\u895d\u895e\u895f\u8960\u8961\u8962\u8963\u8964\u8965\u8966\u8967\u8968\u8969\u896a\u896b\u896c\u896d\u896e\u896f\u8970\u8971\u8972\u8973\u8974\u8975\u8976\u8977\u8978\u8979\u897a\u897b\u897c\u897d\u897e\u897f\u8980\u8981\u8982\u8983\u8984\u8985\u8986\u8987\u8988\u8989\u898a\u898b\u898c\u898d\u898e\u898f\u8990\u8991\u8992\u8993\u8994\u8995\u8996\u8997\u8998\u8999\u899a\u899b\u899c\u899d\u899e\u899f\u89a0\u89a1\u89a2\u89a3\u89a4\u89a5\u89a6\u89a7\u89a8\u89a9\u89aa
\u89ab\u89ac\u89ad\u89ae\u89af\u89b0\u89b1\u89b2\u89b3\u89b4\u89b5\u89b6\u89b7\u89b8\u89b9\u89ba\u89bb\u89bc\u89bd\u89be\u89bf\u89c0\u89c1\u89c2\u89c3\u89c4\u89c5\u89c6\u89c7\u89c8\u89c9\u89ca\u89cb\u89cc\u89cd\u89ce\u89cf\u89d0\u89d1\u89d2\u89d3\u89d4\u89d5\u89d6\u89d7\u89d8\u89d9\u89da\u89db\u89dc\u89dd\u89de\u89df\u89e0\u89e1\u89e2\u89e3\u89e4\u89e5\u89e6\u89e7\u89e8\u89e9\u89ea\u89eb\u89ec\u89ed\u89ee\u89ef\u89f0\u89f1\u89f2\u89f3\u89f4\u89f5\u89f6\u89f7\u89f8\u89f9\u89fa\u89fb\u89fc\u89fd\u89fe\u89ff\u8a00\u8a01\u8a02\u8a03\u8a04\u8a05\u8a06\u8a07\u8a08\u8a09\u8a0a\u8a0b\u8a0c\u8a0d\u8a0e\u8a0f\u8a10\u8a11\u8a12\u8a13\u8a14\u8a15\u8a16\u8a17\u8a18\u8a19\u8a1a\u8a1b\u8a1c\u8a1d\u8a1e\u8a1f\u8a20\u8a21\u8a22\u8a23\u8a24\u8a25\u8a26\u8a27\u8a28\u8a29\u8a2a\u8a2b\u8a2c\u8a2d\u8a2e\u8a2f\u8a30\u8a31\u8a32\u8a33\u8a34\u8a35\u8a36\u8a37\u8a38\u8a39\u8a3a\u8a3b\u8a3c\u8a3d\u8a3e\u8a3f\u8a40\u8a41\u8a42\u8a43\u8a44\u8a45\u8a46\u8a47\u8a48\u8a49\u8a4a\u8a4b\u8a4c\u8a4d\u8a4e\u8a4f\u8a50\u8a51\u8a52\u8a53\u8a54\u8a55\u8a56\u8a57\u8a58\u8a59\u8a5a\u8a5b\u8a5c\u8a5d\u8a5e\u8a5f\u8a60\u8a61\u8a62\u8a63\u8a64\u8a65\u8a66\u8a67\u8a68\u8a69\u8a6a\u8a6b\u8a6c\u8a6d\u8a6e\u8a6f\u8a70\u8a71\u8a72\u8a73\u8a74\u8a75\u8a76\u8a77\u8a78\u8a79\u8a7a\u8a7b\u8a7c\u8a7d\u8a7e\u8a7f\u8a80\u8a81\u8a82\u8a83\u8a84\u8a85\u8a86\u8a87\u8a88\u8a89\u8a8a\u8a8b\u8a8c\u8a8d\u8a8e\u8a8f\u8a90\u8a91\u8a92\u8a93\u8a94\u8a95\u8a96\u8a97\u8a98\u8a99\u8a9a\u8a9b\u8a9c\u8a9d\u8a9e\u8a9f\u8aa0\u8aa1\u8aa2\u8aa3\u8aa4\u8aa5\u8aa6\u8aa7\u8aa8\u8aa9\u8aaa\u8aab\u8aac\u8aad\u8aae\u8aaf\u8ab0\u8ab1\u8ab2\u8ab3\u8ab4\u8ab5\u8ab6\u8ab7\u8ab8\u8ab9\u8aba\u8abb\u8abc\u8abd\u8abe\u8abf\u8ac0\u8ac1\u8ac2\u8ac3\u8ac4\u8ac5\u8ac6\u8ac7\u8ac8\u8ac9\u8aca\u8acb\u8acc\u8acd\u8ace\u8acf\u8ad0\u8ad1\u8ad2\u8ad3\u8ad4\u8ad5\u8ad6\u8ad7\u8ad8\u8ad9\u8ada\u8adb\u8adc\u8add\u8ade\u8adf\u8ae0\u8ae1\u8ae2\u8ae3\u8ae4\u8ae5\u8ae6\u8ae7\u8ae8\u8ae9\u8aea\u8aeb\u8aec\u8aed\u8aee\u8aef\u8af0\u8af1\u8af2\u8af3\u8af4\u8af5\u8af6\u8af7\u
8af8\u8af9\u8afa\u8afb\u8afc\u8afd\u8afe\u8aff\u8b00\u8b01\u8b02\u8b03\u8b04\u8b05\u8b06\u8b07\u8b08\u8b09\u8b0a\u8b0b\u8b0c\u8b0d\u8b0e\u8b0f\u8b10\u8b11\u8b12\u8b13\u8b14\u8b15\u8b16\u8b17\u8b18\u8b19\u8b1a\u8b1b\u8b1c\u8b1d\u8b1e\u8b1f\u8b20\u8b21\u8b22\u8b23\u8b24\u8b25\u8b26\u8b27\u8b28\u8b29\u8b2a\u8b2b\u8b2c\u8b2d\u8b2e\u8b2f\u8b30\u8b31\u8b32\u8b33\u8b34\u8b35\u8b36\u8b37\u8b38\u8b39\u8b3a\u8b3b\u8b3c\u8b3d\u8b3e\u8b3f\u8b40\u8b41\u8b42\u8b43\u8b44\u8b45\u8b46\u8b47\u8b48\u8b49\u8b4a\u8b4b\u8b4c\u8b4d\u8b4e\u8b4f\u8b50\u8b51\u8b52\u8b53\u8b54\u8b55\u8b56\u8b57\u8b58\u8b59\u8b5a\u8b5b\u8b5c\u8b5d\u8b5e\u8b5f\u8b60\u8b61\u8b62\u8b63\u8b64\u8b65\u8b66\u8b67\u8b68\u8b69\u8b6a\u8b6b\u8b6c\u8b6d\u8b6e\u8b6f\u8b70\u8b71\u8b72\u8b73\u8b74\u8b75\u8b76\u8b77\u8b78\u8b79\u8b7a\u8b7b\u8b7c\u8b7d\u8b7e\u8b7f\u8b80\u8b81\u8b82\u8b83\u8b84\u8b85\u8b86\u8b87\u8b88\u8b89\u8b8a\u8b8b\u8b8c\u8b8d\u8b8e\u8b8f\u8b90\u8b91\u8b92\u8b93\u8b94\u8b95\u8b96\u8b97\u8b98\u8b99\u8b9a\u8b9b\u8b9c\u8b9d\u8b9e\u8b9f\u8ba0\u8ba1\u8ba2\u8ba3\u8ba4\u8ba5\u8ba6\u8ba7\u8ba8\u8ba9\u8baa\u8bab\u8bac\u8bad\u8bae\u8baf\u8bb0\u8bb1\u8bb2\u8bb3\u8bb4\u8bb5\u8bb6\u8bb7\u8bb8\u8bb9\u8bba\u8bbb\u8bbc\u8bbd\u8bbe\u8bbf\u8bc0\u8bc1\u8bc2\u8bc3\u8bc4\u8bc5\u8bc6\u8bc7\u8bc8\u8bc9\u8bca\u8bcb\u8bcc\u8bcd\u8bce\u8bcf\u8bd0\u8bd1\u8bd2\u8bd3\u8bd4\u8bd5\u8bd6\u8bd7\u8bd8\u8bd9\u8bda\u8bdb\u8bdc\u8bdd\u8bde\u8bdf\u8be0\u8be1\u8be2\u8be3\u8be4\u8be5\u8be6\u8be7\u8be8\u8be9\u8bea\u8beb\u8bec\u8bed\u8bee\u8bef\u8bf0\u8bf1\u8bf2\u8bf3\u8bf4\u8bf5\u8bf6\u8bf7\u8bf8\u8bf9\u8bfa\u8bfb\u8bfc\u8bfd\u8bfe\u8bff\u8c00\u8c01\u8c02\u8c03\u8c04\u8c05\u8c06\u8c07\u8c08\u8c09\u8c0a\u8c0b\u8c0c\u8c0d\u8c0e\u8c0f\u8c10\u8c11\u8c12\u8c13\u8c14\u8c15\u8c16\u8c17\u8c18\u8c19\u8c1a\u8c1b\u8c1c\u8c1d\u8c1e\u8c1f\u8c20\u8c21\u8c22\u8c23\u8c24\u8c25\u8c26\u8c27\u8c28\u8c29\u8c2a\u8c2b\u8c2c\u8c2d\u8c2e\u8c2f\u8c30\u8c31\u8c32\u8c33\u8c34\u8c35\u8c36\u8c37\u8c38\u8c39\u8c3a\u8c3b\u8c3c\u8c3d\u8c3e\u8c3f\u8c40\u8c41\u8c42\u8c43\u8c44\u8c
45\u8c46\u8c47\u8c48\u8c49\u8c4a\u8c4b\u8c4c\u8c4d\u8c4e\u8c4f\u8c50\u8c51\u8c52\u8c53\u8c54\u8c55\u8c56\u8c57\u8c58\u8c59\u8c5a\u8c5b\u8c5c\u8c5d\u8c5e\u8c5f\u8c60\u8c61\u8c62\u8c63\u8c64\u8c65\u8c66\u8c67\u8c68\u8c69\u8c6a\u8c6b\u8c6c\u8c6d\u8c6e\u8c6f\u8c70\u8c71\u8c72\u8c73\u8c74\u8c75\u8c76\u8c77\u8c78\u8c79\u8c7a\u8c7b\u8c7c\u8c7d\u8c7e\u8c7f\u8c80\u8c81\u8c82\u8c83\u8c84\u8c85\u8c86\u8c87\u8c88\u8c89\u8c8a\u8c8b\u8c8c\u8c8d\u8c8e\u8c8f\u8c90\u8c91\u8c92\u8c93\u8c94\u8c95\u8c96\u8c97\u8c98\u8c99\u8c9a\u8c9b\u8c9c\u8c9d\u8c9e\u8c9f\u8ca0\u8ca1\u8ca2\u8ca3\u8ca4\u8ca5\u8ca6\u8ca7\u8ca8\u8ca9\u8caa\u8cab\u8cac\u8cad\u8cae\u8caf\u8cb0\u8cb1\u8cb2\u8cb3\u8cb4\u8cb5\u8cb6\u8cb7\u8cb8\u8cb9\u8cba\u8cbb\u8cbc\u8cbd\u8cbe\u8cbf\u8cc0\u8cc1\u8cc2\u8cc3\u8cc4\u8cc5\u8cc6\u8cc7\u8cc8\u8cc9\u8cca\u8ccb\u8ccc\u8ccd\u8cce\u8ccf\u8cd0\u8cd1\u8cd2\u8cd3\u8cd4\u8cd5\u8cd6\u8cd7\u8cd8\u8cd9\u8cda\u8cdb\u8cdc\u8cdd\u8cde\u8cdf\u8ce0\u8ce1\u8ce2\u8ce3\u8ce4\u8ce5\u8ce6\u8ce7\u8ce8\u8ce9\u8cea\u8ceb\u8cec\u8ced\u8cee\u8cef\u8cf0\u8cf1\u8cf2\u8cf3\u8cf4\u8cf5\u8cf6\u8cf7\u8cf8\u8cf9\u8cfa\u8cfb\u8cfc\u8cfd\u8cfe\u8cff\u8d00\u8d01\u8d02\u8d03\u8d04\u8d05\u8d06\u8d07\u8d08\u8d09\u8d0a\u8d0b\u8d0c\u8d0d\u8d0e\u8d0f\u8d10\u8d11\u8d12\u8d13\u8d14\u8d15\u8d16\u8d17\u8d18\u8d19\u8d1a\u8d1b\u8d1c\u8d1d\u8d1e\u8d1f\u8d20\u8d21\u8d22\u8d23\u8d24\u8d25\u8d26\u8d27\u8d28\u8d29\u8d2a\u8d2b\u8d2c\u8d2d\u8d2e\u8d2f\u8d30\u8d31\u8d32\u8d33\u8d34\u8d35\u8d36\u8d37\u8d38\u8d39\u8d3a\u8d3b\u8d3c\u8d3d\u8d3e\u8d3f\u8d40\u8d41\u8d42\u8d43\u8d44\u8d45\u8d46\u8d47\u8d48\u8d49\u8d4a\u8d4b\u8d4c\u8d4d\u8d4e\u8d4f\u8d50\u8d51\u8d52\u8d53\u8d54\u8d55\u8d56\u8d57\u8d58\u8d59\u8d5a\u8d5b\u8d5c\u8d5d\u8d5e\u8d5f\u8d60\u8d61\u8d62\u8d63\u8d64\u8d65\u8d66\u8d67\u8d68\u8d69\u8d6a\u8d6b\u8d6c\u8d6d\u8d6e\u8d6f\u8d70\u8d71\u8d72\u8d73\u8d74\u8d75\u8d76\u8d77\u8d78\u8d79\u8d7a\u8d7b\u8d7c\u8d7d\u8d7e\u8d7f\u8d80\u8d81\u8d82\u8d83\u8d84\u8d85\u8d86\u8d87\u8d88\u8d89\u8d8a\u8d8b\u8d8c\u8d8d\u8d8e\u8d8f\u8d90\u8d91\u8d92
\u8d93\u8d94\u8d95\u8d96\u8d97\u8d98\u8d99\u8d9a\u8d9b\u8d9c\u8d9d\u8d9e\u8d9f\u8da0\u8da1\u8da2\u8da3\u8da4\u8da5\u8da6\u8da7\u8da8\u8da9\u8daa\u8dab\u8dac\u8dad\u8dae\u8daf\u8db0\u8db1\u8db2\u8db3\u8db4\u8db5\u8db6\u8db7\u8db8\u8db9\u8dba\u8dbb\u8dbc\u8dbd\u8dbe\u8dbf\u8dc0\u8dc1\u8dc2\u8dc3\u8dc4\u8dc5\u8dc6\u8dc7\u8dc8\u8dc9\u8dca\u8dcb\u8dcc\u8dcd\u8dce\u8dcf\u8dd0\u8dd1\u8dd2\u8dd3\u8dd4\u8dd5\u8dd6\u8dd7\u8dd8\u8dd9\u8dda\u8ddb\u8ddc\u8ddd\u8dde\u8ddf\u8de0\u8de1\u8de2\u8de3\u8de4\u8de5\u8de6\u8de7\u8de8\u8de9\u8dea\u8deb\u8dec\u8ded\u8dee\u8def\u8df0\u8df1\u8df2\u8df3\u8df4\u8df5\u8df6\u8df7\u8df8\u8df9\u8dfa\u8dfb\u8dfc\u8dfd\u8dfe\u8dff\u8e00\u8e01\u8e02\u8e03\u8e04\u8e05\u8e06\u8e07\u8e08\u8e09\u8e0a\u8e0b\u8e0c\u8e0d\u8e0e\u8e0f\u8e10\u8e11\u8e12\u8e13\u8e14\u8e15\u8e16\u8e17\u8e18\u8e19\u8e1a\u8e1b\u8e1c\u8e1d\u8e1e\u8e1f\u8e20\u8e21\u8e22\u8e23\u8e24\u8e25\u8e26\u8e27\u8e28\u8e29\u8e2a\u8e2b\u8e2c\u8e2d\u8e2e\u8e2f\u8e30\u8e31\u8e32\u8e33\u8e34\u8e35\u8e36\u8e37\u8e38\u8e39\u8e3a\u8e3b\u8e3c\u8e3d\u8e3e\u8e3f\u8e40\u8e41\u8e42\u8e43\u8e44\u8e45\u8e46\u8e47\u8e48\u8e49\u8e4a\u8e4b\u8e4c\u8e4d\u8e4e\u8e4f\u8e50\u8e51\u8e52\u8e53\u8e54\u8e55\u8e56\u8e57\u8e58\u8e59\u8e5a\u8e5b\u8e5c\u8e5d\u8e5e\u8e5f\u8e60\u8e61\u8e62\u8e63\u8e64\u8e65\u8e66\u8e67\u8e68\u8e69\u8e6a\u8e6b\u8e6c\u8e6d\u8e6e\u8e6f\u8e70\u8e71\u8e72\u8e73\u8e74\u8e75\u8e76\u8e77\u8e78\u8e79\u8e7a\u8e7b\u8e7c\u8e7d\u8e7e\u8e7f\u8e80\u8e81\u8e82\u8e83\u8e84\u8e85\u8e86\u8e87\u8e88\u8e89\u8e8a\u8e8b\u8e8c\u8e8d\u8e8e\u8e8f\u8e90\u8e91\u8e92\u8e93\u8e94\u8e95\u8e96\u8e97\u8e98\u8e99\u8e9a\u8e9b\u8e9c\u8e9d\u8e9e\u8e9f\u8ea0\u8ea1\u8ea2\u8ea3\u8ea4\u8ea5\u8ea6\u8ea7\u8ea8\u8ea9\u8eaa\u8eab\u8eac\u8ead\u8eae\u8eaf\u8eb0\u8eb1\u8eb2\u8eb3\u8eb4\u8eb5\u8eb6\u8eb7\u8eb8\u8eb9\u8eba\u8ebb\u8ebc\u8ebd\u8ebe\u8ebf\u8ec0\u8ec1\u8ec2\u8ec3\u8ec4\u8ec5\u8ec6\u8ec7\u8ec8\u8ec9\u8eca\u8ecb\u8ecc\u8ecd\u8ece\u8ecf\u8ed0\u8ed1\u8ed2\u8ed3\u8ed4\u8ed5\u8ed6\u8ed7\u8ed8\u8ed9\u8eda\u8edb\u8edc\u8edd\u8ede\u8edf\u
8ee0\u8ee1\u8ee2\u8ee3\u8ee4\u8ee5\u8ee6\u8ee7\u8ee8\u8ee9\u8eea\u8eeb\u8eec\u8eed\u8eee\u8eef\u8ef0\u8ef1\u8ef2\u8ef3\u8ef4\u8ef5\u8ef6\u8ef7\u8ef8\u8ef9\u8efa\u8efb\u8efc\u8efd\u8efe\u8eff\u8f00\u8f01\u8f02\u8f03\u8f04\u8f05\u8f06\u8f07\u8f08\u8f09\u8f0a\u8f0b\u8f0c\u8f0d\u8f0e\u8f0f\u8f10\u8f11\u8f12\u8f13\u8f14\u8f15\u8f16\u8f17\u8f18\u8f19\u8f1a\u8f1b\u8f1c\u8f1d\u8f1e\u8f1f\u8f20\u8f21\u8f22\u8f23\u8f24\u8f25\u8f26\u8f27\u8f28\u8f29\u8f2a\u8f2b\u8f2c\u8f2d\u8f2e\u8f2f\u8f30\u8f31\u8f32\u8f33\u8f34\u8f35\u8f36\u8f37\u8f38\u8f39\u8f3a\u8f3b\u8f3c\u8f3d\u8f3e\u8f3f\u8f40\u8f41\u8f42\u8f43\u8f44\u8f45\u8f46\u8f47\u8f48\u8f49\u8f4a\u8f4b\u8f4c\u8f4d\u8f4e\u8f4f\u8f50\u8f51\u8f52\u8f53\u8f54\u8f55\u8f56\u8f57\u8f58\u8f59\u8f5a\u8f5b\u8f5c\u8f5d\u8f5e\u8f5f\u8f60\u8f61\u8f62\u8f63\u8f64\u8f65\u8f66\u8f67\u8f68\u8f69\u8f6a\u8f6b\u8f6c\u8f6d\u8f6e\u8f6f\u8f70\u8f71\u8f72\u8f73\u8f74\u8f75\u8f76\u8f77\u8f78\u8f79\u8f7a\u8f7b\u8f7c\u8f7d\u8f7e\u8f7f\u8f80\u8f81\u8f82\u8f83\u8f84\u8f85\u8f86\u8f87\u8f88\u8f89\u8f8a\u8f8b\u8f8c\u8f8d\u8f8e\u8f8f\u8f90\u8f91\u8f92\u8f93\u8f94\u8f95\u8f96\u8f97\u8f98\u8f99\u8f9a\u8f9b\u8f9c\u8f9d\u8f9e\u8f9f\u8fa0\u8fa1\u8fa2\u8fa3\u8fa4\u8fa5\u8fa6\u8fa7\u8fa8\u8fa9\u8faa\u8fab\u8fac\u8fad\u8fae\u8faf\u8fb0\u8fb1\u8fb2\u8fb3\u8fb4\u8fb5\u8fb6\u8fb7\u8fb8\u8fb9\u8fba\u8fbb\u8fbc\u8fbd\u8fbe\u8fbf\u8fc0\u8fc1\u8fc2\u8fc3\u8fc4\u8fc5\u8fc6\u8fc7\u8fc8\u8fc9\u8fca\u8fcb\u8fcc\u8fcd\u8fce\u8fcf\u8fd0\u8fd1\u8fd2\u8fd3\u8fd4\u8fd5\u8fd6\u8fd7\u8fd8\u8fd9\u8fda\u8fdb\u8fdc\u8fdd\u8fde\u8fdf\u8fe0\u8fe1\u8fe2\u8fe3\u8fe4\u8fe5\u8fe6\u8fe7\u8fe8\u8fe9\u8fea\u8feb\u8fec\u8fed\u8fee\u8fef\u8ff0\u8ff1\u8ff2\u8ff3\u8ff4\u8ff5\u8ff6\u8ff7\u8ff8\u8ff9\u8ffa\u8ffb\u8ffc\u8ffd\u8ffe\u8fff\u9000\u9001\u9002\u9003\u9004\u9005\u9006\u9007\u9008\u9009\u900a\u900b\u900c\u900d\u900e\u900f\u9010\u9011\u9012\u9013\u9014\u9015\u9016\u9017\u9018\u9019\u901a\u901b\u901c\u901d\u901e\u901f\u9020\u9021\u9022\u9023\u9024\u9025\u9026\u9027\u9028\u9029\u902a\u902b\u902c\u90
2d\u902e\u902f\u9030\u9031\u9032\u9033\u9034\u9035\u9036\u9037\u9038\u9039\u903a\u903b\u903c\u903d\u903e\u903f\u9040\u9041\u9042\u9043\u9044\u9045\u9046\u9047\u9048\u9049\u904a\u904b\u904c\u904d\u904e\u904f\u9050\u9051\u9052\u9053\u9054\u9055\u9056\u9057\u9058\u9059\u905a\u905b\u905c\u905d\u905e\u905f\u9060\u9061\u9062\u9063\u9064\u9065\u9066\u9067\u9068\u9069\u906a\u906b\u906c\u906d\u906e\u906f\u9070\u9071\u9072\u9073\u9074\u9075\u9076\u9077\u9078\u9079\u907a\u907b\u907c\u907d\u907e\u907f\u9080\u9081\u9082\u9083\u9084\u9085\u9086\u9087\u9088\u9089\u908a\u908b\u908c\u908d\u908e\u908f\u9090\u9091\u9092\u9093\u9094\u9095\u9096\u9097\u9098\u9099\u909a\u909b\u909c\u909d\u909e\u909f\u90a0\u90a1\u90a2\u90a3\u90a4\u90a5\u90a6\u90a7\u90a8\u90a9\u90aa\u90ab\u90ac\u90ad\u90ae\u90af\u90b0\u90b1\u90b2\u90b3\u90b4\u90b5\u90b6\u90b7\u90b8\u90b9\u90ba\u90bb\u90bc\u90bd\u90be\u90bf\u90c0\u90c1\u90c2\u90c3\u90c4\u90c5\u90c6\u90c7\u90c8\u90c9\u90ca\u90cb\u90cc\u90cd\u90ce\u90cf\u90d0\u90d1\u90d2\u90d3\u90d4\u90d5\u90d6\u90d7\u90d8\u90d9\u90da\u90db\u90dc\u90dd\u90de\u90df\u90e0\u90e1\u90e2\u90e3\u90e4\u90e5\u90e6\u90e7\u90e8\u90e9\u90ea\u90eb\u90ec\u90ed\u90ee\u90ef\u90f0\u90f1\u90f2\u90f3\u90f4\u90f5\u90f6\u90f7\u90f8\u90f9\u90fa\u90fb\u90fc\u90fd\u90fe\u90ff\u9100\u9101\u9102\u9103\u9104\u9105\u9106\u9107\u9108\u9109\u910a\u910b\u910c\u910d\u910e\u910f\u9110\u9111\u9112\u9113\u9114\u9115\u9116\u9117\u9118\u9119\u911a\u911b\u911c\u911d\u911e\u911f\u9120\u9121\u9122\u9123\u9124\u9125\u9126\u9127\u9128\u9129\u912a\u912b\u912c\u912d\u912e\u912f\u9130\u9131\u9132\u9133\u9134\u9135\u9136\u9137\u9138\u9139\u913a\u913b\u913c\u913d\u913e\u913f\u9140\u9141\u9142\u9143\u9144\u9145\u9146\u9147\u9148\u9149\u914a\u914b\u914c\u914d\u914e\u914f\u9150\u9151\u9152\u9153\u9154\u9155\u9156\u9157\u9158\u9159\u915a\u915b\u915c\u915d\u915e\u915f\u9160\u9161\u9162\u9163\u9164\u9165\u9166\u9167\u9168\u9169\u916a\u916b\u916c\u916d\u916e\u916f\u9170\u9171\u9172\u9173\u9174\u9175\u9176\u9177\u9178\u9179\u917a
\u917b\u917c\u917d\u917e\u917f\u9180\u9181\u9182\u9183\u9184\u9185\u9186\u9187\u9188\u9189\u918a\u918b\u918c\u918d\u918e\u918f\u9190\u9191\u9192\u9193\u9194\u9195\u9196\u9197\u9198\u9199\u919a\u919b\u919c\u919d\u919e\u919f\u91a0\u91a1\u91a2\u91a3\u91a4\u91a5\u91a6\u91a7\u91a8\u91a9\u91aa\u91ab\u91ac\u91ad\u91ae\u91af\u91b0\u91b1\u91b2\u91b3\u91b4\u91b5\u91b6\u91b7\u91b8\u91b9\u91ba\u91bb\u91bc\u91bd\u91be\u91bf\u91c0\u91c1\u91c2\u91c3\u91c4\u91c5\u91c6\u91c7\u91c8\u91c9\u91ca\u91cb\u91cc\u91cd\u91ce\u91cf\u91d0\u91d1\u91d2\u91d3\u91d4\u91d5\u91d6\u91d7\u91d8\u91d9\u91da\u91db\u91dc\u91dd\u91de\u91df\u91e0\u91e1\u91e2\u91e3\u91e4\u91e5\u91e6\u91e7\u91e8\u91e9\u91ea\u91eb\u91ec\u91ed\u91ee\u91ef\u91f0\u91f1\u91f2\u91f3\u91f4\u91f5\u91f6\u91f7\u91f8\u91f9\u91fa\u91fb\u91fc\u91fd\u91fe\u91ff\u9200\u9201\u9202\u9203\u9204\u9205\u9206\u9207\u9208\u9209\u920a\u920b\u920c\u920d\u920e\u920f\u9210\u9211\u9212\u9213\u9214\u9215\u9216\u9217\u9218\u9219\u921a\u921b\u921c\u921d\u921e\u921f\u9220\u9221\u9222\u9223\u9224\u9225\u9226\u9227\u9228\u9229\u922a\u922b\u922c\u922d\u922e\u922f\u9230\u9231\u9232\u9233\u9234\u9235\u9236\u9237\u9238\u9239\u923a\u923b\u923c\u923d\u923e\u923f\u9240\u9241\u9242\u9243\u9244\u9245\u9246\u9247\u9248\u9249\u924a\u924b\u924c\u924d\u924e\u924f\u9250\u9251\u9252\u9253\u9254\u9255\u9256\u9257\u9258\u9259\u925a\u925b\u925c\u925d\u925e\u925f\u9260\u9261\u9262\u9263\u9264\u9265\u9266\u9267\u9268\u9269\u926a\u926b\u926c\u926d\u926e\u926f\u9270\u9271\u9272\u9273\u9274\u9275\u9276\u9277\u9278\u9279\u927a\u927b\u927c\u927d\u927e\u927f\u9280\u9281\u9282\u9283\u9284\u9285\u9286\u9287\u9288\u9289\u928a\u928b\u928c\u928d\u928e\u928f\u9290\u9291\u9292\u9293\u9294\u9295\u9296\u9297\u9298\u9299\u929a\u929b\u929c\u929d\u929e\u929f\u92a0\u92a1\u92a2\u92a3\u92a4\u92a5\u92a6\u92a7\u92a8\u92a9\u92aa\u92ab\u92ac\u92ad\u92ae\u92af\u92b0\u92b1\u92b2\u92b3\u92b4\u92b5\u92b6\u92b7\u92b8\u92b9\u92ba\u92bb\u92bc\u92bd\u92be\u92bf\u92c0\u92c1\u92c2\u92c3\u92c4\u92c5\u92c6\u92c7\u
92c8\u92c9\u92ca\u92cb\u92cc\u92cd\u92ce\u92cf\u92d0\u92d1\u92d2\u92d3\u92d4\u92d5\u92d6\u92d7\u92d8\u92d9\u92da\u92db\u92dc\u92dd\u92de\u92df\u92e0\u92e1\u92e2\u92e3\u92e4\u92e5\u92e6\u92e7\u92e8\u92e9\u92ea\u92eb\u92ec\u92ed\u92ee\u92ef\u92f0\u92f1\u92f2\u92f3\u92f4\u92f5\u92f6\u92f7\u92f8\u92f9\u92fa\u92fb\u92fc\u92fd\u92fe\u92ff\u9300\u9301\u9302\u9303\u9304\u9305\u9306\u9307\u9308\u9309\u930a\u930b\u930c\u930d\u930e\u930f\u9310\u9311\u9312\u9313\u9314\u9315\u9316\u9317\u9318\u9319\u931a\u931b\u931c\u931d\u931e\u931f\u9320\u9321\u9322\u9323\u9324\u9325\u9326\u9327\u9328\u9329\u932a\u932b\u932c\u932d\u932e\u932f\u9330\u9331\u9332\u9333\u9334\u9335\u9336\u9337\u9338\u9339\u933a\u933b\u933c\u933d\u933e\u933f\u9340\u9341\u9342\u9343\u9344\u9345\u9346\u9347\u9348\u9349\u934a\u934b\u934c\u934d\u934e\u934f\u9350\u9351\u9352\u9353\u9354\u9355\u9356\u9357\u9358\u9359\u935a\u935b\u935c\u935d\u935e\u935f\u9360\u9361\u9362\u9363\u9364\u9365\u9366\u9367\u9368\u9369\u936a\u936b\u936c\u936d\u936e\u936f\u9370\u9371\u9372\u9373\u9374\u9375\u9376\u9377\u9378\u9379\u937a\u937b\u937c\u937d\u937e\u937f\u9380\u9381\u9382\u9383\u9384\u9385\u9386\u9387\u9388\u9389\u938a\u938b\u938c\u938d\u938e\u938f\u9390\u9391\u9392\u9393\u9394\u9395\u9396\u9397\u9398\u9399\u939a\u939b\u939c\u939d\u939e\u939f\u93a0\u93a1\u93a2\u93a3\u93a4\u93a5\u93a6\u93a7\u93a8\u93a9\u93aa\u93ab\u93ac\u93ad\u93ae\u93af\u93b0\u93b1\u93b2\u93b3\u93b4\u93b5\u93b6\u93b7\u93b8\u93b9\u93ba\u93bb\u93bc\u93bd\u93be\u93bf\u93c0\u93c1\u93c2\u93c3\u93c4\u93c5\u93c6\u93c7\u93c8\u93c9\u93ca\u93cb\u93cc\u93cd\u93ce\u93cf\u93d0\u93d1\u93d2\u93d3\u93d4\u93d5\u93d6\u93d7\u93d8\u93d9\u93da\u93db\u93dc\u93dd\u93de\u93df\u93e0\u93e1\u93e2\u93e3\u93e4\u93e5\u93e6\u93e7\u93e8\u93e9\u93ea\u93eb\u93ec\u93ed\u93ee\u93ef\u93f0\u93f1\u93f2\u93f3\u93f4\u93f5\u93f6\u93f7\u93f8\u93f9\u93fa\u93fb\u93fc\u93fd\u93fe\u93ff\u9400\u9401\u9402\u9403\u9404\u9405\u9406\u9407\u9408\u9409\u940a\u940b\u940c\u940d\u940e\u940f\u9410\u9411\u9412\u9413\u9414\u94
15\u9416\u9417\u9418\u9419\u941a\u941b\u941c\u941d\u941e\u941f\u9420\u9421\u9422\u9423\u9424\u9425\u9426\u9427\u9428\u9429\u942a\u942b\u942c\u942d\u942e\u942f\u9430\u9431\u9432\u9433\u9434\u9435\u9436\u9437\u9438\u9439\u943a\u943b\u943c\u943d\u943e\u943f\u9440\u9441\u9442\u9443\u9444\u9445\u9446\u9447\u9448\u9449\u944a\u944b\u944c\u944d\u944e\u944f\u9450\u9451\u9452\u9453\u9454\u9455\u9456\u9457\u9458\u9459\u945a\u945b\u945c\u945d\u945e\u945f\u9460\u9461\u9462\u9463\u9464\u9465\u9466\u9467\u9468\u9469\u946a\u946b\u946c\u946d\u946e\u946f\u9470\u9471\u9472\u9473\u9474\u9475\u9476\u9477\u9478\u9479\u947a\u947b\u947c\u947d\u947e\u947f\u9480\u9481\u9482\u9483\u9484\u9485\u9486\u9487\u9488\u9489\u948a\u948b\u948c\u948d\u948e\u948f\u9490\u9491\u9492\u9493\u9494\u9495\u9496\u9497\u9498\u9499\u949a\u949b\u949c\u949d\u949e\u949f\u94a0\u94a1\u94a2\u94a3\u94a4\u94a5\u94a6\u94a7\u94a8\u94a9\u94aa\u94ab\u94ac\u94ad\u94ae\u94af\u94b0\u94b1\u94b2\u94b3\u94b4\u94b5\u94b6\u94b7\u94b8\u94b9\u94ba\u94bb\u94bc\u94bd\u94be\u94bf\u94c0\u94c1\u94c2\u94c3\u94c4\u94c5\u94c6\u94c7\u94c8\u94c9\u94ca\u94cb\u94cc\u94cd\u94ce\u94cf\u94d0\u94d1\u94d2\u94d3\u94d4\u94d5\u94d6\u94d7\u94d8\u94d9\u94da\u94db\u94dc\u94dd\u94de\u94df\u94e0\u94e1\u94e2\u94e3\u94e4\u94e5\u94e6\u94e7\u94e8\u94e9\u94ea\u94eb\u94ec\u94ed\u94ee\u94ef\u94f0\u94f1\u94f2\u94f3\u94f4\u94f5\u94f6\u94f7\u94f8\u94f9\u94fa\u94fb\u94fc\u94fd\u94fe\u94ff\u9500\u9501\u9502\u9503\u9504\u9505\u9506\u9507\u9508\u9509\u950a\u950b\u950c\u950d\u950e\u950f\u9510\u9511\u9512\u9513\u9514\u9515\u9516\u9517\u9518\u9519\u951a\u951b\u951c\u951d\u951e\u951f\u9520\u9521\u9522\u9523\u9524\u9525\u9526\u9527\u9528\u9529\u952a\u952b\u952c\u952d\u952e\u952f\u9530\u9531\u9532\u9533\u9534\u9535\u9536\u9537\u9538\u9539\u953a\u953b\u953c\u953d\u953e\u953f\u9540\u9541\u9542\u9543\u9544\u9545\u9546\u9547\u9548\u9549\u954a\u954b\u954c\u954d\u954e\u954f\u9550\u9551\u9552\u9553\u9554\u9555\u9556\u9557\u9558\u9559\u955a\u955b\u955c\u955d\u955e\u955f\u9560\u9561\u9562
\u9563\u9564\u9565\u9566\u9567\u9568\u9569\u956a\u956b\u956c\u956d\u956e\u956f\u9570\u9571\u9572\u9573\u9574\u9575\u9576\u9577\u9578\u9579\u957a\u957b\u957c\u957d\u957e\u957f\u9580\u9581\u9582\u9583\u9584\u9585\u9586\u9587\u9588\u9589\u958a\u958b\u958c\u958d\u958e\u958f\u9590\u9591\u9592\u9593\u9594\u9595\u9596\u9597\u9598\u9599\u959a\u959b\u959c\u959d\u959e\u959f\u95a0\u95a1\u95a2\u95a3\u95a4\u95a5\u95a6\u95a7\u95a8\u95a9\u95aa\u95ab\u95ac\u95ad\u95ae\u95af\u95b0\u95b1\u95b2\u95b3\u95b4\u95b5\u95b6\u95b7\u95b8\u95b9\u95ba\u95bb\u95bc\u95bd\u95be\u95bf\u95c0\u95c1\u95c2\u95c3\u95c4\u95c5\u95c6\u95c7\u95c8\u95c9\u95ca\u95cb\u95cc\u95cd\u95ce\u95cf\u95d0\u95d1\u95d2\u95d3\u95d4\u95d5\u95d6\u95d7\u95d8\u95d9\u95da\u95db\u95dc\u95dd\u95de\u95df\u95e0\u95e1\u95e2\u95e3\u95e4\u95e5\u95e6\u95e7\u95e8\u95e9\u95ea\u95eb\u95ec\u95ed\u95ee\u95ef\u95f0\u95f1\u95f2\u95f3\u95f4\u95f5\u95f6\u95f7\u95f8\u95f9\u95fa\u95fb\u95fc\u95fd\u95fe\u95ff\u9600\u9601\u9602\u9603\u9604\u9605\u9606\u9607\u9608\u9609\u960a\u960b\u960c\u960d\u960e\u960f\u9610\u9611\u9612\u9613\u9614\u9615\u9616\u9617\u9618\u9619\u961a\u961b\u961c\u961d\u961e\u961f\u9620\u9621\u9622\u9623\u9624\u9625\u9626\u9627\u9628\u9629\u962a\u962b\u962c\u962d\u962e\u962f\u9630\u9631\u9632\u9633\u9634\u9635\u9636\u9637\u9638\u9639\u963a\u963b\u963c\u963d\u963e\u963f\u9640\u9641\u9642\u9643\u9644\u9645\u9646\u9647\u9648\u9649\u964a\u964b\u964c\u964d\u964e\u964f\u9650\u9651\u9652\u9653\u9654\u9655\u9656\u9657\u9658\u9659\u965a\u965b\u965c\u965d\u965e\u965f\u9660\u9661\u9662\u9663\u9664\u9665\u9666\u9667\u9668\u9669\u966a\u966b\u966c\u966d\u966e\u966f\u9670\u9671\u9672\u9673\u9674\u9675\u9676\u9677\u9678\u9679\u967a\u967b\u967c\u967d\u967e\u967f\u9680\u9681\u9682\u9683\u9684\u9685\u9686\u9687\u9688\u9689\u968a\u968b\u968c\u968d\u968e\u968f\u9690\u9691\u9692\u9693\u9694\u9695\u9696\u9697\u9698\u9699\u969a\u969b\u969c\u969d\u969e\u969f\u96a0\u96a1\u96a2\u96a3\u96a4\u96a5\u96a6\u96a7\u96a8\u96a9\u96aa\u96ab\u96ac\u96ad\u96ae\u96af\u
96b0\u96b1\u96b2\u96b3\u96b4\u96b5\u96b6\u96b7\u96b8\u96b9\u96ba\u96bb\u96bc\u96bd\u96be\u96bf\u96c0\u96c1\u96c2\u96c3\u96c4\u96c5\u96c6\u96c7\u96c8\u96c9\u96ca\u96cb\u96cc\u96cd\u96ce\u96cf\u96d0\u96d1\u96d2\u96d3\u96d4\u96d5\u96d6\u96d7\u96d8\u96d9\u96da\u96db\u96dc\u96dd\u96de\u96df\u96e0\u96e1\u96e2\u96e3\u96e4\u96e5\u96e6\u96e7\u96e8\u96e9\u96ea\u96eb\u96ec\u96ed\u96ee\u96ef\u96f0\u96f1\u96f2\u96f3\u96f4\u96f5\u96f6\u96f7\u96f8\u96f9\u96fa\u96fb\u96fc\u96fd\u96fe\u96ff\u9700\u9701\u9702\u9703\u9704\u9705\u9706\u9707\u9708\u9709\u970a\u970b\u970c\u970d\u970e\u970f\u9710\u9711\u9712\u9713\u9714\u9715\u9716\u9717\u9718\u9719\u971a\u971b\u971c\u971d\u971e\u971f\u9720\u9721\u9722\u9723\u9724\u9725\u9726\u9727\u9728\u9729\u972a\u972b\u972c\u972d\u972e\u972f\u9730\u9731\u9732\u9733\u9734\u9735\u9736\u9737\u9738\u9739\u973a\u973b\u973c\u973d\u973e\u973f\u9740\u9741\u9742\u9743\u9744\u9745\u9746\u9747\u9748\u9749\u974a\u974b\u974c\u974d\u974e\u974f\u9750\u9751\u9752\u9753\u9754\u9755\u9756\u9757\u9758\u9759\u975a\u975b\u975c\u975d\u975e\u975f\u9760\u9761\u9762\u9763\u9764\u9765\u9766\u9767\u9768\u9769\u976a\u976b\u976c\u976d\u976e\u976f\u9770\u9771\u9772\u9773\u9774\u9775\u9776\u9777\u9778\u9779\u977a\u977b\u977c\u977d\u977e\u977f\u9780\u9781\u9782\u9783\u9784\u9785\u9786\u9787\u9788\u9789\u978a\u978b\u978c\u978d\u978e\u978f\u9790\u9791\u9792\u9793\u9794\u9795\u9796\u9797\u9798\u9799\u979a\u979b\u979c\u979d\u979e\u979f\u97a0\u97a1\u97a2\u97a3\u97a4\u97a5\u97a6\u97a7\u97a8\u97a9\u97aa\u97ab\u97ac\u97ad\u97ae\u97af\u97b0\u97b1\u97b2\u97b3\u97b4\u97b5\u97b6\u97b7\u97b8\u97b9\u97ba\u97bb\u97bc\u97bd\u97be\u97bf\u97c0\u97c1\u97c2\u97c3\u97c4\u97c5\u97c6\u97c7\u97c8\u97c9\u97ca\u97cb\u97cc\u97cd\u97ce\u97cf\u97d0\u97d1\u97d2\u97d3\u97d4\u97d5\u97d6\u97d7\u97d8\u97d9\u97da\u97db\u97dc\u97dd\u97de\u97df\u97e0\u97e1\u97e2\u97e3\u97e4\u97e5\u97e6\u97e7\u97e8\u97e9\u97ea\u97eb\u97ec\u97ed\u97ee\u97ef\u97f0\u97f1\u97f2\u97f3\u97f4\u97f5\u97f6\u97f7\u97f8\u97f9\u97fa\u97fb\u97fc\u97
fd\u97fe\u97ff\u9800\u9801\u9802\u9803\u9804\u9805\u9806\u9807\u9808\u9809\u980a\u980b\u980c\u980d\u980e\u980f\u9810\u9811\u9812\u9813\u9814\u9815\u9816\u9817\u9818\u9819\u981a\u981b\u981c\u981d\u981e\u981f\u9820\u9821\u9822\u9823\u9824\u9825\u9826\u9827\u9828\u9829\u982a\u982b\u982c\u982d\u982e\u982f\u9830\u9831\u9832\u9833\u9834\u9835\u9836\u9837\u9838\u9839\u983a\u983b\u983c\u983d\u983e\u983f\u9840\u9841\u9842\u9843\u9844\u9845\u9846\u9847\u9848\u9849\u984a\u984b\u984c\u984d\u984e\u984f\u9850\u9851\u9852\u9853\u9854\u9855\u9856\u9857\u9858\u9859\u985a\u985b\u985c\u985d\u985e\u985f\u9860\u9861\u9862\u9863\u9864\u9865\u9866\u9867\u9868\u9869\u986a\u986b\u986c\u986d\u986e\u986f\u9870\u9871\u9872\u9873\u9874\u9875\u9876\u9877\u9878\u9879\u987a\u987b\u987c\u987d\u987e\u987f\u9880\u9881\u9882\u9883\u9884\u9885\u9886\u9887\u9888\u9889\u988a\u988b\u988c\u988d\u988e\u988f\u9890\u9891\u9892\u9893\u9894\u9895\u9896\u9897\u9898\u9899\u989a\u989b\u989c\u989d\u989e\u989f\u98a0\u98a1\u98a2\u98a3\u98a4\u98a5\u98a6\u98a7\u98a8\u98a9\u98aa\u98ab\u98ac\u98ad\u98ae\u98af\u98b0\u98b1\u98b2\u98b3\u98b4\u98b5\u98b6\u98b7\u98b8\u98b9\u98ba\u98bb\u98bc\u98bd\u98be\u98bf\u98c0\u98c1\u98c2\u98c3\u98c4\u98c5\u98c6\u98c7\u98c8\u98c9\u98ca\u98cb\u98cc\u98cd\u98ce\u98cf\u98d0\u98d1\u98d2\u98d3\u98d4\u98d5\u98d6\u98d7\u98d8\u98d9\u98da\u98db\u98dc\u98dd\u98de\u98df\u98e0\u98e1\u98e2\u98e3\u98e4\u98e5\u98e6\u98e7\u98e8\u98e9\u98ea\u98eb\u98ec\u98ed\u98ee\u98ef\u98f0\u98f1\u98f2\u98f3\u98f4\u98f5\u98f6\u98f7\u98f8\u98f9\u98fa\u98fb\u98fc\u98fd\u98fe\u98ff\u9900\u9901\u9902\u9903\u9904\u9905\u9906\u9907\u9908\u9909\u990a\u990b\u990c\u990d\u990e\u990f\u9910\u9911\u9912\u9913\u9914\u9915\u9916\u9917\u9918\u9919\u991a\u991b\u991c\u991d\u991e\u991f\u9920\u9921\u9922\u9923\u9924\u9925\u9926\u9927\u9928\u9929\u992a\u992b\u992c\u992d\u992e\u992f\u9930\u9931\u9932\u9933\u9934\u9935\u9936\u9937\u9938\u9939\u993a\u993b\u993c\u993d\u993e\u993f\u9940\u9941\u9942\u9943\u9944\u9945\u9946\u9947\u9948\u9949\u994a
\u994b\u994c\u994d\u994e\u994f\u9950\u9951\u9952\u9953\u9954\u9955\u9956\u9957\u9958\u9959\u995a\u995b\u995c\u995d\u995e\u995f\u9960\u9961\u9962\u9963\u9964\u9965\u9966\u9967\u9968\u9969\u996a\u996b\u996c\u996d\u996e\u996f\u9970\u9971\u9972\u9973\u9974\u9975\u9976\u9977\u9978\u9979\u997a\u997b\u997c\u997d\u997e\u997f\u9980\u9981\u9982\u9983\u9984\u9985\u9986\u9987\u9988\u9989\u998a\u998b\u998c\u998d\u998e\u998f\u9990\u9991\u9992\u9993\u9994\u9995\u9996\u9997\u9998\u9999\u999a\u999b\u999c\u999d\u999e\u999f\u99a0\u99a1\u99a2\u99a3\u99a4\u99a5\u99a6\u99a7\u99a8\u99a9\u99aa\u99ab\u99ac\u99ad\u99ae\u99af\u99b0\u99b1\u99b2\u99b3\u99b4\u99b5\u99b6\u99b7\u99b8\u99b9\u99ba\u99bb\u99bc\u99bd\u99be\u99bf\u99c0\u99c1\u99c2\u99c3\u99c4\u99c5\u99c6\u99c7\u99c8\u99c9\u99ca\u99cb\u99cc\u99cd\u99ce\u99cf\u99d0\u99d1\u99d2\u99d3\u99d4\u99d5\u99d6\u99d7\u99d8\u99d9\u99da\u99db\u99dc\u99dd\u99de\u99df\u99e0\u99e1\u99e2\u99e3\u99e4\u99e5\u99e6\u99e7\u99e8\u99e9\u99ea\u99eb\u99ec\u99ed\u99ee\u99ef\u99f0\u99f1\u99f2\u99f3\u99f4\u99f5\u99f6\u99f7\u99f8\u99f9\u99fa\u99fb\u99fc\u99fd\u99fe\u99ff\u9a00\u9a01\u9a02\u9a03\u9a04\u9a05\u9a06\u9a07\u9a08\u9a09\u9a0a\u9a0b\u9a0c\u9a0d\u9a0e\u9a0f\u9a10\u9a11\u9a12\u9a13\u9a14\u9a15\u9a16\u9a17\u9a18\u9a19\u9a1a\u9a1b\u9a1c\u9a1d\u9a1e\u9a1f\u9a20\u9a21\u9a22\u9a23\u9a24\u9a25\u9a26\u9a27\u9a28\u9a29\u9a2a\u9a2b\u9a2c\u9a2d\u9a2e\u9a2f\u9a30\u9a31\u9a32\u9a33\u9a34\u9a35\u9a36\u9a37\u9a38\u9a39\u9a3a\u9a3b\u9a3c\u9a3d\u9a3e\u9a3f\u9a40\u9a41\u9a42\u9a43\u9a44\u9a45\u9a46\u9a47\u9a48\u9a49\u9a4a\u9a4b\u9a4c\u9a4d\u9a4e\u9a4f\u9a50\u9a51\u9a52\u9a53\u9a54\u9a55\u9a56\u9a57\u9a58\u9a59\u9a5a\u9a5b\u9a5c\u9a5d\u9a5e\u9a5f\u9a60\u9a61\u9a62\u9a63\u9a64\u9a65\u9a66\u9a67\u9a68\u9a69\u9a6a\u9a6b\u9a6c\u9a6d\u9a6e\u9a6f\u9a70\u9a71\u9a72\u9a73\u9a74\u9a75\u9a76\u9a77\u9a78\u9a79\u9a7a\u9a7b\u9a7c\u9a7d\u9a7e\u9a7f\u9a80\u9a81\u9a82\u9a83\u9a84\u9a85\u9a86\u9a87\u9a88\u9a89\u9a8a\u9a8b\u9a8c\u9a8d\u9a8e\u9a8f\u9a90\u9a91\u9a92\u9a93\u9a94\u9a95\u9a96\u9a97\u
9a98\u9a99\u9a9a\u9a9b\u9a9c\u9a9d\u9a9e\u9a9f\u9aa0\u9aa1\u9aa2\u9aa3\u9aa4\u9aa5\u9aa6\u9aa7\u9aa8\u9aa9\u9aaa\u9aab\u9aac\u9aad\u9aae\u9aaf\u9ab0\u9ab1\u9ab2\u9ab3\u9ab4\u9ab5\u9ab6\u9ab7\u9ab8\u9ab9\u9aba\u9abb\u9abc\u9abd\u9abe\u9abf\u9ac0\u9ac1\u9ac2\u9ac3\u9ac4\u9ac5\u9ac6\u9ac7\u9ac8\u9ac9\u9aca\u9acb\u9acc\u9acd\u9ace\u9acf\u9ad0\u9ad1\u9ad2\u9ad3\u9ad4\u9ad5\u9ad6\u9ad7\u9ad8\u9ad9\u9ada\u9adb\u9adc\u9add\u9ade\u9adf\u9ae0\u9ae1\u9ae2\u9ae3\u9ae4\u9ae5\u9ae6\u9ae7\u9ae8\u9ae9\u9aea\u9aeb\u9aec\u9aed\u9aee\u9aef\u9af0\u9af1\u9af2\u9af3\u9af4\u9af5\u9af6\u9af7\u9af8\u9af9\u9afa\u9afb\u9afc\u9afd\u9afe\u9aff\u9b00\u9b01\u9b02\u9b03\u9b04\u9b05\u9b06\u9b07\u9b08\u9b09\u9b0a\u9b0b\u9b0c\u9b0d\u9b0e\u9b0f\u9b10\u9b11\u9b12\u9b13\u9b14\u9b15\u9b16\u9b17\u9b18\u9b19\u9b1a\u9b1b\u9b1c\u9b1d\u9b1e\u9b1f\u9b20\u9b21\u9b22\u9b23\u9b24\u9b25\u9b26\u9b27\u9b28\u9b29\u9b2a\u9b2b\u9b2c\u9b2d\u9b2e\u9b2f\u9b30\u9b31\u9b32\u9b33\u9b34\u9b35\u9b36\u9b37\u9b38\u9b39\u9b3a\u9b3b\u9b3c\u9b3d\u9b3e\u9b3f\u9b40\u9b41\u9b42\u9b43\u9b44\u9b45\u9b46\u9b47\u9b48\u9b49\u9b4a\u9b4b\u9b4c\u9b4d\u9b4e\u9b4f\u9b50\u9b51\u9b52\u9b53\u9b54\u9b55\u9b56\u9b57\u9b58\u9b59\u9b5a\u9b5b\u9b5c\u9b5d\u9b5e\u9b5f\u9b60\u9b61\u9b62\u9b63\u9b64\u9b65\u9b66\u9b67\u9b68\u9b69\u9b6a\u9b6b\u9b6c\u9b6d\u9b6e\u9b6f\u9b70\u9b71\u9b72\u9b73\u9b74\u9b75\u9b76\u9b77\u9b78\u9b79\u9b7a\u9b7b\u9b7c\u9b7d\u9b7e\u9b7f\u9b80\u9b81\u9b82\u9b83\u9b84\u9b85\u9b86\u9b87\u9b88\u9b89\u9b8a\u9b8b\u9b8c\u9b8d\u9b8e\u9b8f\u9b90\u9b91\u9b92\u9b93\u9b94\u9b95\u9b96\u9b97\u9b98\u9b99\u9b9a\u9b9b\u9b9c\u9b9d\u9b9e\u9b9f\u9ba0\u9ba1\u9ba2\u9ba3\u9ba4\u9ba5\u9ba6\u9ba7\u9ba8\u9ba9\u9baa\u9bab\u9bac\u9bad\u9bae\u9baf\u9bb0\u9bb1\u9bb2\u9bb3\u9bb4\u9bb5\u9bb6\u9bb7\u9bb8\u9bb9\u9bba\u9bbb\u9bbc\u9bbd\u9bbe\u9bbf\u9bc0\u9bc1\u9bc2\u9bc3\u9bc4\u9bc5\u9bc6\u9bc7\u9bc8\u9bc9\u9bca\u9bcb\u9bcc\u9bcd\u9bce\u9bcf\u9bd0\u9bd1\u9bd2\u9bd3\u9bd4\u9bd5\u9bd6\u9bd7\u9bd8\u9bd9\u9bda\u9bdb\u9bdc\u9bdd\u9bde\u9bdf\u9be0\u9be1\u9be2\u9be3\u9be4\u9b
e5\u9be6\u9be7\u9be8\u9be9\u9bea\u9beb\u9bec\u9bed\u9bee\u9bef\u9bf0\u9bf1\u9bf2\u9bf3\u9bf4\u9bf5\u9bf6\u9bf7\u9bf8\u9bf9\u9bfa\u9bfb\u9bfc\u9bfd\u9bfe\u9bff\u9c00\u9c01\u9c02\u9c03\u9c04\u9c05\u9c06\u9c07\u9c08\u9c09\u9c0a\u9c0b\u9c0c\u9c0d\u9c0e\u9c0f\u9c10\u9c11\u9c12\u9c13\u9c14\u9c15\u9c16\u9c17\u9c18\u9c19\u9c1a\u9c1b\u9c1c\u9c1d\u9c1e\u9c1f\u9c20\u9c21\u9c22\u9c23\u9c24\u9c25\u9c26\u9c27\u9c28\u9c29\u9c2a\u9c2b\u9c2c\u9c2d\u9c2e\u9c2f\u9c30\u9c31\u9c32\u9c33\u9c34\u9c35\u9c36\u9c37\u9c38\u9c39\u9c3a\u9c3b\u9c3c\u9c3d\u9c3e\u9c3f\u9c40\u9c41\u9c42\u9c43\u9c44\u9c45\u9c46\u9c47\u9c48\u9c49\u9c4a\u9c4b\u9c4c\u9c4d\u9c4e\u9c4f\u9c50\u9c51\u9c52\u9c53\u9c54\u9c55\u9c56\u9c57\u9c58\u9c59\u9c5a\u9c5b\u9c5c\u9c5d\u9c5e\u9c5f\u9c60\u9c61\u9c62\u9c63\u9c64\u9c65\u9c66\u9c67\u9c68\u9c69\u9c6a\u9c6b\u9c6c\u9c6d\u9c6e\u9c6f\u9c70\u9c71\u9c72\u9c73\u9c74\u9c75\u9c76\u9c77\u9c78\u9c79\u9c7a\u9c7b\u9c7c\u9c7d\u9c7e\u9c7f\u9c80\u9c81\u9c82\u9c83\u9c84\u9c85\u9c86\u9c87\u9c88\u9c89\u9c8a\u9c8b\u9c8c\u9c8d\u9c8e\u9c8f\u9c90\u9c91\u9c92\u9c93\u9c94\u9c95\u9c96\u9c97\u9c98\u9c99\u9c9a\u9c9b\u9c9c\u9c9d\u9c9e\u9c9f\u9ca0\u9ca1\u9ca2\u9ca3\u9ca4\u9ca5\u9ca6\u9ca7\u9ca8\u9ca9\u9caa\u9cab\u9cac\u9cad\u9cae\u9caf\u9cb0\u9cb1\u9cb2\u9cb3\u9cb4\u9cb5\u9cb6\u9cb7\u9cb8\u9cb9\u9cba\u9cbb\u9cbc\u9cbd\u9cbe\u9cbf\u9cc0\u9cc1\u9cc2\u9cc3\u9cc4\u9cc5\u9cc6\u9cc7\u9cc8\u9cc9\u9cca\u9ccb\u9ccc\u9ccd\u9cce\u9ccf\u9cd0\u9cd1\u9cd2\u9cd3\u9cd4\u9cd5\u9cd6\u9cd7\u9cd8\u9cd9\u9cda\u9cdb\u9cdc\u9cdd\u9cde\u9cdf\u9ce0\u9ce1\u9ce2\u9ce3\u9ce4\u9ce5\u9ce6\u9ce7\u9ce8\u9ce9\u9cea\u9ceb\u9cec\u9ced\u9cee\u9cef\u9cf0\u9cf1\u9cf2\u9cf3\u9cf4\u9cf5\u9cf6\u9cf7\u9cf8\u9cf9\u9cfa\u9cfb\u9cfc\u9cfd\u9cfe\u9cff\u9d00\u9d01\u9d02\u9d03\u9d04\u9d05\u9d06\u9d07\u9d08\u9d09\u9d0a\u9d0b\u9d0c\u9d0d\u9d0e\u9d0f\u9d10\u9d11\u9d12\u9d13\u9d14\u9d15\u9d16\u9d17\u9d18\u9d19\u9d1a\u9d1b\u9d1c\u9d1d\u9d1e\u9d1f\u9d20\u9d21\u9d22\u9d23\u9d24\u9d25\u9d26\u9d27\u9d28\u9d29\u9d2a\u9d2b\u9d2c\u9d2d\u9d2e\u9d2f\u9d30\u9d31\u9d32
\u9d33\u9d34\u9d35\u9d36\u9d37\u9d38\u9d39\u9d3a\u9d3b\u9d3c\u9d3d\u9d3e\u9d3f\u9d40\u9d41\u9d42\u9d43\u9d44\u9d45\u9d46\u9d47\u9d48\u9d49\u9d4a\u9d4b\u9d4c\u9d4d\u9d4e\u9d4f\u9d50\u9d51\u9d52\u9d53\u9d54\u9d55\u9d56\u9d57\u9d58\u9d59\u9d5a\u9d5b\u9d5c\u9d5d\u9d5e\u9d5f\u9d60\u9d61\u9d62\u9d63\u9d64\u9d65\u9d66\u9d67\u9d68\u9d69\u9d6a\u9d6b\u9d6c\u9d6d\u9d6e\u9d6f\u9d70\u9d71\u9d72\u9d73\u9d74\u9d75\u9d76\u9d77\u9d78\u9d79\u9d7a\u9d7b\u9d7c\u9d7d\u9d7e\u9d7f\u9d80\u9d81\u9d82\u9d83\u9d84\u9d85\u9d86\u9d87\u9d88\u9d89\u9d8a\u9d8b\u9d8c\u9d8d\u9d8e\u9d8f\u9d90\u9d91\u9d92\u9d93\u9d94\u9d95\u9d96\u9d97\u9d98\u9d99\u9d9a\u9d9b\u9d9c\u9d9d\u9d9e\u9d9f\u9da0\u9da1\u9da2\u9da3\u9da4\u9da5\u9da6\u9da7\u9da8\u9da9\u9daa\u9dab\u9dac\u9dad\u9dae\u9daf\u9db0\u9db1\u9db2\u9db3\u9db4\u9db5\u9db6\u9db7\u9db8\u9db9\u9dba\u9dbb\u9dbc\u9dbd\u9dbe\u9dbf\u9dc0\u9dc1\u9dc2\u9dc3\u9dc4\u9dc5\u9dc6\u9dc7\u9dc8\u9dc9\u9dca\u9dcb\u9dcc\u9dcd\u9dce\u9dcf\u9dd0\u9dd1\u9dd2\u9dd3\u9dd4\u9dd5\u9dd6\u9dd7\u9dd8\u9dd9\u9dda\u9ddb\u9ddc\u9ddd\u9dde\u9ddf\u9de0\u9de1\u9de2\u9de3\u9de4\u9de5\u9de6\u9de7\u9de8\u9de9\u9dea\u9deb\u9dec\u9ded\u9dee\u9def\u9df0\u9df1\u9df2\u9df3\u9df4\u9df5\u9df6\u9df7\u9df8\u9df9\u9dfa\u9dfb\u9dfc\u9dfd\u9dfe\u9dff\u9e00\u9e01\u9e02\u9e03\u9e04\u9e05\u9e06\u9e07\u9e08\u9e09\u9e0a\u9e0b\u9e0c\u9e0d\u9e0e\u9e0f\u9e10\u9e11\u9e12\u9e13\u9e14\u9e15\u9e16\u9e17\u9e18\u9e19\u9e1a\u9e1b\u9e1c\u9e1d\u9e1e\u9e1f\u9e20\u9e21\u9e22\u9e23\u9e24\u9e25\u9e26\u9e27\u9e28\u9e29\u9e2a\u9e2b\u9e2c\u9e2d\u9e2e\u9e2f\u9e30\u9e31\u9e32\u9e33\u9e34\u9e35\u9e36\u9e37\u9e38\u9e39\u9e3a\u9e3b\u9e3c\u9e3d\u9e3e\u9e3f\u9e40\u9e41\u9e42\u9e43\u9e44\u9e45\u9e46\u9e47\u9e48\u9e49\u9e4a\u9e4b\u9e4c\u9e4d\u9e4e\u9e4f\u9e50\u9e51\u9e52\u9e53\u9e54\u9e55\u9e56\u9e57\u9e58\u9e59\u9e5a\u9e5b\u9e5c\u9e5d\u9e5e\u9e5f\u9e60\u9e61\u9e62\u9e63\u9e64\u9e65\u9e66\u9e67\u9e68\u9e69\u9e6a\u9e6b\u9e6c\u9e6d\u9e6e\u9e6f\u9e70\u9e71\u9e72\u9e73\u9e74\u9e75\u9e76\u9e77\u9e78\u9e79\u9e7a\u9e7b\u9e7c\u9e7d\u9e7e\u9e7f\u
9e80\u9e81\u9e82\u9e83\u9e84\u9e85\u9e86\u9e87\u9e88\u9e89\u9e8a\u9e8b\u9e8c\u9e8d\u9e8e\u9e8f\u9e90\u9e91\u9e92\u9e93\u9e94\u9e95\u9e96\u9e97\u9e98\u9e99\u9e9a\u9e9b\u9e9c\u9e9d\u9e9e\u9e9f\u9ea0\u9ea1\u9ea2\u9ea3\u9ea4\u9ea5\u9ea6\u9ea7\u9ea8\u9ea9\u9eaa\u9eab\u9eac\u9ead\u9eae\u9eaf\u9eb0\u9eb1\u9eb2\u9eb3\u9eb4\u9eb5\u9eb6\u9eb7\u9eb8\u9eb9\u9eba\u9ebb\u9ebc\u9ebd\u9ebe\u9ebf\u9ec0\u9ec1\u9ec2\u9ec3\u9ec4\u9ec5\u9ec6\u9ec7\u9ec8\u9ec9\u9eca\u9ecb\u9ecc\u9ecd\u9ece\u9ecf\u9ed0\u9ed1\u9ed2\u9ed3\u9ed4\u9ed5\u9ed6\u9ed7\u9ed8\u9ed9\u9eda\u9edb\u9edc\u9edd\u9ede\u9edf\u9ee0\u9ee1\u9ee2\u9ee3\u9ee4\u9ee5\u9ee6\u9ee7\u9ee8\u9ee9\u9eea\u9eeb\u9eec\u9eed\u9eee\u9eef\u9ef0\u9ef1\u9ef2\u9ef3\u9ef4\u9ef5\u9ef6\u9ef7\u9ef8\u9ef9\u9efa\u9efb\u9efc\u9efd\u9efe\u9eff\u9f00\u9f01\u9f02\u9f03\u9f04\u9f05\u9f06\u9f07\u9f08\u9f09\u9f0a\u9f0b\u9f0c\u9f0d\u9f0e\u9f0f\u9f10\u9f11\u9f12\u9f13\u9f14\u9f15\u9f16\u9f17\u9f18\u9f19\u9f1a\u9f1b\u9f1c\u9f1d\u9f1e\u9f1f\u9f20\u9f21\u9f22\u9f23\u9f24\u9f25\u9f26\u9f27\u9f28\u9f29\u9f2a\u9f2b\u9f2c\u9f2d\u9f2e\u9f2f\u9f30\u9f31\u9f32\u9f33\u9f34\u9f35\u9f36\u9f37\u9f38\u9f39\u9f3a\u9f3b\u9f3c\u9f3d\u9f3e\u9f3f\u9f40\u9f41\u9f42\u9f43\u9f44\u9f45\u9f46\u9f47\u9f48\u9f49\u9f4a\u9f4b\u9f4c\u9f4d\u9f4e\u9f4f\u9f50\u9f51\u9f52\u9f53\u9f54\u9f55\u9f56\u9f57\u9f58\u9f59\u9f5a\u9f5b\u9f5c\u9f5d\u9f5e\u9f5f\u9f60\u9f61\u9f62\u9f63\u9f64\u9f65\u9f66\u9f67\u9f68\u9f69\u9f6a\u9f6b\u9f6c\u9f6d\u9f6e\u9f6f\u9f70\u9f71\u9f72\u9f73\u9f74\u9f75\u9f76\u9f77\u9f78\u9f79\u9f7a\u9f7b\u9f7c\u9f7d\u9f7e\u9f7f\u9f80\u9f81\u9f82\u9f83\u9f84\u9f85\u9f86\u9f87\u9f88\u9f89\u9f8a\u9f8b\u9f8c\u9f8d\u9f8e\u9f8f\u9f90\u9f91\u9f92\u9f93\u9f94\u9f95\u9f96\u9f97\u9f98\u9f99\u9f9a\u9f9b\u9f9c\u9f9d\u9f9e\u9f9f\u9fa0\u9fa1\u9fa2\u9fa3\u9fa4\u9fa5\u9fa6\u9fa7\u9fa8\u9fa9\u9faa\u9fab\u9fac\u9fad\u9fae\u9faf\u9fb0\u9fb1\u9fb2\u9fb3\u9fb4\u9fb5\u9fb6\u9fb7\u9fb8\u9fb9\u9fba\u9fbb\ua000\ua001\ua002\ua003\ua004\ua005\ua006\ua007\ua008\ua009\ua00a\ua00b\ua00c\ua00d\ua00e\ua00f\ua010\ua0
11\ua012\ua013\ua014\ua016\ua017\ua018\ua019\ua01a\ua01b\ua01c\ua01d\ua01e\ua01f\ua020\ua021\ua022\ua023\ua024\ua025\ua026\ua027\ua028\ua029\ua02a\ua02b\ua02c\ua02d\ua02e\ua02f\ua030\ua031\ua032\ua033\ua034\ua035\ua036\ua037\ua038\ua039\ua03a\ua03b\ua03c\ua03d\ua03e\ua03f\ua040\ua041\ua042\ua043\ua044\ua045\ua046\ua047\ua048\ua049\ua04a\ua04b\ua04c\ua04d\ua04e\ua04f\ua050\ua051\ua052\ua053\ua054\ua055\ua056\ua057\ua058\ua059\ua05a\ua05b\ua05c\ua05d\ua05e\ua05f\ua060\ua061\ua062\ua063\ua064\ua065\ua066\ua067\ua068\ua069\ua06a\ua06b\ua06c\ua06d\ua06e\ua06f\ua070\ua071\ua072\ua073\ua074\ua075\ua076\ua077\ua078\ua079\ua07a\ua07b\ua07c\ua07d\ua07e\ua07f\ua080\ua081\ua082\ua083\ua084\ua085\ua086\ua087\ua088\ua089\ua08a\ua08b\ua08c\ua08d\ua08e\ua08f\ua090\ua091\ua092\ua093\ua094\ua095\ua096\ua097\ua098\ua099\ua09a\ua09b\ua09c\ua09d\ua09e\ua09f\ua0a0\ua0a1\ua0a2\ua0a3\ua0a4\ua0a5\ua0a6\ua0a7\ua0a8\ua0a9\ua0aa\ua0ab\ua0ac\ua0ad\ua0ae\ua0af\ua0b0\ua0b1\ua0b2\ua0b3\ua0b4\ua0b5\ua0b6\ua0b7\ua0b8\ua0b9\ua0ba\ua0bb\ua0bc\ua0bd\ua0be\ua0bf\ua0c0\ua0c1\ua0c2\ua0c3\ua0c4\ua0c5\ua0c6\ua0c7\ua0c8\ua0c9\ua0ca\ua0cb\ua0cc\ua0cd\ua0ce\ua0cf\ua0d0\ua0d1\ua0d2\ua0d3\ua0d4\ua0d5\ua0d6\ua0d7\ua0d8\ua0d9\ua0da\ua0db\ua0dc\ua0dd\ua0de\ua0df\ua0e0\ua0e1\ua0e2\ua0e3\ua0e4\ua0e5\ua0e6\ua0e7\ua0e8\ua0e9\ua0ea\ua0eb\ua0ec\ua0ed\ua0ee\ua0ef\ua0f0\ua0f1\ua0f2\ua0f3\ua0f4\ua0f5\ua0f6\ua0f7\ua0f8\ua0f9\ua0fa\ua0fb\ua0fc\ua0fd\ua0fe\ua0ff\ua100\ua101\ua102\ua103\ua104\ua105\ua106\ua107\ua108\ua109\ua10a\ua10b\ua10c\ua10d\ua10e\ua10f\ua110\ua111\ua112\ua113\ua114\ua115\ua116\ua117\ua118\ua119\ua11a\ua11b\ua11c\ua11d\ua11e\ua11f\ua120\ua121\ua122\ua123\ua124\ua125\ua126\ua127\ua128\ua129\ua12a\ua12b\ua12c\ua12d\ua12e\ua12f\ua130\ua131\ua132\ua133\ua134\ua135\ua136\ua137\ua138\ua139\ua13a\ua13b\ua13c\ua13d\ua13e\ua13f\ua140\ua141\ua142\ua143\ua144\ua145\ua146\ua147\ua148\ua149\ua14a\ua14b\ua14c\ua14d\ua14e\ua14f\ua150\ua151\ua152\ua153\ua154\ua155\ua156\ua157\ua158\ua159\ua15a\ua15b\ua15c\ua15d\ua15e\ua15f
\ua160\ua161\ua162\ua163\ua164\ua165\ua166\ua167\ua168\ua169\ua16a\ua16b\ua16c\ua16d\ua16e\ua16f\ua170\ua171\ua172\ua173\ua174\ua175\ua176\ua177\ua178\ua179\ua17a\ua17b\ua17c\ua17d\ua17e\ua17f\ua180\ua181\ua182\ua183\ua184\ua185\ua186\ua187\ua188\ua189\ua18a\ua18b\ua18c\ua18d\ua18e\ua18f\ua190\ua191\ua192\ua193\ua194\ua195\ua196\ua197\ua198\ua199\ua19a\ua19b\ua19c\ua19d\ua19e\ua19f\ua1a0\ua1a1\ua1a2\ua1a3\ua1a4\ua1a5\ua1a6\ua1a7\ua1a8\ua1a9\ua1aa\ua1ab\ua1ac\ua1ad\ua1ae\ua1af\ua1b0\ua1b1\ua1b2\ua1b3\ua1b4\ua1b5\ua1b6\ua1b7\ua1b8\ua1b9\ua1ba\ua1bb\ua1bc\ua1bd\ua1be\ua1bf\ua1c0\ua1c1\ua1c2\ua1c3\ua1c4\ua1c5\ua1c6\ua1c7\ua1c8\ua1c9\ua1ca\ua1cb\ua1cc\ua1cd\ua1ce\ua1cf\ua1d0\ua1d1\ua1d2\ua1d3\ua1d4\ua1d5\ua1d6\ua1d7\ua1d8\ua1d9\ua1da\ua1db\ua1dc\ua1dd\ua1de\ua1df\ua1e0\ua1e1\ua1e2\ua1e3\ua1e4\ua1e5\ua1e6\ua1e7\ua1e8\ua1e9\ua1ea\ua1eb\ua1ec\ua1ed\ua1ee\ua1ef\ua1f0\ua1f1\ua1f2\ua1f3\ua1f4\ua1f5\ua1f6\ua1f7\ua1f8\ua1f9\ua1fa\ua1fb\ua1fc\ua1fd\ua1fe\ua1ff\ua200\ua201\ua202\ua203\ua204\ua205\ua206\ua207\ua208\ua209\ua20a\ua20b\ua20c\ua20d\ua20e\ua20f\ua210\ua211\ua212\ua213\ua214\ua215\ua216\ua217\ua218\ua219\ua21a\ua21b\ua21c\ua21d\ua21e\ua21f\ua220\ua221\ua222\ua223\ua224\ua225\ua226\ua227\ua228\ua229\ua22a\ua22b\ua22c\ua22d\ua22e\ua22f\ua230\ua231\ua232\ua233\ua234\ua235\ua236\ua237\ua238\ua239\ua23a\ua23b\ua23c\ua23d\ua23e\ua23f\ua240\ua241\ua242\ua243\ua244\ua245\ua246\ua247\ua248\ua249\ua24a\ua24b\ua24c\ua24d\ua24e\ua24f\ua250\ua251\ua252\ua253\ua254\ua255\ua256\ua257\ua258\ua259\ua25a\ua25b\ua25c\ua25d\ua25e\ua25f\ua260\ua261\ua262\ua263\ua264\ua265\ua266\ua267\ua268\ua269\ua26a\ua26b\ua26c\ua26d\ua26e\ua26f\ua270\ua271\ua272\ua273\ua274\ua275\ua276\ua277\ua278\ua279\ua27a\ua27b\ua27c\ua27d\ua27e\ua27f\ua280\ua281\ua282\ua283\ua284\ua285\ua286\ua287\ua288\ua289\ua28a\ua28b\ua28c\ua28d\ua28e\ua28f\ua290\ua291\ua292\ua293\ua294\ua295\ua296\ua297\ua298\ua299\ua29a\ua29b\ua29c\ua29d\ua29e\ua29f\ua2a0\ua2a1\ua2a2\ua2a3\ua2a4\ua2a5\ua2a6\ua2a7\ua2a8\ua2a9\ua2aa\ua2ab\ua2ac\u
a2ad\ua2ae\ua2af\ua2b0\ua2b1\ua2b2\ua2b3\ua2b4\ua2b5\ua2b6\ua2b7\ua2b8\ua2b9\ua2ba\ua2bb\ua2bc\ua2bd\ua2be\ua2bf\ua2c0\ua2c1\ua2c2\ua2c3\ua2c4\ua2c5\ua2c6\ua2c7\ua2c8\ua2c9\ua2ca\ua2cb\ua2cc\ua2cd\ua2ce\ua2cf\ua2d0\ua2d1\ua2d2\ua2d3\ua2d4\ua2d5\ua2d6\ua2d7\ua2d8\ua2d9\ua2da\ua2db\ua2dc\ua2dd\ua2de\ua2df\ua2e0\ua2e1\ua2e2\ua2e3\ua2e4\ua2e5\ua2e6\ua2e7\ua2e8\ua2e9\ua2ea\ua2eb\ua2ec\ua2ed\ua2ee\ua2ef\ua2f0\ua2f1\ua2f2\ua2f3\ua2f4\ua2f5\ua2f6\ua2f7\ua2f8\ua2f9\ua2fa\ua2fb\ua2fc\ua2fd\ua2fe\ua2ff\ua300\ua301\ua302\ua303\ua304\ua305\ua306\ua307\ua308\ua309\ua30a\ua30b\ua30c\ua30d\ua30e\ua30f\ua310\ua311\ua312\ua313\ua314\ua315\ua316\ua317\ua318\ua319\ua31a\ua31b\ua31c\ua31d\ua31e\ua31f\ua320\ua321\ua322\ua323\ua324\ua325\ua326\ua327\ua328\ua329\ua32a\ua32b\ua32c\ua32d\ua32e\ua32f\ua330\ua331\ua332\ua333\ua334\ua335\ua336\ua337\ua338\ua339\ua33a\ua33b\ua33c\ua33d\ua33e\ua33f\ua340\ua341\ua342\ua343\ua344\ua345\ua346\ua347\ua348\ua349\ua34a\ua34b\ua34c\ua34d\ua34e\ua34f\ua350\ua351\ua352\ua353\ua354\ua355\ua356\ua357\ua358\ua359\ua35a\ua35b\ua35c\ua35d\ua35e\ua35f\ua360\ua361\ua362\ua363\ua364\ua365\ua366\ua367\ua368\ua369\ua36a\ua36b\ua36c\ua36d\ua36e\ua36f\ua370\ua371\ua372\ua373\ua374\ua375\ua376\ua377\ua378\ua379\ua37a\ua37b\ua37c\ua37d\ua37e\ua37f\ua380\ua381\ua382\ua383\ua384\ua385\ua386\ua387\ua388\ua389\ua38a\ua38b\ua38c\ua38d\ua38e\ua38f\ua390\ua391\ua392\ua393\ua394\ua395\ua396\ua397\ua398\ua399\ua39a\ua39b\ua39c\ua39d\ua39e\ua39f\ua3a0\ua3a1\ua3a2\ua3a3\ua3a4\ua3a5\ua3a6\ua3a7\ua3a8\ua3a9\ua3aa\ua3ab\ua3ac\ua3ad\ua3ae\ua3af\ua3b0\ua3b1\ua3b2\ua3b3\ua3b4\ua3b5\ua3b6\ua3b7\ua3b8\ua3b9\ua3ba\ua3bb\ua3bc\ua3bd\ua3be\ua3bf\ua3c0\ua3c1\ua3c2\ua3c3\ua3c4\ua3c5\ua3c6\ua3c7\ua3c8\ua3c9\ua3ca\ua3cb\ua3cc\ua3cd\ua3ce\ua3cf\ua3d0\ua3d1\ua3d2\ua3d3\ua3d4\ua3d5\ua3d6\ua3d7\ua3d8\ua3d9\ua3da\ua3db\ua3dc\ua3dd\ua3de\ua3df\ua3e0\ua3e1\ua3e2\ua3e3\ua3e4\ua3e5\ua3e6\ua3e7\ua3e8\ua3e9\ua3ea\ua3eb\ua3ec\ua3ed\ua3ee\ua3ef\ua3f0\ua3f1\ua3f2\ua3f3\ua3f4\ua3f5\ua3f6\ua3f7\ua3f8\ua3f9\ua3
fa\ua3fb\ua3fc\ua3fd\ua3fe\ua3ff\ua400\ua401\ua402\ua403\ua404\ua405\ua406\ua407\ua408\ua409\ua40a\ua40b\ua40c\ua40d\ua40e\ua40f\ua410\ua411\ua412\ua413\ua414\ua415\ua416\ua417\ua418\ua419\ua41a\ua41b\ua41c\ua41d\ua41e\ua41f\ua420\ua421\ua422\ua423\ua424\ua425\ua426\ua427\ua428\ua429\ua42a\ua42b\ua42c\ua42d\ua42e\ua42f\ua430\ua431\ua432\ua433\ua434\ua435\ua436\ua437\ua438\ua439\ua43a\ua43b\ua43c\ua43d\ua43e\ua43f\ua440\ua441\ua442\ua443\ua444\ua445\ua446\ua447\ua448\ua449\ua44a\ua44b\ua44c\ua44d\ua44e\ua44f\ua450\ua451\ua452\ua453\ua454\ua455\ua456\ua457\ua458\ua459\ua45a\ua45b\ua45c\ua45d\ua45e\ua45f\ua460\ua461\ua462\ua463\ua464\ua465\ua466\ua467\ua468\ua469\ua46a\ua46b\ua46c\ua46d\ua46e\ua46f\ua470\ua471\ua472\ua473\ua474\ua475\ua476\ua477\ua478\ua479\ua47a\ua47b\ua47c\ua47d\ua47e\ua47f\ua480\ua481\ua482\ua483\ua484\ua485\ua486\ua487\ua488\ua489\ua48a\ua48b\ua48c\ua800\ua801\ua803\ua804\ua805\ua807\ua808\ua809\ua80a\ua80c\ua80d\ua80e\ua80f\ua810\ua811\ua812\ua813\ua814\ua815\ua816\ua817\ua818\ua819\ua81a\ua81b\ua81c\ua81d\ua81e\ua81f\ua820\ua821\ua822\uac00\uac01\uac02\uac03\uac04\uac05\uac06\uac07\uac08\uac09\uac0a\uac0b\uac0c\uac0d\uac0e\uac0f\uac10\uac11\uac12\uac13\uac14\uac15\uac16\uac17\uac18\uac19\uac1a\uac1b\uac1c\uac1d\uac1e\uac1f\uac20\uac21\uac22\uac23\uac24\uac25\uac26\uac27\uac28\uac29\uac2a\uac2b\uac2c\uac2d\uac2e\uac2f\uac30\uac31\uac32\uac33\uac34\uac35\uac36\uac37\uac38\uac39\uac3a\uac3b\uac3c\uac3d\uac3e\uac3f\uac40\uac41\uac42\uac43\uac44\uac45\uac46\uac47\uac48\uac49\uac4a\uac4b\uac4c\uac4d\uac4e\uac4f\uac50\uac51\uac52\uac53\uac54\uac55\uac56\uac57\uac58\uac59\uac5a\uac5b\uac5c\uac5d\uac5e\uac5f\uac60\uac61\uac62\uac63\uac64\uac65\uac66\uac67\uac68\uac69\uac6a\uac6b\uac6c\uac6d\uac6e\uac6f\uac70\uac71\uac72\uac73\uac74\uac75\uac76\uac77\uac78\uac79\uac7a\uac7b\uac7c\uac7d\uac7e\uac7f\uac80\uac81\uac82\uac83\uac84\uac85\uac86\uac87\uac88\uac89\uac8a\uac8b\uac8c\uac8d\uac8e\uac8f\uac90\uac91\uac92\uac93\uac94\uac95\uac96\uac97\uac98\uac99\uac9a
\uac9b\uac9c\uac9d\uac9e\uac9f\uaca0\uaca1\uaca2\uaca3\uaca4\uaca5\uaca6\uaca7\uaca8\uaca9\uacaa\uacab\uacac\uacad\uacae\uacaf\uacb0\uacb1\uacb2\uacb3\uacb4\uacb5\uacb6\uacb7\uacb8\uacb9\uacba\uacbb\uacbc\uacbd\uacbe\uacbf\uacc0\uacc1\uacc2\uacc3\uacc4\uacc5\uacc6\uacc7\uacc8\uacc9\uacca\uaccb\uaccc\uaccd\uacce\uaccf\uacd0\uacd1\uacd2\uacd3\uacd4\uacd5\uacd6\uacd7\uacd8\uacd9\uacda\uacdb\uacdc\uacdd\uacde\uacdf\uace0\uace1\uace2\uace3\uace4\uace5\uace6\uace7\uace8\uace9\uacea\uaceb\uacec\uaced\uacee\uacef\uacf0\uacf1\uacf2\uacf3\uacf4\uacf5\uacf6\uacf7\uacf8\uacf9\uacfa\uacfb\uacfc\uacfd\uacfe\uacff\uad00\uad01\uad02\uad03\uad04\uad05\uad06\uad07\uad08\uad09\uad0a\uad0b\uad0c\uad0d\uad0e\uad0f\uad10\uad11\uad12\uad13\uad14\uad15\uad16\uad17\uad18\uad19\uad1a\uad1b\uad1c\uad1d\uad1e\uad1f\uad20\uad21\uad22\uad23\uad24\uad25\uad26\uad27\uad28\uad29\uad2a\uad2b\uad2c\uad2d\uad2e\uad2f\uad30\uad31\uad32\uad33\uad34\uad35\uad36\uad37\uad38\uad39\uad3a\uad3b\uad3c\uad3d\uad3e\uad3f\uad40\uad41\uad42\uad43\uad44\uad45\uad46\uad47\uad48\uad49\uad4a\uad4b\uad4c\uad4d\uad4e\uad4f\uad50\uad51\uad52\uad53\uad54\uad55\uad56\uad57\uad58\uad59\uad5a\uad5b\uad5c\uad5d\uad5e\uad5f\uad60\uad61\uad62\uad63\uad64\uad65\uad66\uad67\uad68\uad69\uad6a\uad6b\uad6c\uad6d\uad6e\uad6f\uad70\uad71\uad72\uad73\uad74\uad75\uad76\uad77\uad78\uad79\uad7a\uad7b\uad7c\uad7d\uad7e\uad7f\uad80\uad81\uad82\uad83\uad84\uad85\uad86\uad87\uad88\uad89\uad8a\uad8b\uad8c\uad8d\uad8e\uad8f\uad90\uad91\uad92\uad93\uad94\uad95\uad96\uad97\uad98\uad99\uad9a\uad9b\uad9c\uad9d\uad9e\uad9f\uada0\uada1\uada2\uada3\uada4\uada5\uada6\uada7\uada8\uada9\uadaa\uadab\uadac\uadad\uadae\uadaf\uadb0\uadb1\uadb2\uadb3\uadb4\uadb5\uadb6\uadb7\uadb8\uadb9\uadba\uadbb\uadbc\uadbd\uadbe\uadbf\uadc0\uadc1\uadc2\uadc3\uadc4\uadc5\uadc6\uadc7\uadc8\uadc9\uadca\uadcb\uadcc\uadcd\uadce\uadcf\uadd0\uadd1\uadd2\uadd3\uadd4\uadd5\uadd6\uadd7\uadd8\uadd9\uadda\uaddb\uaddc\uaddd\uadde\uaddf\uade0\uade1\uade2\uade3\uade4\uade5\uade6\uade7\u
ade8\uade9\uadea\uadeb\uadec\uaded\uadee\uadef\uadf0\uadf1\uadf2\uadf3\uadf4\uadf5\uadf6\uadf7\uadf8\uadf9\uadfa\uadfb\uadfc\uadfd\uadfe\uadff\uae00\uae01\uae02\uae03\uae04\uae05\uae06\uae07\uae08\uae09\uae0a\uae0b\uae0c\uae0d\uae0e\uae0f\uae10\uae11\uae12\uae13\uae14\uae15\uae16\uae17\uae18\uae19\uae1a\uae1b\uae1c\uae1d\uae1e\uae1f\uae20\uae21\uae22\uae23\uae24\uae25\uae26\uae27\uae28\uae29\uae2a\uae2b\uae2c\uae2d\uae2e\uae2f\uae30\uae31\uae32\uae33\uae34\uae35\uae36\uae37\uae38\uae39\uae3a\uae3b\uae3c\uae3d\uae3e\uae3f\uae40\uae41\uae42\uae43\uae44\uae45\uae46\uae47\uae48\uae49\uae4a\uae4b\uae4c\uae4d\uae4e\uae4f\uae50\uae51\uae52\uae53\uae54\uae55\uae56\uae57\uae58\uae59\uae5a\uae5b\uae5c\uae5d\uae5e\uae5f\uae60\uae61\uae62\uae63\uae64\uae65\uae66\uae67\uae68\uae69\uae6a\uae6b\uae6c\uae6d\uae6e\uae6f\uae70\uae71\uae72\uae73\uae74\uae75\uae76\uae77\uae78\uae79\uae7a\uae7b\uae7c\uae7d\uae7e\uae7f\uae80\uae81\uae82\uae83\uae84\uae85\uae86\uae87\uae88\uae89\uae8a\uae8b\uae8c\uae8d\uae8e\uae8f\uae90\uae91\uae92\uae93\uae94\uae95\uae96\uae97\uae98\uae99\uae9a\uae9b\uae9c\uae9d\uae9e\uae9f\uaea0\uaea1\uaea2\uaea3\uaea4\uaea5\uaea6\uaea7\uaea8\uaea9\uaeaa\uaeab\uaeac\uaead\uaeae\uaeaf\uaeb0\uaeb1\uaeb2\uaeb3\uaeb4\uaeb5\uaeb6\uaeb7\uaeb8\uaeb9\uaeba\uaebb\uaebc\uaebd\uaebe\uaebf\uaec0\uaec1\uaec2\uaec3\uaec4\uaec5\uaec6\uaec7\uaec8\uaec9\uaeca\uaecb\uaecc\uaecd\uaece\uaecf\uaed0\uaed1\uaed2\uaed3\uaed4\uaed5\uaed6\uaed7\uaed8\uaed9\uaeda\uaedb\uaedc\uaedd\uaede\uaedf\uaee0\uaee1\uaee2\uaee3\uaee4\uaee5\uaee6\uaee7\uaee8\uaee9\uaeea\uaeeb\uaeec\uaeed\uaeee\uaeef\uaef0\uaef1\uaef2\uaef3\uaef4\uaef5\uaef6\uaef7\uaef8\uaef9\uaefa\uaefb\uaefc\uaefd\uaefe\uaeff\uaf00\uaf01\uaf02\uaf03\uaf04\uaf05\uaf06\uaf07\uaf08\uaf09\uaf0a\uaf0b\uaf0c\uaf0d\uaf0e\uaf0f\uaf10\uaf11\uaf12\uaf13\uaf14\uaf15\uaf16\uaf17\uaf18\uaf19\uaf1a\uaf1b\uaf1c\uaf1d\uaf1e\uaf1f\uaf20\uaf21\uaf22\uaf23\uaf24\uaf25\uaf26\uaf27\uaf28\uaf29\uaf2a\uaf2b\uaf2c\uaf2d\uaf2e\uaf2f\uaf30\uaf31\uaf32\uaf33\uaf34\uaf
35\uaf36\uaf37\uaf38\uaf39\uaf3a\uaf3b\uaf3c\uaf3d\uaf3e\uaf3f\uaf40\uaf41\uaf42\uaf43\uaf44\uaf45\uaf46\uaf47\uaf48\uaf49\uaf4a\uaf4b\uaf4c\uaf4d\uaf4e\uaf4f\uaf50\uaf51\uaf52\uaf53\uaf54\uaf55\uaf56\uaf57\uaf58\uaf59\uaf5a\uaf5b\uaf5c\uaf5d\uaf5e\uaf5f\uaf60\uaf61\uaf62\uaf63\uaf64\uaf65\uaf66\uaf67\uaf68\uaf69\uaf6a\uaf6b\uaf6c\uaf6d\uaf6e\uaf6f\uaf70\uaf71\uaf72\uaf73\uaf74\uaf75\uaf76\uaf77\uaf78\uaf79\uaf7a\uaf7b\uaf7c\uaf7d\uaf7e\uaf7f\uaf80\uaf81\uaf82\uaf83\uaf84\uaf85\uaf86\uaf87\uaf88\uaf89\uaf8a\uaf8b\uaf8c\uaf8d\uaf8e\uaf8f\uaf90\uaf91\uaf92\uaf93\uaf94\uaf95\uaf96\uaf97\uaf98\uaf99\uaf9a\uaf9b\uaf9c\uaf9d\uaf9e\uaf9f\uafa0\uafa1\uafa2\uafa3\uafa4\uafa5\uafa6\uafa7\uafa8\uafa9\uafaa\uafab\uafac\uafad\uafae\uafaf\uafb0\uafb1\uafb2\uafb3\uafb4\uafb5\uafb6\uafb7\uafb8\uafb9\uafba\uafbb\uafbc\uafbd\uafbe\uafbf\uafc0\uafc1\uafc2\uafc3\uafc4\uafc5\uafc6\uafc7\uafc8\uafc9\uafca\uafcb\uafcc\uafcd\uafce\uafcf\uafd0\uafd1\uafd2\uafd3\uafd4\uafd5\uafd6\uafd7\uafd8\uafd9\uafda\uafdb\uafdc\uafdd\uafde\uafdf\uafe0\uafe1\uafe2\uafe3\uafe4\uafe5\uafe6\uafe7\uafe8\uafe9\uafea\uafeb\uafec\uafed\uafee\uafef\uaff0\uaff1\uaff2\uaff3\uaff4\uaff5\uaff6\uaff7\uaff8\uaff9\uaffa\uaffb\uaffc\uaffd\uaffe\uafff\ub000\ub001\ub002\ub003\ub004\ub005\ub006\ub007\ub008\ub009\ub00a\ub00b\ub00c\ub00d\ub00e\ub00f\ub010\ub011\ub012\ub013\ub014\ub015\ub016\ub017\ub018\ub019\ub01a\ub01b\ub01c\ub01d\ub01e\ub01f\ub020\ub021\ub022\ub023\ub024\ub025\ub026\ub027\ub028\ub029\ub02a\ub02b\ub02c\ub02d\ub02e\ub02f\ub030\ub031\ub032\ub033\ub034\ub035\ub036\ub037\ub038\ub039\ub03a\ub03b\ub03c\ub03d\ub03e\ub03f\ub040\ub041\ub042\ub043\ub044\ub045\ub046\ub047\ub048\ub049\ub04a\ub04b\ub04c\ub04d\ub04e\ub04f\ub050\ub051\ub052\ub053\ub054\ub055\ub056\ub057\ub058\ub059\ub05a\ub05b\ub05c\ub05d\ub05e\ub05f\ub060\ub061\ub062\ub063\ub064\ub065\ub066\ub067\ub068\ub069\ub06a\ub06b\ub06c\ub06d\ub06e\ub06f\ub070\ub071\ub072\ub073\ub074\ub075\ub076\ub077\ub078\ub079\ub07a\ub07b\ub07c\ub07d\ub07e\ub07f\ub080\ub081\ub082
\ub083\ub084\ub085\ub086\ub087\ub088\ub089\ub08a\ub08b\ub08c\ub08d\ub08e\ub08f\ub090\ub091\ub092\ub093\ub094\ub095\ub096\ub097\ub098\ub099\ub09a\ub09b\ub09c\ub09d\ub09e\ub09f\ub0a0\ub0a1\ub0a2\ub0a3\ub0a4\ub0a5\ub0a6\ub0a7\ub0a8\ub0a9\ub0aa\ub0ab\ub0ac\ub0ad\ub0ae\ub0af\ub0b0\ub0b1\ub0b2\ub0b3\ub0b4\ub0b5\ub0b6\ub0b7\ub0b8\ub0b9\ub0ba\ub0bb\ub0bc\ub0bd\ub0be\ub0bf\ub0c0\ub0c1\ub0c2\ub0c3\ub0c4\ub0c5\ub0c6\ub0c7\ub0c8\ub0c9\ub0ca\ub0cb\ub0cc\ub0cd\ub0ce\ub0cf\ub0d0\ub0d1\ub0d2\ub0d3\ub0d4\ub0d5\ub0d6\ub0d7\ub0d8\ub0d9\ub0da\ub0db\ub0dc\ub0dd\ub0de\ub0df\ub0e0\ub0e1\ub0e2\ub0e3\ub0e4\ub0e5\ub0e6\ub0e7\ub0e8\ub0e9\ub0ea\ub0eb\ub0ec\ub0ed\ub0ee\ub0ef\ub0f0\ub0f1\ub0f2\ub0f3\ub0f4\ub0f5\ub0f6\ub0f7\ub0f8\ub0f9\ub0fa\ub0fb\ub0fc\ub0fd\ub0fe\ub0ff\ub100\ub101\ub102\ub103\ub104\ub105\ub106\ub107\ub108\ub109\ub10a\ub10b\ub10c\ub10d\ub10e\ub10f\ub110\ub111\ub112\ub113\ub114\ub115\ub116\ub117\ub118\ub119\ub11a\ub11b\ub11c\ub11d\ub11e\ub11f\ub120\ub121\ub122\ub123\ub124\ub125\ub126\ub127\ub128\ub129\ub12a\ub12b\ub12c\ub12d\ub12e\ub12f\ub130\ub131\ub132\ub133\ub134\ub135\ub136\ub137\ub138\ub139\ub13a\ub13b\ub13c\ub13d\ub13e\ub13f\ub140\ub141\ub142\ub143\ub144\ub145\ub146\ub147\ub148\ub149\ub14a\ub14b\ub14c\ub14d\ub14e\ub14f\ub150\ub151\ub152\ub153\ub154\ub155\ub156\ub157\ub158\ub159\ub15a\ub15b\ub15c\ub15d\ub15e\ub15f\ub160\ub161\ub162\ub163\ub164\ub165\ub166\ub167\ub168\ub169\ub16a\ub16b\ub16c\ub16d\ub16e\ub16f\ub170\ub171\ub172\ub173\ub174\ub175\ub176\ub177\ub178\ub179\ub17a\ub17b\ub17c\ub17d\ub17e\ub17f\ub180\ub181\ub182\ub183\ub184\ub185\ub186\ub187\ub188\ub189\ub18a\ub18b\ub18c\ub18d\ub18e\ub18f\ub190\ub191\ub192\ub193\ub194\ub195\ub196\ub197\ub198\ub199\ub19a\ub19b\ub19c\ub19d\ub19e\ub19f\ub1a0\ub1a1\ub1a2\ub1a3\ub1a4\ub1a5\ub1a6\ub1a7\ub1a8\ub1a9\ub1aa\ub1ab\ub1ac\ub1ad\ub1ae\ub1af\ub1b0\ub1b1\ub1b2\ub1b3\ub1b4\ub1b5\ub1b6\ub1b7\ub1b8\ub1b9\ub1ba\ub1bb\ub1bc\ub1bd\ub1be\ub1bf\ub1c0\ub1c1\ub1c2\ub1c3\ub1c4\ub1c5\ub1c6\ub1c7\ub1c8\ub1c9\ub1ca\ub1cb\ub1cc\ub1cd\ub1ce\ub1cf\u
b1d0\ub1d1\ub1d2\ub1d3\ub1d4\ub1d5\ub1d6\ub1d7\ub1d8\ub1d9\ub1da\ub1db\ub1dc\ub1dd\ub1de\ub1df\ub1e0\ub1e1\ub1e2\ub1e3\ub1e4\ub1e5\ub1e6\ub1e7\ub1e8\ub1e9\ub1ea\ub1eb\ub1ec\ub1ed\ub1ee\ub1ef\ub1f0\ub1f1\ub1f2\ub1f3\ub1f4\ub1f5\ub1f6\ub1f7\ub1f8\ub1f9\ub1fa\ub1fb\ub1fc\ub1fd\ub1fe\ub1ff\ub200\ub201\ub202\ub203\ub204\ub205\ub206\ub207\ub208\ub209\ub20a\ub20b\ub20c\ub20d\ub20e\ub20f\ub210\ub211\ub212\ub213\ub214\ub215\ub216\ub217\ub218\ub219\ub21a\ub21b\ub21c\ub21d\ub21e\ub21f\ub220\ub221\ub222\ub223\ub224\ub225\ub226\ub227\ub228\ub229\ub22a\ub22b\ub22c\ub22d\ub22e\ub22f\ub230\ub231\ub232\ub233\ub234\ub235\ub236\ub237\ub238\ub239\ub23a\ub23b\ub23c\ub23d\ub23e\ub23f\ub240\ub241\ub242\ub243\ub244\ub245\ub246\ub247\ub248\ub249\ub24a\ub24b\ub24c\ub24d\ub24e\ub24f\ub250\ub251\ub252\ub253\ub254\ub255\ub256\ub257\ub258\ub259\ub25a\ub25b\ub25c\ub25d\ub25e\ub25f\ub260\ub261\ub262\ub263\ub264\ub265\ub266\ub267\ub268\ub269\ub26a\ub26b\ub26c\ub26d\ub26e\ub26f\ub270\ub271\ub272\ub273\ub274\ub275\ub276\ub277\ub278\ub279\ub27a\ub27b\ub27c\ub27d\ub27e\ub27f\ub280\ub281\ub282\ub283\ub284\ub285\ub286\ub287\ub288\ub289\ub28a\ub28b\ub28c\ub28d\ub28e\ub28f\ub290\ub291\ub292\ub293\ub294\ub295\ub296\ub297\ub298\ub299\ub29a\ub29b\ub29c\ub29d\ub29e\ub29f\ub2a0\ub2a1\ub2a2\ub2a3\ub2a4\ub2a5\ub2a6\ub2a7\ub2a8\ub2a9\ub2aa\ub2ab\ub2ac\ub2ad\ub2ae\ub2af\ub2b0\ub2b1\ub2b2\ub2b3\ub2b4\ub2b5\ub2b6\ub2b7\ub2b8\ub2b9\ub2ba\ub2bb\ub2bc\ub2bd\ub2be\ub2bf\ub2c0\ub2c1\ub2c2\ub2c3\ub2c4\ub2c5\ub2c6\ub2c7\ub2c8\ub2c9\ub2ca\ub2cb\ub2cc\ub2cd\ub2ce\ub2cf\ub2d0\ub2d1\ub2d2\ub2d3\ub2d4\ub2d5\ub2d6\ub2d7\ub2d8\ub2d9\ub2da\ub2db\ub2dc\ub2dd\ub2de\ub2df\ub2e0\ub2e1\ub2e2\ub2e3\ub2e4\ub2e5\ub2e6\ub2e7\ub2e8\ub2e9\ub2ea\ub2eb\ub2ec\ub2ed\ub2ee\ub2ef\ub2f0\ub2f1\ub2f2\ub2f3\ub2f4\ub2f5\ub2f6\ub2f7\ub2f8\ub2f9\ub2fa\ub2fb\ub2fc\ub2fd\ub2fe\ub2ff\ub300\ub301\ub302\ub303\ub304\ub305\ub306\ub307\ub308\ub309\ub30a\ub30b\ub30c\ub30d\ub30e\ub30f\ub310\ub311\ub312\ub313\ub314\ub315\ub316\ub317\ub318\ub319\ub31a\ub31b\ub31c\ub3
1d\ub31e\ub31f\ub320\ub321\ub322\ub323\ub324\ub325\ub326\ub327\ub328\ub329\ub32a\ub32b\ub32c\ub32d\ub32e\ub32f\ub330\ub331\ub332\ub333\ub334\ub335\ub336\ub337\ub338\ub339\ub33a\ub33b\ub33c\ub33d\ub33e\ub33f\ub340\ub341\ub342\ub343\ub344\ub345\ub346\ub347\ub348\ub349\ub34a\ub34b\ub34c\ub34d\ub34e\ub34f\ub350\ub351\ub352\ub353\ub354\ub355\ub356\ub357\ub358\ub359\ub35a\ub35b\ub35c\ub35d\ub35e\ub35f\ub360\ub361\ub362\ub363\ub364\ub365\ub366\ub367\ub368\ub369\ub36a\ub36b\ub36c\ub36d\ub36e\ub36f\ub370\ub371\ub372\ub373\ub374\ub375\ub376\ub377\ub378\ub379\ub37a\ub37b\ub37c\ub37d\ub37e\ub37f\ub380\ub381\ub382\ub383\ub384\ub385\ub386\ub387\ub388\ub389\ub38a\ub38b\ub38c\ub38d\ub38e\ub38f\ub390\ub391\ub392\ub393\ub394\ub395\ub396\ub397\ub398\ub399\ub39a\ub39b\ub39c\ub39d\ub39e\ub39f\ub3a0\ub3a1\ub3a2\ub3a3\ub3a4\ub3a5\ub3a6\ub3a7\ub3a8\ub3a9\ub3aa\ub3ab\ub3ac\ub3ad\ub3ae\ub3af\ub3b0\ub3b1\ub3b2\ub3b3\ub3b4\ub3b5\ub3b6\ub3b7\ub3b8\ub3b9\ub3ba\ub3bb\ub3bc\ub3bd\ub3be\ub3bf\ub3c0\ub3c1\ub3c2\ub3c3\ub3c4\ub3c5\ub3c6\ub3c7\ub3c8\ub3c9\ub3ca\ub3cb\ub3cc\ub3cd\ub3ce\ub3cf\ub3d0\ub3d1\ub3d2\ub3d3\ub3d4\ub3d5\ub3d6\ub3d7\ub3d8\ub3d9\ub3da\ub3db\ub3dc\ub3dd\ub3de\ub3df\ub3e0\ub3e1\ub3e2\ub3e3\ub3e4\ub3e5\ub3e6\ub3e7\ub3e8\ub3e9\ub3ea\ub3eb\ub3ec\ub3ed\ub3ee\ub3ef\ub3f0\ub3f1\ub3f2\ub3f3\ub3f4\ub3f5\ub3f6\ub3f7\ub3f8\ub3f9\ub3fa\ub3fb\ub3fc\ub3fd\ub3fe\ub3ff\ub400\ub401\ub402\ub403\ub404\ub405\ub406\ub407\ub408\ub409\ub40a\ub40b\ub40c\ub40d\ub40e\ub40f\ub410\ub411\ub412\ub413\ub414\ub415\ub416\ub417\ub418\ub419\ub41a\ub41b\ub41c\ub41d\ub41e\ub41f\ub420\ub421\ub422\ub423\ub424\ub425\ub426\ub427\ub428\ub429\ub42a\ub42b\ub42c\ub42d\ub42e\ub42f\ub430\ub431\ub432\ub433\ub434\ub435\ub436\ub437\ub438\ub439\ub43a\ub43b\ub43c\ub43d\ub43e\ub43f\ub440\ub441\ub442\ub443\ub444\ub445\ub446\ub447\ub448\ub449\ub44a\ub44b\ub44c\ub44d\ub44e\ub44f\ub450\ub451\ub452\ub453\ub454\ub455\ub456\ub457\ub458\ub459\ub45a\ub45b\ub45c\ub45d\ub45e\ub45f\ub460\ub461\ub462\ub463\ub464\ub465\ub466\ub467\ub468\ub469\ub46a
\ub46b\ub46c\ub46d\ub46e\ub46f\ub470\ub471\ub472\ub473\ub474\ub475\ub476\ub477\ub478\ub479\ub47a\ub47b\ub47c\ub47d\ub47e\ub47f\ub480\ub481\ub482\ub483\ub484\ub485\ub486\ub487\ub488\ub489\ub48a\ub48b\ub48c\ub48d\ub48e\ub48f\ub490\ub491\ub492\ub493\ub494\ub495\ub496\ub497\ub498\ub499\ub49a\ub49b\ub49c\ub49d\ub49e\ub49f\ub4a0\ub4a1\ub4a2\ub4a3\ub4a4\ub4a5\ub4a6\ub4a7\ub4a8\ub4a9\ub4aa\ub4ab\ub4ac\ub4ad\ub4ae\ub4af\ub4b0\ub4b1\ub4b2\ub4b3\ub4b4\ub4b5\ub4b6\ub4b7\ub4b8\ub4b9\ub4ba\ub4bb\ub4bc\ub4bd\ub4be\ub4bf\ub4c0\ub4c1\ub4c2\ub4c3\ub4c4\ub4c5\ub4c6\ub4c7\ub4c8\ub4c9\ub4ca\ub4cb\ub4cc\ub4cd\ub4ce\ub4cf\ub4d0\ub4d1\ub4d2\ub4d3\ub4d4\ub4d5\ub4d6\ub4d7\ub4d8\ub4d9\ub4da\ub4db\ub4dc\ub4dd\ub4de\ub4df\ub4e0\ub4e1\ub4e2\ub4e3\ub4e4\ub4e5\ub4e6\ub4e7\ub4e8\ub4e9\ub4ea\ub4eb\ub4ec\ub4ed\ub4ee\ub4ef\ub4f0\ub4f1\ub4f2\ub4f3\ub4f4\ub4f5\ub4f6\ub4f7\ub4f8\ub4f9\ub4fa\ub4fb\ub4fc\ub4fd\ub4fe\ub4ff\ub500\ub501\ub502\ub503\ub504\ub505\ub506\ub507\ub508\ub509\ub50a\ub50b\ub50c\ub50d\ub50e\ub50f\ub510\ub511\ub512\ub513\ub514\ub515\ub516\ub517\ub518\ub519\ub51a\ub51b\ub51c\ub51d\ub51e\ub51f\ub520\ub521\ub522\ub523\ub524\ub525\ub526\ub527\ub528\ub529\ub52a\ub52b\ub52c\ub52d\ub52e\ub52f\ub530\ub531\ub532\ub533\ub534\ub535\ub536\ub537\ub538\ub539\ub53a\ub53b\ub53c\ub53d\ub53e\ub53f\ub540\ub541\ub542\ub543\ub544\ub545\ub546\ub547\ub548\ub549\ub54a\ub54b\ub54c\ub54d\ub54e\ub54f\ub550\ub551\ub552\ub553\ub554\ub555\ub556\ub557\ub558\ub559\ub55a\ub55b\ub55c\ub55d\ub55e\ub55f\ub560\ub561\ub562\ub563\ub564\ub565\ub566\ub567\ub568\ub569\ub56a\ub56b\ub56c\ub56d\ub56e\ub56f\ub570\ub571\ub572\ub573\ub574\ub575\ub576\ub577\ub578\ub579\ub57a\ub57b\ub57c\ub57d\ub57e\ub57f\ub580\ub581\ub582\ub583\ub584\ub585\ub586\ub587\ub588\ub589\ub58a\ub58b\ub58c\ub58d\ub58e\ub58f\ub590\ub591\ub592\ub593\ub594\ub595\ub596\ub597\ub598\ub599\ub59a\ub59b\ub59c\ub59d\ub59e\ub59f\ub5a0\ub5a1\ub5a2\ub5a3\ub5a4\ub5a5\ub5a6\ub5a7\ub5a8\ub5a9\ub5aa\ub5ab\ub5ac\ub5ad\ub5ae\ub5af\ub5b0\ub5b1\ub5b2\ub5b3\ub5b4\ub5b5\ub5b6\ub5b7\u
b5b8\ub5b9\ub5ba\ub5bb\ub5bc\ub5bd\ub5be\ub5bf\ub5c0\ub5c1\ub5c2\ub5c3\ub5c4\ub5c5\ub5c6\ub5c7\ub5c8\ub5c9\ub5ca\ub5cb\ub5cc\ub5cd\ub5ce\ub5cf\ub5d0\ub5d1\ub5d2\ub5d3\ub5d4\ub5d5\ub5d6\ub5d7\ub5d8\ub5d9\ub5da\ub5db\ub5dc\ub5dd\ub5de\ub5df\ub5e0\ub5e1\ub5e2\ub5e3\ub5e4\ub5e5\ub5e6\ub5e7\ub5e8\ub5e9\ub5ea\ub5eb\ub5ec\ub5ed\ub5ee\ub5ef\ub5f0\ub5f1\ub5f2\ub5f3\ub5f4\ub5f5\ub5f6\ub5f7\ub5f8\ub5f9\ub5fa\ub5fb\ub5fc\ub5fd\ub5fe\ub5ff\ub600\ub601\ub602\ub603\ub604\ub605\ub606\ub607\ub608\ub609\ub60a\ub60b\ub60c\ub60d\ub60e\ub60f\ub610\ub611\ub612\ub613\ub614\ub615\ub616\ub617\ub618\ub619\ub61a\ub61b\ub61c\ub61d\ub61e\ub61f\ub620\ub621\ub622\ub623\ub624\ub625\ub626\ub627\ub628\ub629\ub62a\ub62b\ub62c\ub62d\ub62e\ub62f\ub630\ub631\ub632\ub633\ub634\ub635\ub636\ub637\ub638\ub639\ub63a\ub63b\ub63c\ub63d\ub63e\ub63f\ub640\ub641\ub642\ub643\ub644\ub645\ub646\ub647\ub648\ub649\ub64a\ub64b\ub64c\ub64d\ub64e\ub64f\ub650\ub651\ub652\ub653\ub654\ub655\ub656\ub657\ub658\ub659\ub65a\ub65b\ub65c\ub65d\ub65e\ub65f\ub660\ub661\ub662\ub663\ub664\ub665\ub666\ub667\ub668\ub669\ub66a\ub66b\ub66c\ub66d\ub66e\ub66f\ub670\ub671\ub672\ub673\ub674\ub675\ub676\ub677\ub678\ub679\ub67a\ub67b\ub67c\ub67d\ub67e\ub67f\ub680\ub681\ub682\ub683\ub684\ub685\ub686\ub687\ub688\ub689\ub68a\ub68b\ub68c\ub68d\ub68e\ub68f\ub690\ub691\ub692\ub693\ub694\ub695\ub696\ub697\ub698\ub699\ub69a\ub69b\ub69c\ub69d\ub69e\ub69f\ub6a0\ub6a1\ub6a2\ub6a3\ub6a4\ub6a5\ub6a6\ub6a7\ub6a8\ub6a9\ub6aa\ub6ab\ub6ac\ub6ad\ub6ae\ub6af\ub6b0\ub6b1\ub6b2\ub6b3\ub6b4\ub6b5\ub6b6\ub6b7\ub6b8\ub6b9\ub6ba\ub6bb\ub6bc\ub6bd\ub6be\ub6bf\ub6c0\ub6c1\ub6c2\ub6c3\ub6c4\ub6c5\ub6c6\ub6c7\ub6c8\ub6c9\ub6ca\ub6cb\ub6cc\ub6cd\ub6ce\ub6cf\ub6d0\ub6d1\ub6d2\ub6d3\ub6d4\ub6d5\ub6d6\ub6d7\ub6d8\ub6d9\ub6da\ub6db\ub6dc\ub6dd\ub6de\ub6df\ub6e0\ub6e1\ub6e2\ub6e3\ub6e4\ub6e5\ub6e6\ub6e7\ub6e8\ub6e9\ub6ea\ub6eb\ub6ec\ub6ed\ub6ee\ub6ef\ub6f0\ub6f1\ub6f2\ub6f3\ub6f4\ub6f5\ub6f6\ub6f7\ub6f8\ub6f9\ub6fa\ub6fb\ub6fc\ub6fd\ub6fe\ub6ff\ub700\ub701\ub702\ub703\ub704\ub7
05\ub706\ub707\ub708\ub709\ub70a\ub70b\ub70c\ub70d\ub70e\ub70f\ub710\ub711\ub712\ub713\ub714\ub715\ub716\ub717\ub718\ub719\ub71a\ub71b\ub71c\ub71d\ub71e\ub71f\ub720\ub721\ub722\ub723\ub724\ub725\ub726\ub727\ub728\ub729\ub72a\ub72b\ub72c\ub72d\ub72e\ub72f\ub730\ub731\ub732\ub733\ub734\ub735\ub736\ub737\ub738\ub739\ub73a\ub73b\ub73c\ub73d\ub73e\ub73f\ub740\ub741\ub742\ub743\ub744\ub745\ub746\ub747\ub748\ub749\ub74a\ub74b\ub74c\ub74d\ub74e\ub74f\ub750\ub751\ub752\ub753\ub754\ub755\ub756\ub757\ub758\ub759\ub75a\ub75b\ub75c\ub75d\ub75e\ub75f\ub760\ub761\ub762\ub763\ub764\ub765\ub766\ub767\ub768\ub769\ub76a\ub76b\ub76c\ub76d\ub76e\ub76f\ub770\ub771\ub772\ub773\ub774\ub775\ub776\ub777\ub778\ub779\ub77a\ub77b\ub77c\ub77d\ub77e\ub77f\ub780\ub781\ub782\ub783\ub784\ub785\ub786\ub787\ub788\ub789\ub78a\ub78b\ub78c\ub78d\ub78e\ub78f\ub790\ub791\ub792\ub793\ub794\ub795\ub796\ub797\ub798\ub799\ub79a\ub79b\ub79c\ub79d\ub79e\ub79f\ub7a0\ub7a1\ub7a2\ub7a3\ub7a4\ub7a5\ub7a6\ub7a7\ub7a8\ub7a9\ub7aa\ub7ab\ub7ac\ub7ad\ub7ae\ub7af\ub7b0\ub7b1\ub7b2\ub7b3\ub7b4\ub7b5\ub7b6\ub7b7\ub7b8\ub7b9\ub7ba\ub7bb\ub7bc\ub7bd\ub7be\ub7bf\ub7c0\ub7c1\ub7c2\ub7c3\ub7c4\ub7c5\ub7c6\ub7c7\ub7c8\ub7c9\ub7ca\ub7cb\ub7cc\ub7cd\ub7ce\ub7cf\ub7d0\ub7d1\ub7d2\ub7d3\ub7d4\ub7d5\ub7d6\ub7d7\ub7d8\ub7d9\ub7da\ub7db\ub7dc\ub7dd\ub7de\ub7df\ub7e0\ub7e1\ub7e2\ub7e3\ub7e4\ub7e5\ub7e6\ub7e7\ub7e8\ub7e9\ub7ea\ub7eb\ub7ec\ub7ed\ub7ee\ub7ef\ub7f0\ub7f1\ub7f2\ub7f3\ub7f4\ub7f5\ub7f6\ub7f7\ub7f8\ub7f9\ub7fa\ub7fb\ub7fc\ub7fd\ub7fe\ub7ff\ub800\ub801\ub802\ub803\ub804\ub805\ub806\ub807\ub808\ub809\ub80a\ub80b\ub80c\ub80d\ub80e\ub80f\ub810\ub811\ub812\ub813\ub814\ub815\ub816\ub817\ub818\ub819\ub81a\ub81b\ub81c\ub81d\ub81e\ub81f\ub820\ub821\ub822\ub823\ub824\ub825\ub826\ub827\ub828\ub829\ub82a\ub82b\ub82c\ub82d\ub82e\ub82f\ub830\ub831\ub832\ub833\ub834\ub835\ub836\ub837\ub838\ub839\ub83a\ub83b\ub83c\ub83d\ub83e\ub83f\ub840\ub841\ub842\ub843\ub844\ub845\ub846\ub847\ub848\ub849\ub84a\ub84b\ub84c\ub84d\ub84e\ub84f\ub850\ub851\ub852
\ub853\ub854\ub855\ub856\ub857\ub858\ub859\ub85a\ub85b\ub85c\ub85d\ub85e\ub85f\ub860\ub861\ub862\ub863\ub864\ub865\ub866\ub867\ub868\ub869\ub86a\ub86b\ub86c\ub86d\ub86e\ub86f\ub870\ub871\ub872\ub873\ub874\ub875\ub876\ub877\ub878\ub879\ub87a\ub87b\ub87c\ub87d\ub87e\ub87f\ub880\ub881\ub882\ub883\ub884\ub885\ub886\ub887\ub888\ub889\ub88a\ub88b\ub88c\ub88d\ub88e\ub88f\ub890\ub891\ub892\ub893\ub894\ub895\ub896\ub897\ub898\ub899\ub89a\ub89b\ub89c\ub89d\ub89e\ub89f\ub8a0\ub8a1\ub8a2\ub8a3\ub8a4\ub8a5\ub8a6\ub8a7\ub8a8\ub8a9\ub8aa\ub8ab\ub8ac\ub8ad\ub8ae\ub8af\ub8b0\ub8b1\ub8b2\ub8b3\ub8b4\ub8b5\ub8b6\ub8b7\ub8b8\ub8b9\ub8ba\ub8bb\ub8bc\ub8bd\ub8be\ub8bf\ub8c0\ub8c1\ub8c2\ub8c3\ub8c4\ub8c5\ub8c6\ub8c7\ub8c8\ub8c9\ub8ca\ub8cb\ub8cc\ub8cd\ub8ce\ub8cf\ub8d0\ub8d1\ub8d2\ub8d3\ub8d4\ub8d5\ub8d6\ub8d7\ub8d8\ub8d9\ub8da\ub8db\ub8dc\ub8dd\ub8de\ub8df\ub8e0\ub8e1\ub8e2\ub8e3\ub8e4\ub8e5\ub8e6\ub8e7\ub8e8\ub8e9\ub8ea\ub8eb\ub8ec\ub8ed\ub8ee\ub8ef\ub8f0\ub8f1\ub8f2\ub8f3\ub8f4\ub8f5\ub8f6\ub8f7\ub8f8\ub8f9\ub8fa\ub8fb\ub8fc\ub8fd\ub8fe\ub8ff\ub900\ub901\ub902\ub903\ub904\ub905\ub906\ub907\ub908\ub909\ub90a\ub90b\ub90c\ub90d\ub90e\ub90f\ub910\ub911\ub912\ub913\ub914\ub915\ub916\ub917\ub918\ub919\ub91a\ub91b\ub91c\ub91d\ub91e\ub91f\ub920\ub921\ub922\ub923\ub924\ub925\ub926\ub927\ub928\ub929\ub92a\ub92b\ub92c\ub92d\ub92e\ub92f\ub930\ub931\ub932\ub933\ub934\ub935\ub936\ub937\ub938\ub939\ub93a\ub93b\ub93c\ub93d\ub93e\ub93f\ub940\ub941\ub942\ub943\ub944\ub945\ub946\ub947\ub948\ub949\ub94a\ub94b\ub94c\ub94d\ub94e\ub94f\ub950\ub951\ub952\ub953\ub954\ub955\ub956\ub957\ub958\ub959\ub95a\ub95b\ub95c\ub95d\ub95e\ub95f\ub960\ub961\ub962\ub963\ub964\ub965\ub966\ub967\ub968\ub969\ub96a\ub96b\ub96c\ub96d\ub96e\ub96f\ub970\ub971\ub972\ub973\ub974\ub975\ub976\ub977\ub978\ub979\ub97a\ub97b\ub97c\ub97d\ub97e\ub97f\ub980\ub981\ub982\ub983\ub984\ub985\ub986\ub987\ub988\ub989\ub98a\ub98b\ub98c\ub98d\ub98e\ub98f\ub990\ub991\ub992\ub993\ub994\ub995\ub996\ub997\ub998\ub999\ub99a\ub99b\ub99c\ub99d\ub99e\ub99f\u
b9a0\ub9a1\ub9a2\ub9a3\ub9a4\ub9a5\ub9a6\ub9a7\ub9a8\ub9a9\ub9aa\ub9ab\ub9ac\ub9ad\ub9ae\ub9af\ub9b0\ub9b1\ub9b2\ub9b3\ub9b4\ub9b5\ub9b6\ub9b7\ub9b8\ub9b9\ub9ba\ub9bb\ub9bc\ub9bd\ub9be\ub9bf\ub9c0\ub9c1\ub9c2\ub9c3\ub9c4\ub9c5\ub9c6\ub9c7\ub9c8\ub9c9\ub9ca\ub9cb\ub9cc\ub9cd\ub9ce\ub9cf\ub9d0\ub9d1\ub9d2\ub9d3\ub9d4\ub9d5\ub9d6\ub9d7\ub9d8\ub9d9\ub9da\ub9db\ub9dc\ub9dd\ub9de\ub9df\ub9e0\ub9e1\ub9e2\ub9e3\ub9e4\ub9e5\ub9e6\ub9e7\ub9e8\ub9e9\ub9ea\ub9eb\ub9ec\ub9ed\ub9ee\ub9ef\ub9f0\ub9f1\ub9f2\ub9f3\ub9f4\ub9f5\ub9f6\ub9f7\ub9f8\ub9f9\ub9fa\ub9fb\ub9fc\ub9fd\ub9fe\ub9ff\uba00\uba01\uba02\uba03\uba04\uba05\uba06\uba07\uba08\uba09\uba0a\uba0b\uba0c\uba0d\uba0e\uba0f\uba10\uba11\uba12\uba13\uba14\uba15\uba16\uba17\uba18\uba19\uba1a\uba1b\uba1c\uba1d\uba1e\uba1f\uba20\uba21\uba22\uba23\uba24\uba25\uba26\uba27\uba28\uba29\uba2a\uba2b\uba2c\uba2d\uba2e\uba2f\uba30\uba31\uba32\uba33\uba34\uba35\uba36\uba37\uba38\uba39\uba3a\uba3b\uba3c\uba3d\uba3e\uba3f\uba40\uba41\uba42\uba43\uba44\uba45\uba46\uba47\uba48\uba49\uba4a\uba4b\uba4c\uba4d\uba4e\uba4f\uba50\uba51\uba52\uba53\uba54\uba55\uba56\uba57\uba58\uba59\uba5a\uba5b\uba5c\uba5d\uba5e\uba5f\uba60\uba61\uba62\uba63\uba64\uba65\uba66\uba67\uba68\uba69\uba6a\uba6b\uba6c\uba6d\uba6e\uba6f\uba70\uba71\uba72\uba73\uba74\uba75\uba76\uba77\uba78\uba79\uba7a\uba7b\uba7c\uba7d\uba7e\uba7f\uba80\uba81\uba82\uba83\uba84\uba85\uba86\uba87\uba88\uba89\uba8a\uba8b\uba8c\uba8d\uba8e\uba8f\uba90\uba91\uba92\uba93\uba94\uba95\uba96\uba97\uba98\uba99\uba9a\uba9b\uba9c\uba9d\uba9e\uba9f\ubaa0\ubaa1\ubaa2\ubaa3\ubaa4\ubaa5\ubaa6\ubaa7\ubaa8\ubaa9\ubaaa\ubaab\ubaac\ubaad\ubaae\ubaaf\ubab0\ubab1\ubab2\ubab3\ubab4\ubab5\ubab6\ubab7\ubab8\ubab9\ubaba\ubabb\ubabc\ubabd\ubabe\ubabf\ubac0\ubac1\ubac2\ubac3\ubac4\ubac5\ubac6\ubac7\ubac8\ubac9\ubaca\ubacb\ubacc\ubacd\ubace\ubacf\ubad0\ubad1\ubad2\ubad3\ubad4\ubad5\ubad6\ubad7\ubad8\ubad9\ubada\ubadb\ubadc\ubadd\ubade\ubadf\ubae0\ubae1\ubae2\ubae3\ubae4\ubae5\ubae6\ubae7\ubae8\ubae9\ubaea\ubaeb\ubaec\uba
ed\ubaee\ubaef\ubaf0\ubaf1\ubaf2\ubaf3\ubaf4\ubaf5\ubaf6\ubaf7\ubaf8\ubaf9\ubafa\ubafb\ubafc\ubafd\ubafe\ubaff\ubb00\ubb01\ubb02\ubb03\ubb04\ubb05\ubb06\ubb07\ubb08\ubb09\ubb0a\ubb0b\ubb0c\ubb0d\ubb0e\ubb0f\ubb10\ubb11\ubb12\ubb13\ubb14\ubb15\ubb16\ubb17\ubb18\ubb19\ubb1a\ubb1b\ubb1c\ubb1d\ubb1e\ubb1f\ubb20\ubb21\ubb22\ubb23\ubb24\ubb25\ubb26\ubb27\ubb28\ubb29\ubb2a\ubb2b\ubb2c\ubb2d\ubb2e\ubb2f\ubb30\ubb31\ubb32\ubb33\ubb34\ubb35\ubb36\ubb37\ubb38\ubb39\ubb3a\ubb3b\ubb3c\ubb3d\ubb3e\ubb3f\ubb40\ubb41\ubb42\ubb43\ubb44\ubb45\ubb46\ubb47\ubb48\ubb49\ubb4a\ubb4b\ubb4c\ubb4d\ubb4e\ubb4f\ubb50\ubb51\ubb52\ubb53\ubb54\ubb55\ubb56\ubb57\ubb58\ubb59\ubb5a\ubb5b\ubb5c\ubb5d\ubb5e\ubb5f\ubb60\ubb61\ubb62\ubb63\ubb64\ubb65\ubb66\ubb67\ubb68\ubb69\ubb6a\ubb6b\ubb6c\ubb6d\ubb6e\ubb6f\ubb70\ubb71\ubb72\ubb73\ubb74\ubb75\ubb76\ubb77\ubb78\ubb79\ubb7a\ubb7b\ubb7c\ubb7d\ubb7e\ubb7f\ubb80\ubb81\ubb82\ubb83\ubb84\ubb85\ubb86\ubb87\ubb88\ubb89\ubb8a\ubb8b\ubb8c\ubb8d\ubb8e\ubb8f\ubb90\ubb91\ubb92\ubb93\ubb94\ubb95\ubb96\ubb97\ubb98\ubb99\ubb9a\ubb9b\ubb9c\ubb9d\ubb9e\ubb9f\ubba0\ubba1\ubba2\ubba3\ubba4\ubba5\ubba6\ubba7\ubba8\ubba9\ubbaa\ubbab\ubbac\ubbad\ubbae\ubbaf\ubbb0\ubbb1\ubbb2\ubbb3\ubbb4\ubbb5\ubbb6\ubbb7\ubbb8\ubbb9\ubbba\ubbbb\ubbbc\ubbbd\ubbbe\ubbbf\ubbc0\ubbc1\ubbc2\ubbc3\ubbc4\ubbc5\ubbc6\ubbc7\ubbc8\ubbc9\ubbca\ubbcb\ubbcc\ubbcd\ubbce\ubbcf\ubbd0\ubbd1\ubbd2\ubbd3\ubbd4\ubbd5\ubbd6\ubbd7\ubbd8\ubbd9\ubbda\ubbdb\ubbdc\ubbdd\ubbde\ubbdf\ubbe0\ubbe1\ubbe2\ubbe3\ubbe4\ubbe5\ubbe6\ubbe7\ubbe8\ubbe9\ubbea\ubbeb\ubbec\ubbed\ubbee\ubbef\ubbf0\ubbf1\ubbf2\ubbf3\ubbf4\ubbf5\ubbf6\ubbf7\ubbf8\ubbf9\ubbfa\ubbfb\ubbfc\ubbfd\ubbfe\ubbff\ubc00\ubc01\ubc02\ubc03\ubc04\ubc05\ubc06\ubc07\ubc08\ubc09\ubc0a\ubc0b\ubc0c\ubc0d\ubc0e\ubc0f\ubc10\ubc11\ubc12\ubc13\ubc14\ubc15\ubc16\ubc17\ubc18\ubc19\ubc1a\ubc1b\ubc1c\ubc1d\ubc1e\ubc1f\ubc20\ubc21\ubc22\ubc23\ubc24\ubc25\ubc26\ubc27\ubc28\ubc29\ubc2a\ubc2b\ubc2c\ubc2d\ubc2e\ubc2f\ubc30\ubc31\ubc32\ubc33\ubc34\ubc35\ubc36\ubc37\ubc38\ubc39\ubc3a
\ubc3b\ubc3c\ubc3d\ubc3e\ubc3f\ubc40\ubc41\ubc42\ubc43\ubc44\ubc45\ubc46\ubc47\ubc48\ubc49\ubc4a\ubc4b\ubc4c\ubc4d\ubc4e\ubc4f\ubc50\ubc51\ubc52\ubc53\ubc54\ubc55\ubc56\ubc57\ubc58\ubc59\ubc5a\ubc5b\ubc5c\ubc5d\ubc5e\ubc5f\ubc60\ubc61\ubc62\ubc63\ubc64\ubc65\ubc66\ubc67\ubc68\ubc69\ubc6a\ubc6b\ubc6c\ubc6d\ubc6e\ubc6f\ubc70\ubc71\ubc72\ubc73\ubc74\ubc75\ubc76\ubc77\ubc78\ubc79\ubc7a\ubc7b\ubc7c\ubc7d\ubc7e\ubc7f\ubc80\ubc81\ubc82\ubc83\ubc84\ubc85\ubc86\ubc87\ubc88\ubc89\ubc8a\ubc8b\ubc8c\ubc8d\ubc8e\ubc8f\ubc90\ubc91\ubc92\ubc93\ubc94\ubc95\ubc96\ubc97\ubc98\ubc99\ubc9a\ubc9b\ubc9c\ubc9d\ubc9e\ubc9f\ubca0\ubca1\ubca2\ubca3\ubca4\ubca5\ubca6\ubca7\ubca8\ubca9\ubcaa\ubcab\ubcac\ubcad\ubcae\ubcaf\ubcb0\ubcb1\ubcb2\ubcb3\ubcb4\ubcb5\ubcb6\ubcb7\ubcb8\ubcb9\ubcba\ubcbb\ubcbc\ubcbd\ubcbe\ubcbf\ubcc0\ubcc1\ubcc2\ubcc3\ubcc4\ubcc5\ubcc6\ubcc7\ubcc8\ubcc9\ubcca\ubccb\ubccc\ubccd\ubcce\ubccf\ubcd0\ubcd1\ubcd2\ubcd3\ubcd4\ubcd5\ubcd6\ubcd7\ubcd8\ubcd9\ubcda\ubcdb\ubcdc\ubcdd\ubcde\ubcdf\ubce0\ubce1\ubce2\ubce3\ubce4\ubce5\ubce6\ubce7\ubce8\ubce9\ubcea\ubceb\ubcec\ubced\ubcee\ubcef\ubcf0\ubcf1\ubcf2\ubcf3\ubcf4\ubcf5\ubcf6\ubcf7\ubcf8\ubcf9\ubcfa\ubcfb\ubcfc\ubcfd\ubcfe\ubcff\ubd00\ubd01\ubd02\ubd03\ubd04\ubd05\ubd06\ubd07\ubd08\ubd09\ubd0a\ubd0b\ubd0c\ubd0d\ubd0e\ubd0f\ubd10\ubd11\ubd12\ubd13\ubd14\ubd15\ubd16\ubd17\ubd18\ubd19\ubd1a\ubd1b\ubd1c\ubd1d\ubd1e\ubd1f\ubd20\ubd21\ubd22\ubd23\ubd24\ubd25\ubd26\ubd27\ubd28\ubd29\ubd2a\ubd2b\ubd2c\ubd2d\ubd2e\ubd2f\ubd30\ubd31\ubd32\ubd33\ubd34\ubd35\ubd36\ubd37\ubd38\ubd39\ubd3a\ubd3b\ubd3c\ubd3d\ubd3e\ubd3f\ubd40\ubd41\ubd42\ubd43\ubd44\ubd45\ubd46\ubd47\ubd48\ubd49\ubd4a\ubd4b\ubd4c\ubd4d\ubd4e\ubd4f\ubd50\ubd51\ubd52\ubd53\ubd54\ubd55\ubd56\ubd57\ubd58\ubd59\ubd5a\ubd5b\ubd5c\ubd5d\ubd5e\ubd5f\ubd60\ubd61\ubd62\ubd63\ubd64\ubd65\ubd66\ubd67\ubd68\ubd69\ubd6a\ubd6b\ubd6c\ubd6d\ubd6e\ubd6f\ubd70\ubd71\ubd72\ubd73\ubd74\ubd75\ubd76\ubd77\ubd78\ubd79\ubd7a\ubd7b\ubd7c\ubd7d\ubd7e\ubd7f\ubd80\ubd81\ubd82\ubd83\ubd84\ubd85\ubd86\ubd87\u
bd88\ubd89\ubd8a\ubd8b\ubd8c\ubd8d\ubd8e\ubd8f\ubd90\ubd91\ubd92\ubd93\ubd94\ubd95\ubd96\ubd97\ubd98\ubd99\ubd9a\ubd9b\ubd9c\ubd9d\ubd9e\ubd9f\ubda0\ubda1\ubda2\ubda3\ubda4\ubda5\ubda6\ubda7\ubda8\ubda9\ubdaa\ubdab\ubdac\ubdad\ubdae\ubdaf\ubdb0\ubdb1\ubdb2\ubdb3\ubdb4\ubdb5\ubdb6\ubdb7\ubdb8\ubdb9\ubdba\ubdbb\ubdbc\ubdbd\ubdbe\ubdbf\ubdc0\ubdc1\ubdc2\ubdc3\ubdc4\ubdc5\ubdc6\ubdc7\ubdc8\ubdc9\ubdca\ubdcb\ubdcc\ubdcd\ubdce\ubdcf\ubdd0\ubdd1\ubdd2\ubdd3\ubdd4\ubdd5\ubdd6\ubdd7\ubdd8\ubdd9\ubdda\ubddb\ubddc\ubddd\ubdde\ubddf\ubde0\ubde1\ubde2\ubde3\ubde4\ubde5\ubde6\ubde7\ubde8\ubde9\ubdea\ubdeb\ubdec\ubded\ubdee\ubdef\ubdf0\ubdf1\ubdf2\ubdf3\ubdf4\ubdf5\ubdf6\ubdf7\ubdf8\ubdf9\ubdfa\ubdfb\ubdfc\ubdfd\ubdfe\ubdff\ube00\ube01\ube02\ube03\ube04\ube05\ube06\ube07\ube08\ube09\ube0a\ube0b\ube0c\ube0d\ube0e\ube0f\ube10\ube11\ube12\ube13\ube14\ube15\ube16\ube17\ube18\ube19\ube1a\ube1b\ube1c\ube1d\ube1e\ube1f\ube20\ube21\ube22\ube23\ube24\ube25\ube26\ube27\ube28\ube29\ube2a\ube2b\ube2c\ube2d\ube2e\ube2f\ube30\ube31\ube32\ube33\ube34\ube35\ube36\ube37\ube38\ube39\ube3a\ube3b\ube3c\ube3d\ube3e\ube3f\ube40\ube41\ube42\ube43\ube44\ube45\ube46\ube47\ube48\ube49\ube4a\ube4b\ube4c\ube4d\ube4e\ube4f\ube50\ube51\ube52\ube53\ube54\ube55\ube56\ube57\ube58\ube59\ube5a\ube5b\ube5c\ube5d\ube5e\ube5f\ube60\ube61\ube62\ube63\ube64\ube65\ube66\ube67\ube68\ube69\ube6a\ube6b\ube6c\ube6d\ube6e\ube6f\ube70\ube71\ube72\ube73\ube74\ube75\ube76\ube77\ube78\ube79\ube7a\ube7b\ube7c\ube7d\ube7e\ube7f\ube80\ube81\ube82\ube83\ube84\ube85\ube86\ube87\ube88\ube89\ube8a\ube8b\ube8c\ube8d\ube8e\ube8f\ube90\ube91\ube92\ube93\ube94\ube95\ube96\ube97\ube98\ube99\ube9a\ube9b\ube9c\ube9d\ube9e\ube9f\ubea0\ubea1\ubea2\ubea3\ubea4\ubea5\ubea6\ubea7\ubea8\ubea9\ubeaa\ubeab\ubeac\ubead\ubeae\ubeaf\ubeb0\ubeb1\ubeb2\ubeb3\ubeb4\ubeb5\ubeb6\ubeb7\ubeb8\ubeb9\ubeba\ubebb\ubebc\ubebd\ubebe\ubebf\ubec0\ubec1\ubec2\ubec3\ubec4\ubec5\ubec6\ubec7\ubec8\ubec9\ubeca\ubecb\ubecc\ubecd\ubece\ubecf\ubed0\ubed1\ubed2\ubed3\ubed4\ube
d5\ubed6\ubed7\ubed8\ubed9\ubeda\ubedb\ubedc\ubedd\ubede\ubedf\ubee0\ubee1\ubee2\ubee3\ubee4\ubee5\ubee6\ubee7\ubee8\ubee9\ubeea\ubeeb\ubeec\ubeed\ubeee\ubeef\ubef0\ubef1\ubef2\ubef3\ubef4\ubef5\ubef6\ubef7\ubef8\ubef9\ubefa\ubefb\ubefc\ubefd\ubefe\ubeff\ubf00\ubf01\ubf02\ubf03\ubf04\ubf05\ubf06\ubf07\ubf08\ubf09\ubf0a\ubf0b\ubf0c\ubf0d\ubf0e\ubf0f\ubf10\ubf11\ubf12\ubf13\ubf14\ubf15\ubf16\ubf17\ubf18\ubf19\ubf1a\ubf1b\ubf1c\ubf1d\ubf1e\ubf1f\ubf20\ubf21\ubf22\ubf23\ubf24\ubf25\ubf26\ubf27\ubf28\ubf29\ubf2a\ubf2b\ubf2c\ubf2d\ubf2e\ubf2f\ubf30\ubf31\ubf32\ubf33\ubf34\ubf35\ubf36\ubf37\ubf38\ubf39\ubf3a\ubf3b\ubf3c\ubf3d\ubf3e\ubf3f\ubf40\ubf41\ubf42\ubf43\ubf44\ubf45\ubf46\ubf47\ubf48\ubf49\ubf4a\ubf4b\ubf4c\ubf4d\ubf4e\ubf4f\ubf50\ubf51\ubf52\ubf53\ubf54\ubf55\ubf56\ubf57\ubf58\ubf59\ubf5a\ubf5b\ubf5c\ubf5d\ubf5e\ubf5f\ubf60\ubf61\ubf62\ubf63\ubf64\ubf65\ubf66\ubf67\ubf68\ubf69\ubf6a\ubf6b\ubf6c\ubf6d\ubf6e\ubf6f\ubf70\ubf71\ubf72\ubf73\ubf74\ubf75\ubf76\ubf77\ubf78\ubf79\ubf7a\ubf7b\ubf7c\ubf7d\ubf7e\ubf7f\ubf80\ubf81\ubf82\ubf83\ubf84\ubf85\ubf86\ubf87\ubf88\ubf89\ubf8a\ubf8b\ubf8c\ubf8d\ubf8e\ubf8f\ubf90\ubf91\ubf92\ubf93\ubf94\ubf95\ubf96\ubf97\ubf98\ubf99\ubf9a\ubf9b\ubf9c\ubf9d\ubf9e\ubf9f\ubfa0\ubfa1\ubfa2\ubfa3\ubfa4\ubfa5\ubfa6\ubfa7\ubfa8\ubfa9\ubfaa\ubfab\ubfac\ubfad\ubfae\ubfaf\ubfb0\ubfb1\ubfb2\ubfb3\ubfb4\ubfb5\ubfb6\ubfb7\ubfb8\ubfb9\ubfba\ubfbb\ubfbc\ubfbd\ubfbe\ubfbf\ubfc0\ubfc1\ubfc2\ubfc3\ubfc4\ubfc5\ubfc6\ubfc7\ubfc8\ubfc9\ubfca\ubfcb\ubfcc\ubfcd\ubfce\ubfcf\ubfd0\ubfd1\ubfd2\ubfd3\ubfd4\ubfd5\ubfd6\ubfd7\ubfd8\ubfd9\ubfda\ubfdb\ubfdc\ubfdd\ubfde\ubfdf\ubfe0\ubfe1\ubfe2\ubfe3\ubfe4\ubfe5\ubfe6\ubfe7\ubfe8\ubfe9\ubfea\ubfeb\ubfec\ubfed\ubfee\ubfef\ubff0\ubff1\ubff2\ubff3\ubff4\ubff5\ubff6\ubff7\ubff8\ubff9\ubffa\ubffb\ubffc\ubffd\ubffe\ubfff\uc000\uc001\uc002\uc003\uc004\uc005\uc006\uc007\uc008\uc009\uc00a\uc00b\uc00c\uc00d\uc00e\uc00f\uc010\uc011\uc012\uc013\uc014\uc015\uc016\uc017\uc018\uc019\uc01a\uc01b\uc01c\uc01d\uc01e\uc01f\uc020\uc021\uc022
\uc023\uc024\uc025\uc026\uc027\uc028\uc029\uc02a\uc02b\uc02c\uc02d\uc02e\uc02f\uc030\uc031\uc032\uc033\uc034\uc035\uc036\uc037\uc038\uc039\uc03a\uc03b\uc03c\uc03d\uc03e\uc03f\uc040\uc041\uc042\uc043\uc044\uc045\uc046\uc047\uc048\uc049\uc04a\uc04b\uc04c\uc04d\uc04e\uc04f\uc050\uc051\uc052\uc053\uc054\uc055\uc056\uc057\uc058\uc059\uc05a\uc05b\uc05c\uc05d\uc05e\uc05f\uc060\uc061\uc062\uc063\uc064\uc065\uc066\uc067\uc068\uc069\uc06a\uc06b\uc06c\uc06d\uc06e\uc06f\uc070\uc071\uc072\uc073\uc074\uc075\uc076\uc077\uc078\uc079\uc07a\uc07b\uc07c\uc07d\uc07e\uc07f\uc080\uc081\uc082\uc083\uc084\uc085\uc086\uc087\uc088\uc089\uc08a\uc08b\uc08c\uc08d\uc08e\uc08f\uc090\uc091\uc092\uc093\uc094\uc095\uc096\uc097\uc098\uc099\uc09a\uc09b\uc09c\uc09d\uc09e\uc09f\uc0a0\uc0a1\uc0a2\uc0a3\uc0a4\uc0a5\uc0a6\uc0a7\uc0a8\uc0a9\uc0aa\uc0ab\uc0ac\uc0ad\uc0ae\uc0af\uc0b0\uc0b1\uc0b2\uc0b3\uc0b4\uc0b5\uc0b6\uc0b7\uc0b8\uc0b9\uc0ba\uc0bb\uc0bc\uc0bd\uc0be\uc0bf\uc0c0\uc0c1\uc0c2\uc0c3\uc0c4\uc0c5\uc0c6\uc0c7\uc0c8\uc0c9\uc0ca\uc0cb\uc0cc\uc0cd\uc0ce\uc0cf\uc0d0\uc0d1\uc0d2\uc0d3\uc0d4\uc0d5\uc0d6\uc0d7\uc0d8\uc0d9\uc0da\uc0db\uc0dc\uc0dd\uc0de\uc0df\uc0e0\uc0e1\uc0e2\uc0e3\uc0e4\uc0e5\uc0e6\uc0e7\uc0e8\uc0e9\uc0ea\uc0eb\uc0ec\uc0ed\uc0ee\uc0ef\uc0f0\uc0f1\uc0f2\uc0f3\uc0f4\uc0f5\uc0f6\uc0f7\uc0f8\uc0f9\uc0fa\uc0fb\uc0fc\uc0fd\uc0fe\uc0ff\uc100\uc101\uc102\uc103\uc104\uc105\uc106\uc107\uc108\uc109\uc10a\uc10b\uc10c\uc10d\uc10e\uc10f\uc110\uc111\uc112\uc113\uc114\uc115\uc116\uc117\uc118\uc119\uc11a\uc11b\uc11c\uc11d\uc11e\uc11f\uc120\uc121\uc122\uc123\uc124\uc125\uc126\uc127\uc128\uc129\uc12a\uc12b\uc12c\uc12d\uc12e\uc12f\uc130\uc131\uc132\uc133\uc134\uc135\uc136\uc137\uc138\uc139\uc13a\uc13b\uc13c\uc13d\uc13e\uc13f\uc140\uc141\uc142\uc143\uc144\uc145\uc146\uc147\uc148\uc149\uc14a\uc14b\uc14c\uc14d\uc14e\uc14f\uc150\uc151\uc152\uc153\uc154\uc155\uc156\uc157\uc158\uc159\uc15a\uc15b\uc15c\uc15d\uc15e\uc15f\uc160\uc161\uc162\uc163\uc164\uc165\uc166\uc167\uc168\uc169\uc16a\uc16b\uc16c\uc16d\uc16e\uc16f\u
c170\uc171\uc172\uc173\uc174\uc175\uc176\uc177\uc178\uc179\uc17a\uc17b\uc17c\uc17d\uc17e\uc17f\uc180\uc181\uc182\uc183\uc184\uc185\uc186\uc187\uc188\uc189\uc18a\uc18b\uc18c\uc18d\uc18e\uc18f\uc190\uc191\uc192\uc193\uc194\uc195\uc196\uc197\uc198\uc199\uc19a\uc19b\uc19c\uc19d\uc19e\uc19f\uc1a0\uc1a1\uc1a2\uc1a3\uc1a4\uc1a5\uc1a6\uc1a7\uc1a8\uc1a9\uc1aa\uc1ab\uc1ac\uc1ad\uc1ae\uc1af\uc1b0\uc1b1\uc1b2\uc1b3\uc1b4\uc1b5\uc1b6\uc1b7\uc1b8\uc1b9\uc1ba\uc1bb\uc1bc\uc1bd\uc1be\uc1bf\uc1c0\uc1c1\uc1c2\uc1c3\uc1c4\uc1c5\uc1c6\uc1c7\uc1c8\uc1c9\uc1ca\uc1cb\uc1cc\uc1cd\uc1ce\uc1cf\uc1d0\uc1d1\uc1d2\uc1d3\uc1d4\uc1d5\uc1d6\uc1d7\uc1d8\uc1d9\uc1da\uc1db\uc1dc\uc1dd\uc1de\uc1df\uc1e0\uc1e1\uc1e2\uc1e3\uc1e4\uc1e5\uc1e6\uc1e7\uc1e8\uc1e9\uc1ea\uc1eb\uc1ec\uc1ed\uc1ee\uc1ef\uc1f0\uc1f1\uc1f2\uc1f3\uc1f4\uc1f5\uc1f6\uc1f7\uc1f8\uc1f9\uc1fa\uc1fb\uc1fc\uc1fd\uc1fe\uc1ff\uc200\uc201\uc202\uc203\uc204\uc205\uc206\uc207\uc208\uc209\uc20a\uc20b\uc20c\uc20d\uc20e\uc20f\uc210\uc211\uc212\uc213\uc214\uc215\uc216\uc217\uc218\uc219\uc21a\uc21b\uc21c\uc21d\uc21e\uc21f\uc220\uc221\uc222\uc223\uc224\uc225\uc226\uc227\uc228\uc229\uc22a\uc22b\uc22c\uc22d\uc22e\uc22f\uc230\uc231\uc232\uc233\uc234\uc235\uc236\uc237\uc238\uc239\uc23a\uc23b\uc23c\uc23d\uc23e\uc23f\uc240\uc241\uc242\uc243\uc244\uc245\uc246\uc247\uc248\uc249\uc24a\uc24b\uc24c\uc24d\uc24e\uc24f\uc250\uc251\uc252\uc253\uc254\uc255\uc256\uc257\uc258\uc259\uc25a\uc25b\uc25c\uc25d\uc25e\uc25f\uc260\uc261\uc262\uc263\uc264\uc265\uc266\uc267\uc268\uc269\uc26a\uc26b\uc26c\uc26d\uc26e\uc26f\uc270\uc271\uc272\uc273\uc274\uc275\uc276\uc277\uc278\uc279\uc27a\uc27b\uc27c\uc27d\uc27e\uc27f\uc280\uc281\uc282\uc283\uc284\uc285\uc286\uc287\uc288\uc289\uc28a\uc28b\uc28c\uc28d\uc28e\uc28f\uc290\uc291\uc292\uc293\uc294\uc295\uc296\uc297\uc298\uc299\uc29a\uc29b\uc29c\uc29d\uc29e\uc29f\uc2a0\uc2a1\uc2a2\uc2a3\uc2a4\uc2a5\uc2a6\uc2a7\uc2a8\uc2a9\uc2aa\uc2ab\uc2ac\uc2ad\uc2ae\uc2af\uc2b0\uc2b1\uc2b2\uc2b3\uc2b4\uc2b5\uc2b6\uc2b7\uc2b8\uc2b9\uc2ba\uc2bb\uc2bc\uc2
bd\uc2be\uc2bf\uc2c0\uc2c1\uc2c2\uc2c3\uc2c4\uc2c5\uc2c6\uc2c7\uc2c8\uc2c9\uc2ca\uc2cb\uc2cc\uc2cd\uc2ce\uc2cf\uc2d0\uc2d1\uc2d2\uc2d3\uc2d4\uc2d5\uc2d6\uc2d7\uc2d8\uc2d9\uc2da\uc2db\uc2dc\uc2dd\uc2de\uc2df\uc2e0\uc2e1\uc2e2\uc2e3\uc2e4\uc2e5\uc2e6\uc2e7\uc2e8\uc2e9\uc2ea\uc2eb\uc2ec\uc2ed\uc2ee\uc2ef\uc2f0\uc2f1\uc2f2\uc2f3\uc2f4\uc2f5\uc2f6\uc2f7\uc2f8\uc2f9\uc2fa\uc2fb\uc2fc\uc2fd\uc2fe\uc2ff\uc300\uc301\uc302\uc303\uc304\uc305\uc306\uc307\uc308\uc309\uc30a\uc30b\uc30c\uc30d\uc30e\uc30f\uc310\uc311\uc312\uc313\uc314\uc315\uc316\uc317\uc318\uc319\uc31a\uc31b\uc31c\uc31d\uc31e\uc31f\uc320\uc321\uc322\uc323\uc324\uc325\uc326\uc327\uc328\uc329\uc32a\uc32b\uc32c\uc32d\uc32e\uc32f\uc330\uc331\uc332\uc333\uc334\uc335\uc336\uc337\uc338\uc339\uc33a\uc33b\uc33c\uc33d\uc33e\uc33f\uc340\uc341\uc342\uc343\uc344\uc345\uc346\uc347\uc348\uc349\uc34a\uc34b\uc34c\uc34d\uc34e\uc34f\uc350\uc351\uc352\uc353\uc354\uc355\uc356\uc357\uc358\uc359\uc35a\uc35b\uc35c\uc35d\uc35e\uc35f\uc360\uc361\uc362\uc363\uc364\uc365\uc366\uc367\uc368\uc369\uc36a\uc36b\uc36c\uc36d\uc36e\uc36f\uc370\uc371\uc372\uc373\uc374\uc375\uc376\uc377\uc378\uc379\uc37a\uc37b\uc37c\uc37d\uc37e\uc37f\uc380\uc381\uc382\uc383\uc384\uc385\uc386\uc387\uc388\uc389\uc38a\uc38b\uc38c\uc38d\uc38e\uc38f\uc390\uc391\uc392\uc393\uc394\uc395\uc396\uc397\uc398\uc399\uc39a\uc39b\uc39c\uc39d\uc39e\uc39f\uc3a0\uc3a1\uc3a2\uc3a3\uc3a4\uc3a5\uc3a6\uc3a7\uc3a8\uc3a9\uc3aa\uc3ab\uc3ac\uc3ad\uc3ae\uc3af\uc3b0\uc3b1\uc3b2\uc3b3\uc3b4\uc3b5\uc3b6\uc3b7\uc3b8\uc3b9\uc3ba\uc3bb\uc3bc\uc3bd\uc3be\uc3bf\uc3c0\uc3c1\uc3c2\uc3c3\uc3c4\uc3c5\uc3c6\uc3c7\uc3c8\uc3c9\uc3ca\uc3cb\uc3cc\uc3cd\uc3ce\uc3cf\uc3d0\uc3d1\uc3d2\uc3d3\uc3d4\uc3d5\uc3d6\uc3d7\uc3d8\uc3d9\uc3da\uc3db\uc3dc\uc3dd\uc3de\uc3df\uc3e0\uc3e1\uc3e2\uc3e3\uc3e4\uc3e5\uc3e6\uc3e7\uc3e8\uc3e9\uc3ea\uc3eb\uc3ec\uc3ed\uc3ee\uc3ef\uc3f0\uc3f1\uc3f2\uc3f3\uc3f4\uc3f5\uc3f6\uc3f7\uc3f8\uc3f9\uc3fa\uc3fb\uc3fc\uc3fd\uc3fe\uc3ff\uc400\uc401\uc402\uc403\uc404\uc405\uc406\uc407\uc408\uc409\uc40a
\uc40b\uc40c\uc40d\uc40e\uc40f\uc410\uc411\uc412\uc413\uc414\uc415\uc416\uc417\uc418\uc419\uc41a\uc41b\uc41c\uc41d\uc41e\uc41f\uc420\uc421\uc422\uc423\uc424\uc425\uc426\uc427\uc428\uc429\uc42a\uc42b\uc42c\uc42d\uc42e\uc42f\uc430\uc431\uc432\uc433\uc434\uc435\uc436\uc437\uc438\uc439\uc43a\uc43b\uc43c\uc43d\uc43e\uc43f\uc440\uc441\uc442\uc443\uc444\uc445\uc446\uc447\uc448\uc449\uc44a\uc44b\uc44c\uc44d\uc44e\uc44f\uc450\uc451\uc452\uc453\uc454\uc455\uc456\uc457\uc458\uc459\uc45a\uc45b\uc45c\uc45d\uc45e\uc45f\uc460\uc461\uc462\uc463\uc464\uc465\uc466\uc467\uc468\uc469\uc46a\uc46b\uc46c\uc46d\uc46e\uc46f\uc470\uc471\uc472\uc473\uc474\uc475\uc476\uc477\uc478\uc479\uc47a\uc47b\uc47c\uc47d\uc47e\uc47f\uc480\uc481\uc482\uc483\uc484\uc485\uc486\uc487\uc488\uc489\uc48a\uc48b\uc48c\uc48d\uc48e\uc48f\uc490\uc491\uc492\uc493\uc494\uc495\uc496\uc497\uc498\uc499\uc49a\uc49b\uc49c\uc49d\uc49e\uc49f\uc4a0\uc4a1\uc4a2\uc4a3\uc4a4\uc4a5\uc4a6\uc4a7\uc4a8\uc4a9\uc4aa\uc4ab\uc4ac\uc4ad\uc4ae\uc4af\uc4b0\uc4b1\uc4b2\uc4b3\uc4b4\uc4b5\uc4b6\uc4b7\uc4b8\uc4b9\uc4ba\uc4bb\uc4bc\uc4bd\uc4be\uc4bf\uc4c0\uc4c1\uc4c2\uc4c3\uc4c4\uc4c5\uc4c6\uc4c7\uc4c8\uc4c9\uc4ca\uc4cb\uc4cc\uc4cd\uc4ce\uc4cf\uc4d0\uc4d1\uc4d2\uc4d3\uc4d4\uc4d5\uc4d6\uc4d7\uc4d8\uc4d9\uc4da\uc4db\uc4dc\uc4dd\uc4de\uc4df\uc4e0\uc4e1\uc4e2\uc4e3\uc4e4\uc4e5\uc4e6\uc4e7\uc4e8\uc4e9\uc4ea\uc4eb\uc4ec\uc4ed\uc4ee\uc4ef\uc4f0\uc4f1\uc4f2\uc4f3\uc4f4\uc4f5\uc4f6\uc4f7\uc4f8\uc4f9\uc4fa\uc4fb\uc4fc\uc4fd\uc4fe\uc4ff\uc500\uc501\uc502\uc503\uc504\uc505\uc506\uc507\uc508\uc509\uc50a\uc50b\uc50c\uc50d\uc50e\uc50f\uc510\uc511\uc512\uc513\uc514\uc515\uc516\uc517\uc518\uc519\uc51a\uc51b\uc51c\uc51d\uc51e\uc51f\uc520\uc521\uc522\uc523\uc524\uc525\uc526\uc527\uc528\uc529\uc52a\uc52b\uc52c\uc52d\uc52e\uc52f\uc530\uc531\uc532\uc533\uc534\uc535\uc536\uc537\uc538\uc539\uc53a\uc53b\uc53c\uc53d\uc53e\uc53f\uc540\uc541\uc542\uc543\uc544\uc545\uc546\uc547\uc548\uc549\uc54a\uc54b\uc54c\uc54d\uc54e\uc54f\uc550\uc551\uc552\uc553\uc554\uc555\uc556\uc557\u
c558\uc559\uc55a\uc55b\uc55c\uc55d\uc55e\uc55f\uc560\uc561\uc562\uc563\uc564\uc565\uc566\uc567\uc568\uc569\uc56a\uc56b\uc56c\uc56d\uc56e\uc56f\uc570\uc571\uc572\uc573\uc574\uc575\uc576\uc577\uc578\uc579\uc57a\uc57b\uc57c\uc57d\uc57e\uc57f\uc580\uc581\uc582\uc583\uc584\uc585\uc586\uc587\uc588\uc589\uc58a\uc58b\uc58c\uc58d\uc58e\uc58f\uc590\uc591\uc592\uc593\uc594\uc595\uc596\uc597\uc598\uc599\uc59a\uc59b\uc59c\uc59d\uc59e\uc59f\uc5a0\uc5a1\uc5a2\uc5a3\uc5a4\uc5a5\uc5a6\uc5a7\uc5a8\uc5a9\uc5aa\uc5ab\uc5ac\uc5ad\uc5ae\uc5af\uc5b0\uc5b1\uc5b2\uc5b3\uc5b4\uc5b5\uc5b6\uc5b7\uc5b8\uc5b9\uc5ba\uc5bb\uc5bc\uc5bd\uc5be\uc5bf\uc5c0\uc5c1\uc5c2\uc5c3\uc5c4\uc5c5\uc5c6\uc5c7\uc5c8\uc5c9\uc5ca\uc5cb\uc5cc\uc5cd\uc5ce\uc5cf\uc5d0\uc5d1\uc5d2\uc5d3\uc5d4\uc5d5\uc5d6\uc5d7\uc5d8\uc5d9\uc5da\uc5db\uc5dc\uc5dd\uc5de\uc5df\uc5e0\uc5e1\uc5e2\uc5e3\uc5e4\uc5e5\uc5e6\uc5e7\uc5e8\uc5e9\uc5ea\uc5eb\uc5ec\uc5ed\uc5ee\uc5ef\uc5f0\uc5f1\uc5f2\uc5f3\uc5f4\uc5f5\uc5f6\uc5f7\uc5f8\uc5f9\uc5fa\uc5fb\uc5fc\uc5fd\uc5fe\uc5ff\uc600\uc601\uc602\uc603\uc604\uc605\uc606\uc607\uc608\uc609\uc60a\uc60b\uc60c\uc60d\uc60e\uc60f\uc610\uc611\uc612\uc613\uc614\uc615\uc616\uc617\uc618\uc619\uc61a\uc61b\uc61c\uc61d\uc61e\uc61f\uc620\uc621\uc622\uc623\uc624\uc625\uc626\uc627\uc628\uc629\uc62a\uc62b\uc62c\uc62d\uc62e\uc62f\uc630\uc631\uc632\uc633\uc634\uc635\uc636\uc637\uc638\uc639\uc63a\uc63b\uc63c\uc63d\uc63e\uc63f\uc640\uc641\uc642\uc643\uc644\uc645\uc646\uc647\uc648\uc649\uc64a\uc64b\uc64c\uc64d\uc64e\uc64f\uc650\uc651\uc652\uc653\uc654\uc655\uc656\uc657\uc658\uc659\uc65a\uc65b\uc65c\uc65d\uc65e\uc65f\uc660\uc661\uc662\uc663\uc664\uc665\uc666\uc667\uc668\uc669\uc66a\uc66b\uc66c\uc66d\uc66e\uc66f\uc670\uc671\uc672\uc673\uc674\uc675\uc676\uc677\uc678\uc679\uc67a\uc67b\uc67c\uc67d\uc67e\uc67f\uc680\uc681\uc682\uc683\uc684\uc685\uc686\uc687\uc688\uc689\uc68a\uc68b\uc68c\uc68d\uc68e\uc68f\uc690\uc691\uc692\uc693\uc694\uc695\uc696\uc697\uc698\uc699\uc69a\uc69b\uc69c\uc69d\uc69e\uc69f\uc6a0\uc6a1\uc6a2\uc6a3\uc6a4\uc6
a5\uc6a6\uc6a7\uc6a8\uc6a9\uc6aa\uc6ab\uc6ac\uc6ad\uc6ae\uc6af\uc6b0\uc6b1\uc6b2\uc6b3\uc6b4\uc6b5\uc6b6\uc6b7\uc6b8\uc6b9\uc6ba\uc6bb\uc6bc\uc6bd\uc6be\uc6bf\uc6c0\uc6c1\uc6c2\uc6c3\uc6c4\uc6c5\uc6c6\uc6c7\uc6c8\uc6c9\uc6ca\uc6cb\uc6cc\uc6cd\uc6ce\uc6cf\uc6d0\uc6d1\uc6d2\uc6d3\uc6d4\uc6d5\uc6d6\uc6d7\uc6d8\uc6d9\uc6da\uc6db\uc6dc\uc6dd\uc6de\uc6df\uc6e0\uc6e1\uc6e2\uc6e3\uc6e4\uc6e5\uc6e6\uc6e7\uc6e8\uc6e9\uc6ea\uc6eb\uc6ec\uc6ed\uc6ee\uc6ef\uc6f0\uc6f1\uc6f2\uc6f3\uc6f4\uc6f5\uc6f6\uc6f7\uc6f8\uc6f9\uc6fa\uc6fb\uc6fc\uc6fd\uc6fe\uc6ff\uc700\uc701\uc702\uc703\uc704\uc705\uc706\uc707\uc708\uc709\uc70a\uc70b\uc70c\uc70d\uc70e\uc70f\uc710\uc711\uc712\uc713\uc714\uc715\uc716\uc717\uc718\uc719\uc71a\uc71b\uc71c\uc71d\uc71e\uc71f\uc720\uc721\uc722\uc723\uc724\uc725\uc726\uc727\uc728\uc729\uc72a\uc72b\uc72c\uc72d\uc72e\uc72f\uc730\uc731\uc732\uc733\uc734\uc735\uc736\uc737\uc738\uc739\uc73a\uc73b\uc73c\uc73d\uc73e\uc73f\uc740\uc741\uc742\uc743\uc744\uc745\uc746\uc747\uc748\uc749\uc74a\uc74b\uc74c\uc74d\uc74e\uc74f\uc750\uc751\uc752\uc753\uc754\uc755\uc756\uc757\uc758\uc759\uc75a\uc75b\uc75c\uc75d\uc75e\uc75f\uc760\uc761\uc762\uc763\uc764\uc765\uc766\uc767\uc768\uc769\uc76a\uc76b\uc76c\uc76d\uc76e\uc76f\uc770\uc771\uc772\uc773\uc774\uc775\uc776\uc777\uc778\uc779\uc77a\uc77b\uc77c\uc77d\uc77e\uc77f\uc780\uc781\uc782\uc783\uc784\uc785\uc786\uc787\uc788\uc789\uc78a\uc78b\uc78c\uc78d\uc78e\uc78f\uc790\uc791\uc792\uc793\uc794\uc795\uc796\uc797\uc798\uc799\uc79a\uc79b\uc79c\uc79d\uc79e\uc79f\uc7a0\uc7a1\uc7a2\uc7a3\uc7a4\uc7a5\uc7a6\uc7a7\uc7a8\uc7a9\uc7aa\uc7ab\uc7ac\uc7ad\uc7ae\uc7af\uc7b0\uc7b1\uc7b2\uc7b3\uc7b4\uc7b5\uc7b6\uc7b7\uc7b8\uc7b9\uc7ba\uc7bb\uc7bc\uc7bd\uc7be\uc7bf\uc7c0\uc7c1\uc7c2\uc7c3\uc7c4\uc7c5\uc7c6\uc7c7\uc7c8\uc7c9\uc7ca\uc7cb\uc7cc\uc7cd\uc7ce\uc7cf\uc7d0\uc7d1\uc7d2\uc7d3\uc7d4\uc7d5\uc7d6\uc7d7\uc7d8\uc7d9\uc7da\uc7db\uc7dc\uc7dd\uc7de\uc7df\uc7e0\uc7e1\uc7e2\uc7e3\uc7e4\uc7e5\uc7e6\uc7e7\uc7e8\uc7e9\uc7ea\uc7eb\uc7ec\uc7ed\uc7ee\uc7ef\uc7f0\uc7f1\uc7f2
\uc7f3\uc7f4\uc7f5\uc7f6\uc7f7\uc7f8\uc7f9\uc7fa\uc7fb\uc7fc\uc7fd\uc7fe\uc7ff\uc800\uc801\uc802\uc803\uc804\uc805\uc806\uc807\uc808\uc809\uc80a\uc80b\uc80c\uc80d\uc80e\uc80f\uc810\uc811\uc812\uc813\uc814\uc815\uc816\uc817\uc818\uc819\uc81a\uc81b\uc81c\uc81d\uc81e\uc81f\uc820\uc821\uc822\uc823\uc824\uc825\uc826\uc827\uc828\uc829\uc82a\uc82b\uc82c\uc82d\uc82e\uc82f\uc830\uc831\uc832\uc833\uc834\uc835\uc836\uc837\uc838\uc839\uc83a\uc83b\uc83c\uc83d\uc83e\uc83f\uc840\uc841\uc842\uc843\uc844\uc845\uc846\uc847\uc848\uc849\uc84a\uc84b\uc84c\uc84d\uc84e\uc84f\uc850\uc851\uc852\uc853\uc854\uc855\uc856\uc857\uc858\uc859\uc85a\uc85b\uc85c\uc85d\uc85e\uc85f\uc860\uc861\uc862\uc863\uc864\uc865\uc866\uc867\uc868\uc869\uc86a\uc86b\uc86c\uc86d\uc86e\uc86f\uc870\uc871\uc872\uc873\uc874\uc875\uc876\uc877\uc878\uc879\uc87a\uc87b\uc87c\uc87d\uc87e\uc87f\uc880\uc881\uc882\uc883\uc884\uc885\uc886\uc887\uc888\uc889\uc88a\uc88b\uc88c\uc88d\uc88e\uc88f\uc890\uc891\uc892\uc893\uc894\uc895\uc896\uc897\uc898\uc899\uc89a\uc89b\uc89c\uc89d\uc89e\uc89f\uc8a0\uc8a1\uc8a2\uc8a3\uc8a4\uc8a5\uc8a6\uc8a7\uc8a8\uc8a9\uc8aa\uc8ab\uc8ac\uc8ad\uc8ae\uc8af\uc8b0\uc8b1\uc8b2\uc8b3\uc8b4\uc8b5\uc8b6\uc8b7\uc8b8\uc8b9\uc8ba\uc8bb\uc8bc\uc8bd\uc8be\uc8bf\uc8c0\uc8c1\uc8c2\uc8c3\uc8c4\uc8c5\uc8c6\uc8c7\uc8c8\uc8c9\uc8ca\uc8cb\uc8cc\uc8cd\uc8ce\uc8cf\uc8d0\uc8d1\uc8d2\uc8d3\uc8d4\uc8d5\uc8d6\uc8d7\uc8d8\uc8d9\uc8da\uc8db\uc8dc\uc8dd\uc8de\uc8df\uc8e0\uc8e1\uc8e2\uc8e3\uc8e4\uc8e5\uc8e6\uc8e7\uc8e8\uc8e9\uc8ea\uc8eb\uc8ec\uc8ed\uc8ee\uc8ef\uc8f0\uc8f1\uc8f2\uc8f3\uc8f4\uc8f5\uc8f6\uc8f7\uc8f8\uc8f9\uc8fa\uc8fb\uc8fc\uc8fd\uc8fe\uc8ff\uc900\uc901\uc902\uc903\uc904\uc905\uc906\uc907\uc908\uc909\uc90a\uc90b\uc90c\uc90d\uc90e\uc90f\uc910\uc911\uc912\uc913\uc914\uc915\uc916\uc917\uc918\uc919\uc91a\uc91b\uc91c\uc91d\uc91e\uc91f\uc920\uc921\uc922\uc923\uc924\uc925\uc926\uc927\uc928\uc929\uc92a\uc92b\uc92c\uc92d\uc92e\uc92f\uc930\uc931\uc932\uc933\uc934\uc935\uc936\uc937\uc938\uc939\uc93a\uc93b\uc93c\uc93d\uc93e\uc93f\u
c940\uc941\uc942\uc943\uc944\uc945\uc946\uc947\uc948\uc949\uc94a\uc94b\uc94c\uc94d\uc94e\uc94f\uc950\uc951\uc952\uc953\uc954\uc955\uc956\uc957\uc958\uc959\uc95a\uc95b\uc95c\uc95d\uc95e\uc95f\uc960\uc961\uc962\uc963\uc964\uc965\uc966\uc967\uc968\uc969\uc96a\uc96b\uc96c\uc96d\uc96e\uc96f\uc970\uc971\uc972\uc973\uc974\uc975\uc976\uc977\uc978\uc979\uc97a\uc97b\uc97c\uc97d\uc97e\uc97f\uc980\uc981\uc982\uc983\uc984\uc985\uc986\uc987\uc988\uc989\uc98a\uc98b\uc98c\uc98d\uc98e\uc98f\uc990\uc991\uc992\uc993\uc994\uc995\uc996\uc997\uc998\uc999\uc99a\uc99b\uc99c\uc99d\uc99e\uc99f\uc9a0\uc9a1\uc9a2\uc9a3\uc9a4\uc9a5\uc9a6\uc9a7\uc9a8\uc9a9\uc9aa\uc9ab\uc9ac\uc9ad\uc9ae\uc9af\uc9b0\uc9b1\uc9b2\uc9b3\uc9b4\uc9b5\uc9b6\uc9b7\uc9b8\uc9b9\uc9ba\uc9bb\uc9bc\uc9bd\uc9be\uc9bf\uc9c0\uc9c1\uc9c2\uc9c3\uc9c4\uc9c5\uc9c6\uc9c7\uc9c8\uc9c9\uc9ca\uc9cb\uc9cc\uc9cd\uc9ce\uc9cf\uc9d0\uc9d1\uc9d2\uc9d3\uc9d4\uc9d5\uc9d6\uc9d7\uc9d8\uc9d9\uc9da\uc9db\uc9dc\uc9dd\uc9de\uc9df\uc9e0\uc9e1\uc9e2\uc9e3\uc9e4\uc9e5\uc9e6\uc9e7\uc9e8\uc9e9\uc9ea\uc9eb\uc9ec\uc9ed\uc9ee\uc9ef\uc9f0\uc9f1\uc9f2\uc9f3\uc9f4\uc9f5\uc9f6\uc9f7\uc9f8\uc9f9\uc9fa\uc9fb\uc9fc\uc9fd\uc9fe\uc9ff\uca00\uca01\uca02\uca03\uca04\uca05\uca06\uca07\uca08\uca09\uca0a\uca0b\uca0c\uca0d\uca0e\uca0f\uca10\uca11\uca12\uca13\uca14\uca15\uca16\uca17\uca18\uca19\uca1a\uca1b\uca1c\uca1d\uca1e\uca1f\uca20\uca21\uca22\uca23\uca24\uca25\uca26\uca27\uca28\uca29\uca2a\uca2b\uca2c\uca2d\uca2e\uca2f\uca30\uca31\uca32\uca33\uca34\uca35\uca36\uca37\uca38\uca39\uca3a\uca3b\uca3c\uca3d\uca3e\uca3f\uca40\uca41\uca42\uca43\uca44\uca45\uca46\uca47\uca48\uca49\uca4a\uca4b\uca4c\uca4d\uca4e\uca4f\uca50\uca51\uca52\uca53\uca54\uca55\uca56\uca57\uca58\uca59\uca5a\uca5b\uca5c\uca5d\uca5e\uca5f\uca60\uca61\uca62\uca63\uca64\uca65\uca66\uca67\uca68\uca69\uca6a\uca6b\uca6c\uca6d\uca6e\uca6f\uca70\uca71\uca72\uca73\uca74\uca75\uca76\uca77\uca78\uca79\uca7a\uca7b\uca7c\uca7d\uca7e\uca7f\uca80\uca81\uca82\uca83\uca84\uca85\uca86\uca87\uca88\uca89\uca8a\uca8b\uca8c\uca
8d\uca8e\uca8f\uca90\uca91\uca92\uca93\uca94\uca95\uca96\uca97\uca98\uca99\uca9a\uca9b\uca9c\uca9d\uca9e\uca9f\ucaa0\ucaa1\ucaa2\ucaa3\ucaa4\ucaa5\ucaa6\ucaa7\ucaa8\ucaa9\ucaaa\ucaab\ucaac\ucaad\ucaae\ucaaf\ucab0\ucab1\ucab2\ucab3\ucab4\ucab5\ucab6\ucab7\ucab8\ucab9\ucaba\ucabb\ucabc\ucabd\ucabe\ucabf\ucac0\ucac1\ucac2\ucac3\ucac4\ucac5\ucac6\ucac7\ucac8\ucac9\ucaca\ucacb\ucacc\ucacd\ucace\ucacf\ucad0\ucad1\ucad2\ucad3\ucad4\ucad5\ucad6\ucad7\ucad8\ucad9\ucada\ucadb\ucadc\ucadd\ucade\ucadf\ucae0\ucae1\ucae2\ucae3\ucae4\ucae5\ucae6\ucae7\ucae8\ucae9\ucaea\ucaeb\ucaec\ucaed\ucaee\ucaef\ucaf0\ucaf1\ucaf2\ucaf3\ucaf4\ucaf5\ucaf6\ucaf7\ucaf8\ucaf9\ucafa\ucafb\ucafc\ucafd\ucafe\ucaff\ucb00\ucb01\ucb02\ucb03\ucb04\ucb05\ucb06\ucb07\ucb08\ucb09\ucb0a\ucb0b\ucb0c\ucb0d\ucb0e\ucb0f\ucb10\ucb11\ucb12\ucb13\ucb14\ucb15\ucb16\ucb17\ucb18\ucb19\ucb1a\ucb1b\ucb1c\ucb1d\ucb1e\ucb1f\ucb20\ucb21\ucb22\ucb23\ucb24\ucb25\ucb26\ucb27\ucb28\ucb29\ucb2a\ucb2b\ucb2c\ucb2d\ucb2e\ucb2f\ucb30\ucb31\ucb32\ucb33\ucb34\ucb35\ucb36\ucb37\ucb38\ucb39\ucb3a\ucb3b\ucb3c\ucb3d\ucb3e\ucb3f\ucb40\ucb41\ucb42\ucb43\ucb44\ucb45\ucb46\ucb47\ucb48\ucb49\ucb4a\ucb4b\ucb4c\ucb4d\ucb4e\ucb4f\ucb50\ucb51\ucb52\ucb53\ucb54\ucb55\ucb56\ucb57\ucb58\ucb59\ucb5a\ucb5b\ucb5c\ucb5d\ucb5e\ucb5f\ucb60\ucb61\ucb62\ucb63\ucb64\ucb65\ucb66\ucb67\ucb68\ucb69\ucb6a\ucb6b\ucb6c\ucb6d\ucb6e\ucb6f\ucb70\ucb71\ucb72\ucb73\ucb74\ucb75\ucb76\ucb77\ucb78\ucb79\ucb7a\ucb7b\ucb7c\ucb7d\ucb7e\ucb7f\ucb80\ucb81\ucb82\ucb83\ucb84\ucb85\ucb86\ucb87\ucb88\ucb89\ucb8a\ucb8b\ucb8c\ucb8d\ucb8e\ucb8f\ucb90\ucb91\ucb92\ucb93\ucb94\ucb95\ucb96\ucb97\ucb98\ucb99\ucb9a\ucb9b\ucb9c\ucb9d\ucb9e\ucb9f\ucba0\ucba1\ucba2\ucba3\ucba4\ucba5\ucba6\ucba7\ucba8\ucba9\ucbaa\ucbab\ucbac\ucbad\ucbae\ucbaf\ucbb0\ucbb1\ucbb2\ucbb3\ucbb4\ucbb5\ucbb6\ucbb7\ucbb8\ucbb9\ucbba\ucbbb\ucbbc\ucbbd\ucbbe\ucbbf\ucbc0\ucbc1\ucbc2\ucbc3\ucbc4\ucbc5\ucbc6\ucbc7\ucbc8\ucbc9\ucbca\ucbcb\ucbcc\ucbcd\ucbce\ucbcf\ucbd0\ucbd1\ucbd2\ucbd3\ucbd4\ucbd5\ucbd6\ucbd7\ucbd8\ucbd9\ucbda
\ucbdb\ucbdc\ucbdd\ucbde\ucbdf\ucbe0\ucbe1\ucbe2\ucbe3\ucbe4\ucbe5\ucbe6\ucbe7\ucbe8\ucbe9\ucbea\ucbeb\ucbec\ucbed\ucbee\ucbef\ucbf0\ucbf1\ucbf2\ucbf3\ucbf4\ucbf5\ucbf6\ucbf7\ucbf8\ucbf9\ucbfa\ucbfb\ucbfc\ucbfd\ucbfe\ucbff\ucc00\ucc01\ucc02\ucc03\ucc04\ucc05\ucc06\ucc07\ucc08\ucc09\ucc0a\ucc0b\ucc0c\ucc0d\ucc0e\ucc0f\ucc10\ucc11\ucc12\ucc13\ucc14\ucc15\ucc16\ucc17\ucc18\ucc19\ucc1a\ucc1b\ucc1c\ucc1d\ucc1e\ucc1f\ucc20\ucc21\ucc22\ucc23\ucc24\ucc25\ucc26\ucc27\ucc28\ucc29\ucc2a\ucc2b\ucc2c\ucc2d\ucc2e\ucc2f\ucc30\ucc31\ucc32\ucc33\ucc34\ucc35\ucc36\ucc37\ucc38\ucc39\ucc3a\ucc3b\ucc3c\ucc3d\ucc3e\ucc3f\ucc40\ucc41\ucc42\ucc43\ucc44\ucc45\ucc46\ucc47\ucc48\ucc49\ucc4a\ucc4b\ucc4c\ucc4d\ucc4e\ucc4f\ucc50\ucc51\ucc52\ucc53\ucc54\ucc55\ucc56\ucc57\ucc58\ucc59\ucc5a\ucc5b\ucc5c\ucc5d\ucc5e\ucc5f\ucc60\ucc61\ucc62\ucc63\ucc64\ucc65\ucc66\ucc67\ucc68\ucc69\ucc6a\ucc6b\ucc6c\ucc6d\ucc6e\ucc6f\ucc70\ucc71\ucc72\ucc73\ucc74\ucc75\ucc76\ucc77\ucc78\ucc79\ucc7a\ucc7b\ucc7c\ucc7d\ucc7e\ucc7f\ucc80\ucc81\ucc82\ucc83\ucc84\ucc85\ucc86\ucc87\ucc88\ucc89\ucc8a\ucc8b\ucc8c\ucc8d\ucc8e\ucc8f\ucc90\ucc91\ucc92\ucc93\ucc94\ucc95\ucc96\ucc97\ucc98\ucc99\ucc9a\ucc9b\ucc9c\ucc9d\ucc9e\ucc9f\ucca0\ucca1\ucca2\ucca3\ucca4\ucca5\ucca6\ucca7\ucca8\ucca9\uccaa\uccab\uccac\uccad\uccae\uccaf\uccb0\uccb1\uccb2\uccb3\uccb4\uccb5\uccb6\uccb7\uccb8\uccb9\uccba\uccbb\uccbc\uccbd\uccbe\uccbf\uccc0\uccc1\uccc2\uccc3\uccc4\uccc5\uccc6\uccc7\uccc8\uccc9\uccca\ucccb\ucccc\ucccd\uccce\ucccf\uccd0\uccd1\uccd2\uccd3\uccd4\uccd5\uccd6\uccd7\uccd8\uccd9\uccda\uccdb\uccdc\uccdd\uccde\uccdf\ucce0\ucce1\ucce2\ucce3\ucce4\ucce5\ucce6\ucce7\ucce8\ucce9\uccea\ucceb\uccec\ucced\uccee\uccef\uccf0\uccf1\uccf2\uccf3\uccf4\uccf5\uccf6\uccf7\uccf8\uccf9\uccfa\uccfb\uccfc\uccfd\uccfe\uccff\ucd00\ucd01\ucd02\ucd03\ucd04\ucd05\ucd06\ucd07\ucd08\ucd09\ucd0a\ucd0b\ucd0c\ucd0d\ucd0e\ucd0f\ucd10\ucd11\ucd12\ucd13\ucd14\ucd15\ucd16\ucd17\ucd18\ucd19\ucd1a\ucd1b\ucd1c\ucd1d\ucd1e\ucd1f\ucd20\ucd21\ucd22\ucd23\ucd24\ucd25\ucd26\ucd27\u
cd28\ucd29\ucd2a\ucd2b\ucd2c\ucd2d\ucd2e\ucd2f\ucd30\ucd31\ucd32\ucd33\ucd34\ucd35\ucd36\ucd37\ucd38\ucd39\ucd3a\ucd3b\ucd3c\ucd3d\ucd3e\ucd3f\ucd40\ucd41\ucd42\ucd43\ucd44\ucd45\ucd46\ucd47\ucd48\ucd49\ucd4a\ucd4b\ucd4c\ucd4d\ucd4e\ucd4f\ucd50\ucd51\ucd52\ucd53\ucd54\ucd55\ucd56\ucd57\ucd58\ucd59\ucd5a\ucd5b\ucd5c\ucd5d\ucd5e\ucd5f\ucd60\ucd61\ucd62\ucd63\ucd64\ucd65\ucd66\ucd67\ucd68\ucd69\ucd6a\ucd6b\ucd6c\ucd6d\ucd6e\ucd6f\ucd70\ucd71\ucd72\ucd73\ucd74\ucd75\ucd76\ucd77\ucd78\ucd79\ucd7a\ucd7b\ucd7c\ucd7d\ucd7e\ucd7f\ucd80\ucd81\ucd82\ucd83\ucd84\ucd85\ucd86\ucd87\ucd88\ucd89\ucd8a\ucd8b\ucd8c\ucd8d\ucd8e\ucd8f\ucd90\ucd91\ucd92\ucd93\ucd94\ucd95\ucd96\ucd97\ucd98\ucd99\ucd9a\ucd9b\ucd9c\ucd9d\ucd9e\ucd9f\ucda0\ucda1\ucda2\ucda3\ucda4\ucda5\ucda6\ucda7\ucda8\ucda9\ucdaa\ucdab\ucdac\ucdad\ucdae\ucdaf\ucdb0\ucdb1\ucdb2\ucdb3\ucdb4\ucdb5\ucdb6\ucdb7\ucdb8\ucdb9\ucdba\ucdbb\ucdbc\ucdbd\ucdbe\ucdbf\ucdc0\ucdc1\ucdc2\ucdc3\ucdc4\ucdc5\ucdc6\ucdc7\ucdc8\ucdc9\ucdca\ucdcb\ucdcc\ucdcd\ucdce\ucdcf\ucdd0\ucdd1\ucdd2\ucdd3\ucdd4\ucdd5\ucdd6\ucdd7\ucdd8\ucdd9\ucdda\ucddb\ucddc\ucddd\ucdde\ucddf\ucde0\ucde1\ucde2\ucde3\ucde4\ucde5\ucde6\ucde7\ucde8\ucde9\ucdea\ucdeb\ucdec\ucded\ucdee\ucdef\ucdf0\ucdf1\ucdf2\ucdf3\ucdf4\ucdf5\ucdf6\ucdf7\ucdf8\ucdf9\ucdfa\ucdfb\ucdfc\ucdfd\ucdfe\ucdff\uce00\uce01\uce02\uce03\uce04\uce05\uce06\uce07\uce08\uce09\uce0a\uce0b\uce0c\uce0d\uce0e\uce0f\uce10\uce11\uce12\uce13\uce14\uce15\uce16\uce17\uce18\uce19\uce1a\uce1b\uce1c\uce1d\uce1e\uce1f\uce20\uce21\uce22\uce23\uce24\uce25\uce26\uce27\uce28\uce29\uce2a\uce2b\uce2c\uce2d\uce2e\uce2f\uce30\uce31\uce32\uce33\uce34\uce35\uce36\uce37\uce38\uce39\uce3a\uce3b\uce3c\uce3d\uce3e\uce3f\uce40\uce41\uce42\uce43\uce44\uce45\uce46\uce47\uce48\uce49\uce4a\uce4b\uce4c\uce4d\uce4e\uce4f\uce50\uce51\uce52\uce53\uce54\uce55\uce56\uce57\uce58\uce59\uce5a\uce5b\uce5c\uce5d\uce5e\uce5f\uce60\uce61\uce62\uce63\uce64\uce65\uce66\uce67\uce68\uce69\uce6a\uce6b\uce6c\uce6d\uce6e\uce6f\uce70\uce71\uce72\uce73\uce74\uce
75\uce76\uce77\uce78\uce79\uce7a\uce7b\uce7c\uce7d\uce7e\uce7f\uce80\uce81\uce82\uce83\uce84\uce85\uce86\uce87\uce88\uce89\uce8a\uce8b\uce8c\uce8d\uce8e\uce8f\uce90\uce91\uce92\uce93\uce94\uce95\uce96\uce97\uce98\uce99\uce9a\uce9b\uce9c\uce9d\uce9e\uce9f\ucea0\ucea1\ucea2\ucea3\ucea4\ucea5\ucea6\ucea7\ucea8\ucea9\uceaa\uceab\uceac\ucead\uceae\uceaf\uceb0\uceb1\uceb2\uceb3\uceb4\uceb5\uceb6\uceb7\uceb8\uceb9\uceba\ucebb\ucebc\ucebd\ucebe\ucebf\ucec0\ucec1\ucec2\ucec3\ucec4\ucec5\ucec6\ucec7\ucec8\ucec9\uceca\ucecb\ucecc\ucecd\ucece\ucecf\uced0\uced1\uced2\uced3\uced4\uced5\uced6\uced7\uced8\uced9\uceda\ucedb\ucedc\ucedd\ucede\ucedf\ucee0\ucee1\ucee2\ucee3\ucee4\ucee5\ucee6\ucee7\ucee8\ucee9\uceea\uceeb\uceec\uceed\uceee\uceef\ucef0\ucef1\ucef2\ucef3\ucef4\ucef5\ucef6\ucef7\ucef8\ucef9\ucefa\ucefb\ucefc\ucefd\ucefe\uceff\ucf00\ucf01\ucf02\ucf03\ucf04\ucf05\ucf06\ucf07\ucf08\ucf09\ucf0a\ucf0b\ucf0c\ucf0d\ucf0e\ucf0f\ucf10\ucf11\ucf12\ucf13\ucf14\ucf15\ucf16\ucf17\ucf18\ucf19\ucf1a\ucf1b\ucf1c\ucf1d\ucf1e\ucf1f\ucf20\ucf21\ucf22\ucf23\ucf24\ucf25\ucf26\ucf27\ucf28\ucf29\ucf2a\ucf2b\ucf2c\ucf2d\ucf2e\ucf2f\ucf30\ucf31\ucf32\ucf33\ucf34\ucf35\ucf36\ucf37\ucf38\ucf39\ucf3a\ucf3b\ucf3c\ucf3d\ucf3e\ucf3f\ucf40\ucf41\ucf42\ucf43\ucf44\ucf45\ucf46\ucf47\ucf48\ucf49\ucf4a\ucf4b\ucf4c\ucf4d\ucf4e\ucf4f\ucf50\ucf51\ucf52\ucf53\ucf54\ucf55\ucf56\ucf57\ucf58\ucf59\ucf5a\ucf5b\ucf5c\ucf5d\ucf5e\ucf5f\ucf60\ucf61\ucf62\ucf63\ucf64\ucf65\ucf66\ucf67\ucf68\ucf69\ucf6a\ucf6b\ucf6c\ucf6d\ucf6e\ucf6f\ucf70\ucf71\ucf72\ucf73\ucf74\ucf75\ucf76\ucf77\ucf78\ucf79\ucf7a\ucf7b\ucf7c\ucf7d\ucf7e\ucf7f\ucf80\ucf81\ucf82\ucf83\ucf84\ucf85\ucf86\ucf87\ucf88\ucf89\ucf8a\ucf8b\ucf8c\ucf8d\ucf8e\ucf8f\ucf90\ucf91\ucf92\ucf93\ucf94\ucf95\ucf96\ucf97\ucf98\ucf99\ucf9a\ucf9b\ucf9c\ucf9d\ucf9e\ucf9f\ucfa0\ucfa1\ucfa2\ucfa3\ucfa4\ucfa5\ucfa6\ucfa7\ucfa8\ucfa9\ucfaa\ucfab\ucfac\ucfad\ucfae\ucfaf\ucfb0\ucfb1\ucfb2\ucfb3\ucfb4\ucfb5\ucfb6\ucfb7\ucfb8\ucfb9\ucfba\ucfbb\ucfbc\ucfbd\ucfbe\ucfbf\ucfc0\ucfc1\ucfc2
\ucfc3\ucfc4\ucfc5\ucfc6\ucfc7\ucfc8\ucfc9\ucfca\ucfcb\ucfcc\ucfcd\ucfce\ucfcf\ucfd0\ucfd1\ucfd2\ucfd3\ucfd4\ucfd5\ucfd6\ucfd7\ucfd8\ucfd9\ucfda\ucfdb\ucfdc\ucfdd\ucfde\ucfdf\ucfe0\ucfe1\ucfe2\ucfe3\ucfe4\ucfe5\ucfe6\ucfe7\ucfe8\ucfe9\ucfea\ucfeb\ucfec\ucfed\ucfee\ucfef\ucff0\ucff1\ucff2\ucff3\ucff4\ucff5\ucff6\ucff7\ucff8\ucff9\ucffa\ucffb\ucffc\ucffd\ucffe\ucfff\ud000\ud001\ud002\ud003\ud004\ud005\ud006\ud007\ud008\ud009\ud00a\ud00b\ud00c\ud00d\ud00e\ud00f\ud010\ud011\ud012\ud013\ud014\ud015\ud016\ud017\ud018\ud019\ud01a\ud01b\ud01c\ud01d\ud01e\ud01f\ud020\ud021\ud022\ud023\ud024\ud025\ud026\ud027\ud028\ud029\ud02a\ud02b\ud02c\ud02d\ud02e\ud02f\ud030\ud031\ud032\ud033\ud034\ud035\ud036\ud037\ud038\ud039\ud03a\ud03b\ud03c\ud03d\ud03e\ud03f\ud040\ud041\ud042\ud043\ud044\ud045\ud046\ud047\ud048\ud049\ud04a\ud04b\ud04c\ud04d\ud04e\ud04f\ud050\ud051\ud052\ud053\ud054\ud055\ud056\ud057\ud058\ud059\ud05a\ud05b\ud05c\ud05d\ud05e\ud05f\ud060\ud061\ud062\ud063\ud064\ud065\ud066\ud067\ud068\ud069\ud06a\ud06b\ud06c\ud06d\ud06e\ud06f\ud070\ud071\ud072\ud073\ud074\ud075\ud076\ud077\ud078\ud079\ud07a\ud07b\ud07c\ud07d\ud07e\ud07f\ud080\ud081\ud082\ud083\ud084\ud085\ud086\ud087\ud088\ud089\ud08a\ud08b\ud08c\ud08d\ud08e\ud08f\ud090\ud091\ud092\ud093\ud094\ud095\ud096\ud097\ud098\ud099\ud09a\ud09b\ud09c\ud09d\ud09e\ud09f\ud0a0\ud0a1\ud0a2\ud0a3\ud0a4\ud0a5\ud0a6\ud0a7\ud0a8\ud0a9\ud0aa\ud0ab\ud0ac\ud0ad\ud0ae\ud0af\ud0b0\ud0b1\ud0b2\ud0b3\ud0b4\ud0b5\ud0b6\ud0b7\ud0b8\ud0b9\ud0ba\ud0bb\ud0bc\ud0bd\ud0be\ud0bf\ud0c0\ud0c1\ud0c2\ud0c3\ud0c4\ud0c5\ud0c6\ud0c7\ud0c8\ud0c9\ud0ca\ud0cb\ud0cc\ud0cd\ud0ce\ud0cf\ud0d0\ud0d1\ud0d2\ud0d3\ud0d4\ud0d5\ud0d6\ud0d7\ud0d8\ud0d9\ud0da\ud0db\ud0dc\ud0dd\ud0de\ud0df\ud0e0\ud0e1\ud0e2\ud0e3\ud0e4\ud0e5\ud0e6\ud0e7\ud0e8\ud0e9\ud0ea\ud0eb\ud0ec\ud0ed\ud0ee\ud0ef\ud0f0\ud0f1\ud0f2\ud0f3\ud0f4\ud0f5\ud0f6\ud0f7\ud0f8\ud0f9\ud0fa\ud0fb\ud0fc\ud0fd\ud0fe\ud0ff\ud100\ud101\ud102\ud103\ud104\ud105\ud106\ud107\ud108\ud109\ud10a\ud10b\ud10c\ud10d\ud10e\ud10f\u
d110\ud111\ud112\ud113\ud114\ud115\ud116\ud117\ud118\ud119\ud11a\ud11b\ud11c\ud11d\ud11e\ud11f\ud120\ud121\ud122\ud123\ud124\ud125\ud126\ud127\ud128\ud129\ud12a\ud12b\ud12c\ud12d\ud12e\ud12f\ud130\ud131\ud132\ud133\ud134\ud135\ud136\ud137\ud138\ud139\ud13a\ud13b\ud13c\ud13d\ud13e\ud13f\ud140\ud141\ud142\ud143\ud144\ud145\ud146\ud147\ud148\ud149\ud14a\ud14b\ud14c\ud14d\ud14e\ud14f\ud150\ud151\ud152\ud153\ud154\ud155\ud156\ud157\ud158\ud159\ud15a\ud15b\ud15c\ud15d\ud15e\ud15f\ud160\ud161\ud162\ud163\ud164\ud165\ud166\ud167\ud168\ud169\ud16a\ud16b\ud16c\ud16d\ud16e\ud16f\ud170\ud171\ud172\ud173\ud174\ud175\ud176\ud177\ud178\ud179\ud17a\ud17b\ud17c\ud17d\ud17e\ud17f\ud180\ud181\ud182\ud183\ud184\ud185\ud186\ud187\ud188\ud189\ud18a\ud18b\ud18c\ud18d\ud18e\ud18f\ud190\ud191\ud192\ud193\ud194\ud195\ud196\ud197\ud198\ud199\ud19a\ud19b\ud19c\ud19d\ud19e\ud19f\ud1a0\ud1a1\ud1a2\ud1a3\ud1a4\ud1a5\ud1a6\ud1a7\ud1a8\ud1a9\ud1aa\ud1ab\ud1ac\ud1ad\ud1ae\ud1af\ud1b0\ud1b1\ud1b2\ud1b3\ud1b4\ud1b5\ud1b6\ud1b7\ud1b8\ud1b9\ud1ba\ud1bb\ud1bc\ud1bd\ud1be\ud1bf\ud1c0\ud1c1\ud1c2\ud1c3\ud1c4\ud1c5\ud1c6\ud1c7\ud1c8\ud1c9\ud1ca\ud1cb\ud1cc\ud1cd\ud1ce\ud1cf\ud1d0\ud1d1\ud1d2\ud1d3\ud1d4\ud1d5\ud1d6\ud1d7\ud1d8\ud1d9\ud1da\ud1db\ud1dc\ud1dd\ud1de\ud1df\ud1e0\ud1e1\ud1e2\ud1e3\ud1e4\ud1e5\ud1e6\ud1e7\ud1e8\ud1e9\ud1ea\ud1eb\ud1ec\ud1ed\ud1ee\ud1ef\ud1f0\ud1f1\ud1f2\ud1f3\ud1f4\ud1f5\ud1f6\ud1f7\ud1f8\ud1f9\ud1fa\ud1fb\ud1fc\ud1fd\ud1fe\ud1ff\ud200\ud201\ud202\ud203\ud204\ud205\ud206\ud207\ud208\ud209\ud20a\ud20b\ud20c\ud20d\ud20e\ud20f\ud210\ud211\ud212\ud213\ud214\ud215\ud216\ud217\ud218\ud219\ud21a\ud21b\ud21c\ud21d\ud21e\ud21f\ud220\ud221\ud222\ud223\ud224\ud225\ud226\ud227\ud228\ud229\ud22a\ud22b\ud22c\ud22d\ud22e\ud22f\ud230\ud231\ud232\ud233\ud234\ud235\ud236\ud237\ud238\ud239\ud23a\ud23b\ud23c\ud23d\ud23e\ud23f\ud240\ud241\ud242\ud243\ud244\ud245\ud246\ud247\ud248\ud249\ud24a\ud24b\ud24c\ud24d\ud24e\ud24f\ud250\ud251\ud252\ud253\ud254\ud255\ud256\ud257\ud258\ud259\ud25a\ud25b\ud25c\ud2
5d\ud25e\ud25f\ud260\ud261\ud262\ud263\ud264\ud265\ud266\ud267\ud268\ud269\ud26a\ud26b\ud26c\ud26d\ud26e\ud26f\ud270\ud271\ud272\ud273\ud274\ud275\ud276\ud277\ud278\ud279\ud27a\ud27b\ud27c\ud27d\ud27e\ud27f\ud280\ud281\ud282\ud283\ud284\ud285\ud286\ud287\ud288\ud289\ud28a\ud28b\ud28c\ud28d\ud28e\ud28f\ud290\ud291\ud292\ud293\ud294\ud295\ud296\ud297\ud298\ud299\ud29a\ud29b\ud29c\ud29d\ud29e\ud29f\ud2a0\ud2a1\ud2a2\ud2a3\ud2a4\ud2a5\ud2a6\ud2a7\ud2a8\ud2a9\ud2aa\ud2ab\ud2ac\ud2ad\ud2ae\ud2af\ud2b0\ud2b1\ud2b2\ud2b3\ud2b4\ud2b5\ud2b6\ud2b7\ud2b8\ud2b9\ud2ba\ud2bb\ud2bc\ud2bd\ud2be\ud2bf\ud2c0\ud2c1\ud2c2\ud2c3\ud2c4\ud2c5\ud2c6\ud2c7\ud2c8\ud2c9\ud2ca\ud2cb\ud2cc\ud2cd\ud2ce\ud2cf\ud2d0\ud2d1\ud2d2\ud2d3\ud2d4\ud2d5\ud2d6\ud2d7\ud2d8\ud2d9\ud2da\ud2db\ud2dc\ud2dd\ud2de\ud2df\ud2e0\ud2e1\ud2e2\ud2e3\ud2e4\ud2e5\ud2e6\ud2e7\ud2e8\ud2e9\ud2ea\ud2eb\ud2ec\ud2ed\ud2ee\ud2ef\ud2f0\ud2f1\ud2f2\ud2f3\ud2f4\ud2f5\ud2f6\ud2f7\ud2f8\ud2f9\ud2fa\ud2fb\ud2fc\ud2fd\ud2fe\ud2ff\ud300\ud301\ud302\ud303\ud304\ud305\ud306\ud307\ud308\ud309\ud30a\ud30b\ud30c\ud30d\ud30e\ud30f\ud310\ud311\ud312\ud313\ud314\ud315\ud316\ud317\ud318\ud319\ud31a\ud31b\ud31c\ud31d\ud31e\ud31f\ud320\ud321\ud322\ud323\ud324\ud325\ud326\ud327\ud328\ud329\ud32a\ud32b\ud32c\ud32d\ud32e\ud32f\ud330\ud331\ud332\ud333\ud334\ud335\ud336\ud337\ud338\ud339\ud33a\ud33b\ud33c\ud33d\ud33e\ud33f\ud340\ud341\ud342\ud343\ud344\ud345\ud346\ud347\ud348\ud349\ud34a\ud34b\ud34c\ud34d\ud34e\ud34f\ud350\ud351\ud352\ud353\ud354\ud355\ud356\ud357\ud358\ud359\ud35a\ud35b\ud35c\ud35d\ud35e\ud35f\ud360\ud361\ud362\ud363\ud364\ud365\ud366\ud367\ud368\ud369\ud36a\ud36b\ud36c\ud36d\ud36e\ud36f\ud370\ud371\ud372\ud373\ud374\ud375\ud376\ud377\ud378\ud379\ud37a\ud37b\ud37c\ud37d\ud37e\ud37f\ud380\ud381\ud382\ud383\ud384\ud385\ud386\ud387\ud388\ud389\ud38a\ud38b\ud38c\ud38d\ud38e\ud38f\ud390\ud391\ud392\ud393\ud394\ud395\ud396\ud397\ud398\ud399\ud39a\ud39b\ud39c\ud39d\ud39e\ud39f\ud3a0\ud3a1\ud3a2\ud3a3\ud3a4\ud3a5\ud3a6\ud3a7\ud3a8\ud3a9\ud3aa
\ud3ab\ud3ac\ud3ad\ud3ae\ud3af\ud3b0\ud3b1\ud3b2\ud3b3\ud3b4\ud3b5\ud3b6\ud3b7\ud3b8\ud3b9\ud3ba\ud3bb\ud3bc\ud3bd\ud3be\ud3bf\ud3c0\ud3c1\ud3c2\ud3c3\ud3c4\ud3c5\ud3c6\ud3c7\ud3c8\ud3c9\ud3ca\ud3cb\ud3cc\ud3cd\ud3ce\ud3cf\ud3d0\ud3d1\ud3d2\ud3d3\ud3d4\ud3d5\ud3d6\ud3d7\ud3d8\ud3d9\ud3da\ud3db\ud3dc\ud3dd\ud3de\ud3df\ud3e0\ud3e1\ud3e2\ud3e3\ud3e4\ud3e5\ud3e6\ud3e7\ud3e8\ud3e9\ud3ea\ud3eb\ud3ec\ud3ed\ud3ee\ud3ef\ud3f0\ud3f1\ud3f2\ud3f3\ud3f4\ud3f5\ud3f6\ud3f7\ud3f8\ud3f9\ud3fa\ud3fb\ud3fc\ud3fd\ud3fe\ud3ff\ud400\ud401\ud402\ud403\ud404\ud405\ud406\ud407\ud408\ud409\ud40a\ud40b\ud40c\ud40d\ud40e\ud40f\ud410\ud411\ud412\ud413\ud414\ud415\ud416\ud417\ud418\ud419\ud41a\ud41b\ud41c\ud41d\ud41e\ud41f\ud420\ud421\ud422\ud423\ud424\ud425\ud426\ud427\ud428\ud429\ud42a\ud42b\ud42c\ud42d\ud42e\ud42f\ud430\ud431\ud432\ud433\ud434\ud435\ud436\ud437\ud438\ud439\ud43a\ud43b\ud43c\ud43d\ud43e\ud43f\ud440\ud441\ud442\ud443\ud444\ud445\ud446\ud447\ud448\ud449\ud44a\ud44b\ud44c\ud44d\ud44e\ud44f\ud450\ud451\ud452\ud453\ud454\ud455\ud456\ud457\ud458\ud459\ud45a\ud45b\ud45c\ud45d\ud45e\ud45f\ud460\ud461\ud462\ud463\ud464\ud465\ud466\ud467\ud468\ud469\ud46a\ud46b\ud46c\ud46d\ud46e\ud46f\ud470\ud471\ud472\ud473\ud474\ud475\ud476\ud477\ud478\ud479\ud47a\ud47b\ud47c\ud47d\ud47e\ud47f\ud480\ud481\ud482\ud483\ud484\ud485\ud486\ud487\ud488\ud489\ud48a\ud48b\ud48c\ud48d\ud48e\ud48f\ud490\ud491\ud492\ud493\ud494\ud495\ud496\ud497\ud498\ud499\ud49a\ud49b\ud49c\ud49d\ud49e\ud49f\ud4a0\ud4a1\ud4a2\ud4a3\ud4a4\ud4a5\ud4a6\ud4a7\ud4a8\ud4a9\ud4aa\ud4ab\ud4ac\ud4ad\ud4ae\ud4af\ud4b0\ud4b1\ud4b2\ud4b3\ud4b4\ud4b5\ud4b6\ud4b7\ud4b8\ud4b9\ud4ba\ud4bb\ud4bc\ud4bd\ud4be\ud4bf\ud4c0\ud4c1\ud4c2\ud4c3\ud4c4\ud4c5\ud4c6\ud4c7\ud4c8\ud4c9\ud4ca\ud4cb\ud4cc\ud4cd\ud4ce\ud4cf\ud4d0\ud4d1\ud4d2\ud4d3\ud4d4\ud4d5\ud4d6\ud4d7\ud4d8\ud4d9\ud4da\ud4db\ud4dc\ud4dd\ud4de\ud4df\ud4e0\ud4e1\ud4e2\ud4e3\ud4e4\ud4e5\ud4e6\ud4e7\ud4e8\ud4e9\ud4ea\ud4eb\ud4ec\ud4ed\ud4ee\ud4ef\ud4f0\ud4f1\ud4f2\ud4f3\ud4f4\ud4f5\ud4f6\ud4f7\u
d4f8\ud4f9\ud4fa\ud4fb\ud4fc\ud4fd\ud4fe\ud4ff\ud500\ud501\ud502\ud503\ud504\ud505\ud506\ud507\ud508\ud509\ud50a\ud50b\ud50c\ud50d\ud50e\ud50f\ud510\ud511\ud512\ud513\ud514\ud515\ud516\ud517\ud518\ud519\ud51a\ud51b\ud51c\ud51d\ud51e\ud51f\ud520\ud521\ud522\ud523\ud524\ud525\ud526\ud527\ud528\ud529\ud52a\ud52b\ud52c\ud52d\ud52e\ud52f\ud530\ud531\ud532\ud533\ud534\ud535\ud536\ud537\ud538\ud539\ud53a\ud53b\ud53c\ud53d\ud53e\ud53f\ud540\ud541\ud542\ud543\ud544\ud545\ud546\ud547\ud548\ud549\ud54a\ud54b\ud54c\ud54d\ud54e\ud54f\ud550\ud551\ud552\ud553\ud554\ud555\ud556\ud557\ud558\ud559\ud55a\ud55b\ud55c\ud55d\ud55e\ud55f\ud560\ud561\ud562\ud563\ud564\ud565\ud566\ud567\ud568\ud569\ud56a\ud56b\ud56c\ud56d\ud56e\ud56f\ud570\ud571\ud572\ud573\ud574\ud575\ud576\ud577\ud578\ud579\ud57a\ud57b\ud57c\ud57d\ud57e\ud57f\ud580\ud581\ud582\ud583\ud584\ud585\ud586\ud587\ud588\ud589\ud58a\ud58b\ud58c\ud58d\ud58e\ud58f\ud590\ud591\ud592\ud593\ud594\ud595\ud596\ud597\ud598\ud599\ud59a\ud59b\ud59c\ud59d\ud59e\ud59f\ud5a0\ud5a1\ud5a2\ud5a3\ud5a4\ud5a5\ud5a6\ud5a7\ud5a8\ud5a9\ud5aa\ud5ab\ud5ac\ud5ad\ud5ae\ud5af\ud5b0\ud5b1\ud5b2\ud5b3\ud5b4\ud5b5\ud5b6\ud5b7\ud5b8\ud5b9\ud5ba\ud5bb\ud5bc\ud5bd\ud5be\ud5bf\ud5c0\ud5c1\ud5c2\ud5c3\ud5c4\ud5c5\ud5c6\ud5c7\ud5c8\ud5c9\ud5ca\ud5cb\ud5cc\ud5cd\ud5ce\ud5cf\ud5d0\ud5d1\ud5d2\ud5d3\ud5d4\ud5d5\ud5d6\ud5d7\ud5d8\ud5d9\ud5da\ud5db\ud5dc\ud5dd\ud5de\ud5df\ud5e0\ud5e1\ud5e2\ud5e3\ud5e4\ud5e5\ud5e6\ud5e7\ud5e8\ud5e9\ud5ea\ud5eb\ud5ec\ud5ed\ud5ee\ud5ef\ud5f0\ud5f1\ud5f2\ud5f3\ud5f4\ud5f5\ud5f6\ud5f7\ud5f8\ud5f9\ud5fa\ud5fb\ud5fc\ud5fd\ud5fe\ud5ff\ud600\ud601\ud602\ud603\ud604\ud605\ud606\ud607\ud608\ud609\ud60a\ud60b\ud60c\ud60d\ud60e\ud60f\ud610\ud611\ud612\ud613\ud614\ud615\ud616\ud617\ud618\ud619\ud61a\ud61b\ud61c\ud61d\ud61e\ud61f\ud620\ud621\ud622\ud623\ud624\ud625\ud626\ud627\ud628\ud629\ud62a\ud62b\ud62c\ud62d\ud62e\ud62f\ud630\ud631\ud632\ud633\ud634\ud635\ud636\ud637\ud638\ud639\ud63a\ud63b\ud63c\ud63d\ud63e\ud63f\ud640\ud641\ud642\ud643\ud644\ud6
45\ud646\ud647\ud648\ud649\ud64a\ud64b\ud64c\ud64d\ud64e\ud64f\ud650\ud651\ud652\ud653\ud654\ud655\ud656\ud657\ud658\ud659\ud65a\ud65b\ud65c\ud65d\ud65e\ud65f\ud660\ud661\ud662\ud663\ud664\ud665\ud666\ud667\ud668\ud669\ud66a\ud66b\ud66c\ud66d\ud66e\ud66f\ud670\ud671\ud672\ud673\ud674\ud675\ud676\ud677\ud678\ud679\ud67a\ud67b\ud67c\ud67d\ud67e\ud67f\ud680\ud681\ud682\ud683\ud684\ud685\ud686\ud687\ud688\ud689\ud68a\ud68b\ud68c\ud68d\ud68e\ud68f\ud690\ud691\ud692\ud693\ud694\ud695\ud696\ud697\ud698\ud699\ud69a\ud69b\ud69c\ud69d\ud69e\ud69f\ud6a0\ud6a1\ud6a2\ud6a3\ud6a4\ud6a5\ud6a6\ud6a7\ud6a8\ud6a9\ud6aa\ud6ab\ud6ac\ud6ad\ud6ae\ud6af\ud6b0\ud6b1\ud6b2\ud6b3\ud6b4\ud6b5\ud6b6\ud6b7\ud6b8\ud6b9\ud6ba\ud6bb\ud6bc\ud6bd\ud6be\ud6bf\ud6c0\ud6c1\ud6c2\ud6c3\ud6c4\ud6c5\ud6c6\ud6c7\ud6c8\ud6c9\ud6ca\ud6cb\ud6cc\ud6cd\ud6ce\ud6cf\ud6d0\ud6d1\ud6d2\ud6d3\ud6d4\ud6d5\ud6d6\ud6d7\ud6d8\ud6d9\ud6da\ud6db\ud6dc\ud6dd\ud6de\ud6df\ud6e0\ud6e1\ud6e2\ud6e3\ud6e4\ud6e5\ud6e6\ud6e7\ud6e8\ud6e9\ud6ea\ud6eb\ud6ec\ud6ed\ud6ee\ud6ef\ud6f0\ud6f1\ud6f2\ud6f3\ud6f4\ud6f5\ud6f6\ud6f7\ud6f8\ud6f9\ud6fa\ud6fb\ud6fc\ud6fd\ud6fe\ud6ff\ud700\ud701\ud702\ud703\ud704\ud705\ud706\ud707\ud708\ud709\ud70a\ud70b\ud70c\ud70d\ud70e\ud70f\ud710\ud711\ud712\ud713\ud714\ud715\ud716\ud717\ud718\ud719\ud71a\ud71b\ud71c\ud71d\ud71e\ud71f\ud720\ud721\ud722\ud723\ud724\ud725\ud726\ud727\ud728\ud729\ud72a\ud72b\ud72c\ud72d\ud72e\ud72f\ud730\ud731\ud732\ud733\ud734\ud735\ud736\ud737\ud738\ud739\ud73a\ud73b\ud73c\ud73d\ud73e\ud73f\ud740\ud741\ud742\ud743\ud744\ud745\ud746\ud747\ud748\ud749\ud74a\ud74b\ud74c\ud74d\ud74e\ud74f\ud750\ud751\ud752\ud753\ud754\ud755\ud756\ud757\ud758\ud759\ud75a\ud75b\ud75c\ud75d\ud75e\ud75f\ud760\ud761\ud762\ud763\ud764\ud765\ud766\ud767\ud768\ud769\ud76a\ud76b\ud76c\ud76d\ud76e\ud76f\ud770\ud771\ud772\ud773\ud774\ud775\ud776\ud777\ud778\ud779\ud77a\ud77b\ud77c\ud77d\ud77e\ud77f\ud780\ud781\ud782\ud783\ud784\ud785\ud786\ud787\ud788\ud789\ud78a\ud78b\ud78c\ud78d\ud78e\ud78f\ud790\ud791\ud792
\ud793\ud794\ud795\ud796\ud797\ud798\ud799\ud79a\ud79b\ud79c\ud79d\ud79e\ud79f\ud7a0\ud7a1\ud7a2\ud7a3\uf900\uf901\uf902\uf903\uf904\uf905\uf906\uf907\uf908\uf909\uf90a\uf90b\uf90c\uf90d\uf90e\uf90f\uf910\uf911\uf912\uf913\uf914\uf915\uf916\uf917\uf918\uf919\uf91a\uf91b\uf91c\uf91d\uf91e\uf91f\uf920\uf921\uf922\uf923\uf924\uf925\uf926\uf927\uf928\uf929\uf92a\uf92b\uf92c\uf92d\uf92e\uf92f\uf930\uf931\uf932\uf933\uf934\uf935\uf936\uf937\uf938\uf939\uf93a\uf93b\uf93c\uf93d\uf93e\uf93f\uf940\uf941\uf942\uf943\uf944\uf945\uf946\uf947\uf948\uf949\uf94a\uf94b\uf94c\uf94d\uf94e\uf94f\uf950\uf951\uf952\uf953\uf954\uf955\uf956\uf957\uf958\uf959\uf95a\uf95b\uf95c\uf95d\uf95e\uf95f\uf960\uf961\uf962\uf963\uf964\uf965\uf966\uf967\uf968\uf969\uf96a\uf96b\uf96c\uf96d\uf96e\uf96f\uf970\uf971\uf972\uf973\uf974\uf975\uf976\uf977\uf978\uf979\uf97a\uf97b\uf97c\uf97d\uf97e\uf97f\uf980\uf981\uf982\uf983\uf984\uf985\uf986\uf987\uf988\uf989\uf98a\uf98b\uf98c\uf98d\uf98e\uf98f\uf990\uf991\uf992\uf993\uf994\uf995\uf996\uf997\uf998\uf999\uf99a\uf99b\uf99c\uf99d\uf99e\uf99f\uf9a0\uf9a1\uf9a2\uf9a3\uf9a4\uf9a5\uf9a6\uf9a7\uf9a8\uf9a9\uf9aa\uf9ab\uf9ac\uf9ad\uf9ae\uf9af\uf9b0\uf9b1\uf9b2\uf9b3\uf9b4\uf9b5\uf9b6\uf9b7\uf9b8\uf9b9\uf9ba\uf9bb\uf9bc\uf9bd\uf9be\uf9bf\uf9c0\uf9c1\uf9c2\uf9c3\uf9c4\uf9c5\uf9c6\uf9c7\uf9c8\uf9c9\uf9ca\uf9cb\uf9cc\uf9cd\uf9ce\uf9cf\uf9d0\uf9d1\uf9d2\uf9d3\uf9d4\uf9d5\uf9d6\uf9d7\uf9d8\uf9d9\uf9da\uf9db\uf9dc\uf9dd\uf9de\uf9df\uf9e0\uf9e1\uf9e2\uf9e3\uf9e4\uf9e5\uf9e6\uf9e7\uf9e8\uf9e9\uf9ea\uf9eb\uf9ec\uf9ed\uf9ee\uf9ef\uf9f0\uf9f1\uf9f2\uf9f3\uf9f4\uf9f5\uf9f6\uf9f7\uf9f8\uf9f9\uf9fa\uf9fb\uf9fc\uf9fd\uf9fe\uf9ff\ufa00\ufa01\ufa02\ufa03\ufa04\ufa05\ufa06\ufa07\ufa08\ufa09\ufa0a\ufa0b\ufa0c\ufa0d\ufa0e\ufa0f\ufa10\ufa11\ufa12\ufa13\ufa14\ufa15\ufa16\ufa17\ufa18\ufa19\ufa1a\ufa1b\ufa1c\ufa1d\ufa1e\ufa1f\ufa20\ufa21\ufa22\ufa23\ufa24\ufa25\ufa26\ufa27\ufa28\ufa29\ufa2a\ufa2b\ufa2c\ufa2d\ufa30\ufa31\ufa32\ufa33\ufa34\ufa35\ufa36\ufa37\ufa38\ufa39\ufa3a\ufa3b\ufa3c\ufa3d\u
fa3e\ufa3f\ufa40\ufa41\ufa42\ufa43\ufa44\ufa45\ufa46\ufa47\ufa48\ufa49\ufa4a\ufa4b\ufa4c\ufa4d\ufa4e\ufa4f\ufa50\ufa51\ufa52\ufa53\ufa54\ufa55\ufa56\ufa57\ufa58\ufa59\ufa5a\ufa5b\ufa5c\ufa5d\ufa5e\ufa5f\ufa60\ufa61\ufa62\ufa63\ufa64\ufa65\ufa66\ufa67\ufa68\ufa69\ufa6a\ufa70\ufa71\ufa72\ufa73\ufa74\ufa75\ufa76\ufa77\ufa78\ufa79\ufa7a\ufa7b\ufa7c\ufa7d\ufa7e\ufa7f\ufa80\ufa81\ufa82\ufa83\ufa84\ufa85\ufa86\ufa87\ufa88\ufa89\ufa8a\ufa8b\ufa8c\ufa8d\ufa8e\ufa8f\ufa90\ufa91\ufa92\ufa93\ufa94\ufa95\ufa96\ufa97\ufa98\ufa99\ufa9a\ufa9b\ufa9c\ufa9d\ufa9e\ufa9f\ufaa0\ufaa1\ufaa2\ufaa3\ufaa4\ufaa5\ufaa6\ufaa7\ufaa8\ufaa9\ufaaa\ufaab\ufaac\ufaad\ufaae\ufaaf\ufab0\ufab1\ufab2\ufab3\ufab4\ufab5\ufab6\ufab7\ufab8\ufab9\ufaba\ufabb\ufabc\ufabd\ufabe\ufabf\ufac0\ufac1\ufac2\ufac3\ufac4\ufac5\ufac6\ufac7\ufac8\ufac9\ufaca\ufacb\ufacc\ufacd\uface\ufacf\ufad0\ufad1\ufad2\ufad3\ufad4\ufad5\ufad6\ufad7\ufad8\ufad9\ufb1d\ufb1f\ufb20\ufb21\ufb22\ufb23\ufb24\ufb25\ufb26\ufb27\ufb28\ufb2a\ufb2b\ufb2c\ufb2d\ufb2e\ufb2f\ufb30\ufb31\ufb32\ufb33\ufb34\ufb35\ufb36\ufb38\ufb39\ufb3a\ufb3b\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46\ufb47\ufb48\ufb49\ufb4a\ufb4b\ufb4c\ufb4d\ufb4e\ufb4f\ufb50\ufb51\ufb52\ufb53\ufb54\ufb55\ufb56\ufb57\ufb58\ufb59\ufb5a\ufb5b\ufb5c\ufb5d\ufb5e\ufb5f\ufb60\ufb61\ufb62\ufb63\ufb64\ufb65\ufb66\ufb67\ufb68\ufb69\ufb6a\ufb6b\ufb6c\ufb6d\ufb6e\ufb6f\ufb70\ufb71\ufb72\ufb73\ufb74\ufb75\ufb76\ufb77\ufb78\ufb79\ufb7a\ufb7b\ufb7c\ufb7d\ufb7e\ufb7f\ufb80\ufb81\ufb82\ufb83\ufb84\ufb85\ufb86\ufb87\ufb88\ufb89\ufb8a\ufb8b\ufb8c\ufb8d\ufb8e\ufb8f\ufb90\ufb91\ufb92\ufb93\ufb94\ufb95\ufb96\ufb97\ufb98\ufb99\ufb9a\ufb9b\ufb9c\ufb9d\ufb9e\ufb9f\ufba0\ufba1\ufba2\ufba3\ufba4\ufba5\ufba6\ufba7\ufba8\ufba9\ufbaa\ufbab\ufbac\ufbad\ufbae\ufbaf\ufbb0\ufbb1\ufbd3\ufbd4\ufbd5\ufbd6\ufbd7\ufbd8\ufbd9\ufbda\ufbdb\ufbdc\ufbdd\ufbde\ufbdf\ufbe0\ufbe1\ufbe2\ufbe3\ufbe4\ufbe5\ufbe6\ufbe7\ufbe8\ufbe9\ufbea\ufbeb\ufbec\ufbed\ufbee\ufbef\ufbf0\ufbf1\ufbf2\ufbf3\ufbf4\ufbf5\ufbf6\ufbf7\ufbf8\ufbf9\ufbfa\ufb
fb\ufbfc\ufbfd\ufbfe\ufbff\ufc00\ufc01\ufc02\ufc03\ufc04\ufc05\ufc06\ufc07\ufc08\ufc09\ufc0a\ufc0b\ufc0c\ufc0d\ufc0e\ufc0f\ufc10\ufc11\ufc12\ufc13\ufc14\ufc15\ufc16\ufc17\ufc18\ufc19\ufc1a\ufc1b\ufc1c\ufc1d\ufc1e\ufc1f\ufc20\ufc21\ufc22\ufc23\ufc24\ufc25\ufc26\ufc27\ufc28\ufc29\ufc2a\ufc2b\ufc2c\ufc2d\ufc2e\ufc2f\ufc30\ufc31\ufc32\ufc33\ufc34\ufc35\ufc36\ufc37\ufc38\ufc39\ufc3a\ufc3b\ufc3c\ufc3d\ufc3e\ufc3f\ufc40\ufc41\ufc42\ufc43\ufc44\ufc45\ufc46\ufc47\ufc48\ufc49\ufc4a\ufc4b\ufc4c\ufc4d\ufc4e\ufc4f\ufc50\ufc51\ufc52\ufc53\ufc54\ufc55\ufc56\ufc57\ufc58\ufc59\ufc5a\ufc5b\ufc5c\ufc5d\ufc5e\ufc5f\ufc60\ufc61\ufc62\ufc63\ufc64\ufc65\ufc66\ufc67\ufc68\ufc69\ufc6a\ufc6b\ufc6c\ufc6d\ufc6e\ufc6f\ufc70\ufc71\ufc72\ufc73\ufc74\ufc75\ufc76\ufc77\ufc78\ufc79\ufc7a\ufc7b\ufc7c\ufc7d\ufc7e\ufc7f\ufc80\ufc81\ufc82\ufc83\ufc84\ufc85\ufc86\ufc87\ufc88\ufc89\ufc8a\ufc8b\ufc8c\ufc8d\ufc8e\ufc8f\ufc90\ufc91\ufc92\ufc93\ufc94\ufc95\ufc96\ufc97\ufc98\ufc99\ufc9a\ufc9b\ufc9c\ufc9d\ufc9e\ufc9f\ufca0\ufca1\ufca2\ufca3\ufca4\ufca5\ufca6\ufca7\ufca8\ufca9\ufcaa\ufcab\ufcac\ufcad\ufcae\ufcaf\ufcb0\ufcb1\ufcb2\ufcb3\ufcb4\ufcb5\ufcb6\ufcb7\ufcb8\ufcb9\ufcba\ufcbb\ufcbc\ufcbd\ufcbe\ufcbf\ufcc0\ufcc1\ufcc2\ufcc3\ufcc4\ufcc5\ufcc6\ufcc7\ufcc8\ufcc9\ufcca\ufccb\ufccc\ufccd\ufcce\ufccf\ufcd0\ufcd1\ufcd2\ufcd3\ufcd4\ufcd5\ufcd6\ufcd7\ufcd8\ufcd9\ufcda\ufcdb\ufcdc\ufcdd\ufcde\ufcdf\ufce0\ufce1\ufce2\ufce3\ufce4\ufce5\ufce6\ufce7\ufce8\ufce9\ufcea\ufceb\ufcec\ufced\ufcee\ufcef\ufcf0\ufcf1\ufcf2\ufcf3\ufcf4\ufcf5\ufcf6\ufcf7\ufcf8\ufcf9\ufcfa\ufcfb\ufcfc\ufcfd\ufcfe\ufcff\ufd00\ufd01\ufd02\ufd03\ufd04\ufd05\ufd06\ufd07\ufd08\ufd09\ufd0a\ufd0b\ufd0c\ufd0d\ufd0e\ufd0f\ufd10\ufd11\ufd12\ufd13\ufd14\ufd15\ufd16\ufd17\ufd18\ufd19\ufd1a\ufd1b\ufd1c\ufd1d\ufd1e\ufd1f\ufd20\ufd21\ufd22\ufd23\ufd24\ufd25\ufd26\ufd27\ufd28\ufd29\ufd2a\ufd2b\ufd2c\ufd2d\ufd2e\ufd2f\ufd30\ufd31\ufd32\ufd33\ufd34\ufd35\ufd36\ufd37\ufd38\ufd39\ufd3a\ufd3b\ufd3c\ufd3d\ufd50\ufd51\ufd52\ufd53\ufd54\ufd55\ufd56\ufd57\ufd58\ufd59\ufd5a
\ufd5b\ufd5c\ufd5d\ufd5e\ufd5f\ufd60\ufd61\ufd62\ufd63\ufd64\ufd65\ufd66\ufd67\ufd68\ufd69\ufd6a\ufd6b\ufd6c\ufd6d\ufd6e\ufd6f\ufd70\ufd71\ufd72\ufd73\ufd74\ufd75\ufd76\ufd77\ufd78\ufd79\ufd7a\ufd7b\ufd7c\ufd7d\ufd7e\ufd7f\ufd80\ufd81\ufd82\ufd83\ufd84\ufd85\ufd86\ufd87\ufd88\ufd89\ufd8a\ufd8b\ufd8c\ufd8d\ufd8e\ufd8f\ufd92\ufd93\ufd94\ufd95\ufd96\ufd97\ufd98\ufd99\ufd9a\ufd9b\ufd9c\ufd9d\ufd9e\ufd9f\ufda0\ufda1\ufda2\ufda3\ufda4\ufda5\ufda6\ufda7\ufda8\ufda9\ufdaa\ufdab\ufdac\ufdad\ufdae\ufdaf\ufdb0\ufdb1\ufdb2\ufdb3\ufdb4\ufdb5\ufdb6\ufdb7\ufdb8\ufdb9\ufdba\ufdbb\ufdbc\ufdbd\ufdbe\ufdbf\ufdc0\ufdc1\ufdc2\ufdc3\ufdc4\ufdc5\ufdc6\ufdc7\ufdf0\ufdf1\ufdf2\ufdf3\ufdf4\ufdf5\ufdf6\ufdf7\ufdf8\ufdf9\ufdfa\ufdfb\ufe70\ufe71\ufe72\ufe73\ufe74\ufe76\ufe77\ufe78\ufe79\ufe7a\ufe7b\ufe7c\ufe7d\ufe7e\ufe7f\ufe80\ufe81\ufe82\ufe83\ufe84\ufe85\ufe86\ufe87\ufe88\ufe89\ufe8a\ufe8b\ufe8c\ufe8d\ufe8e\ufe8f\ufe90\ufe91\ufe92\ufe93\ufe94\ufe95\ufe96\ufe97\ufe98\ufe99\ufe9a\ufe9b\ufe9c\ufe9d\ufe9e\ufe9f\ufea0\ufea1\ufea2\ufea3\ufea4\ufea5\ufea6\ufea7\ufea8\ufea9\ufeaa\ufeab\ufeac\ufead\ufeae\ufeaf\ufeb0\ufeb1\ufeb2\ufeb3\ufeb4\ufeb5\ufeb6\ufeb7\ufeb8\ufeb9\ufeba\ufebb\ufebc\ufebd\ufebe\ufebf\ufec0\ufec1\ufec2\ufec3\ufec4\ufec5\ufec6\ufec7\ufec8\ufec9\ufeca\ufecb\ufecc\ufecd\ufece\ufecf\ufed0\ufed1\ufed2\ufed3\ufed4\ufed5\ufed6\ufed7\ufed8\ufed9\ufeda\ufedb\ufedc\ufedd\ufede\ufedf\ufee0\ufee1\ufee2\ufee3\ufee4\ufee5\ufee6\ufee7\ufee8\ufee9\ufeea\ufeeb\ufeec\ufeed\ufeee\ufeef\ufef0\ufef1\ufef2\ufef3\ufef4\ufef5\ufef6\ufef7\ufef8\ufef9\ufefa\ufefb\ufefc\uff66\uff67\uff68\uff69\uff6a\uff6b\uff6c\uff6d\uff6e\uff6f\uff71\uff72\uff73\uff74\uff75\uff76\uff77\uff78\uff79\uff7a\uff7b\uff7c\uff7d\uff7e\uff7f\uff80\uff81\uff82\uff83\uff84\uff85\uff86\uff87\uff88\uff89\uff8a\uff8b\uff8c\uff8d\uff8e\uff8f\uff90\uff91\uff92\uff93\uff94\uff95\uff96\uff97\uff98\uff99\uff9a\uff9b\uff9c\uff9d\uffa0\uffa1\uffa2\uffa3\uffa4\uffa5\uffa6\uffa7\uffa8\uffa9\uffaa\uffab\uffac\uffad\uffae\uffaf\uffb0\uffb1\uffb2\u
ffb3\uffb4\uffb5\uffb6\uffb7\uffb8\uffb9\uffba\uffbb\uffbc\uffbd\uffbe\uffc2\uffc3\uffc4\uffc5\uffc6\uffc7\uffca\uffcb\uffcc\uffcd\uffce\uffcf\uffd2\uffd3\uffd4\uffd5\uffd6\uffd7\uffda\uffdb\uffdc' + +Lt = u'\u01c5\u01c8\u01cb\u01f2\u1f88\u1f89\u1f8a\u1f8b\u1f8c\u1f8d\u1f8e\u1f8f\u1f98\u1f99\u1f9a\u1f9b\u1f9c\u1f9d\u1f9e\u1f9f\u1fa8\u1fa9\u1faa\u1fab\u1fac\u1fad\u1fae\u1faf\u1fbc\u1fcc\u1ffc' + +Lu = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178\u0179\u017b\u017d\u0181\u0182\u0184\u0186\u0187\u0189\u018a\u018b\u018e\u018f\u0190\u0191\u0193\u0194\u0196\u0197\u0198\u019c\u019d\u019f\u01a0\u01a2\u01a4\u01a6\u01a7\u01a9\u01ac\u01ae\u01af\u01b1\u01b2\u01b3\u01b5\u01b7\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6\u01f7\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a\u023b\u023d\u023e\u0241\u0386\u0388\u0389\u038a\u038c\u038e\u038f\u0391\u0392\u0393\u0394\u0395\u0396\u0397\u0398\u0399\u039a\u039b\u039c\u039d\u039e\u039f\u03a0\u03a1\u03a3\u03a4\u03a5\u03a6\u03a7\u03a8\u03a9\u03aa\u03ab\u03d2\u03d3\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9\u03fa\u03fd\u03fe\u03ff\u0400\u0401\u0402\u0403\u0404\u0405\u0406\u0407\u0408\u0409\u040a\u040b\u040c\u040d\u040e\u040f\u0410\u0411\u0412\u0413\u0414\u0415\u0416\u0417\u0418\u0419\u041a\u041b\u
041c\u041d\u041e\u041f\u0420\u0421\u0422\u0423\u0424\u0425\u0426\u0427\u0428\u0429\u042a\u042b\u042c\u042d\u042e\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0531\u0532\u0533\u0534\u0535\u0536\u0537\u0538\u0539\u053a\u053b\u053c\u053d\u053e\u053f\u0540\u0541\u0542\u0543\u0544\u0545\u0546\u0547\u0548\u0549\u054a\u054b\u054c\u054d\u054e\u054f\u0550\u0551\u0552\u0553\u0554\u0555\u0556\u10a0\u10a1\u10a2\u10a3\u10a4\u10a5\u10a6\u10a7\u10a8\u10a9\u10aa\u10ab\u10ac\u10ad\u10ae\u10af\u10b0\u10b1\u10b2\u10b3\u10b4\u10b5\u10b6\u10b7\u10b8\u10b9\u10ba\u10bb\u10bc\u10bd\u10be\u10bf\u10c0\u10c1\u10c2\u10c3\u10c4\u10c5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1f08\u1f09\u1f0a\u1f0b\u1f0c\u1f0d\u1f0e\u1f0f\u1f18\u1f19\u1f1a\u1f1b\u1f1c\u1f1d\u1f28\u1f29\u1f2a\u1f2b\u1f2c\u1f2d\u1f2e\u1f2f\u1f38\u1f39\u1f3a\u1f3b\u1f3c\u1f3d\u1f3e\u1f3f\u1f48\u1f49\u1f4a\u1f4b\u1f4c\u1f4d\u1f
59\u1f5b\u1f5d\u1f5f\u1f68\u1f69\u1f6a\u1f6b\u1f6c\u1f6d\u1f6e\u1f6f\u1fb8\u1fb9\u1fba\u1fbb\u1fc8\u1fc9\u1fca\u1fcb\u1fd8\u1fd9\u1fda\u1fdb\u1fe8\u1fe9\u1fea\u1feb\u1fec\u1ff8\u1ff9\u1ffa\u1ffb\u2102\u2107\u210b\u210c\u210d\u2110\u2111\u2112\u2115\u2119\u211a\u211b\u211c\u211d\u2124\u2126\u2128\u212a\u212b\u212c\u212d\u2130\u2131\u2133\u213e\u213f\u2145\u2c00\u2c01\u2c02\u2c03\u2c04\u2c05\u2c06\u2c07\u2c08\u2c09\u2c0a\u2c0b\u2c0c\u2c0d\u2c0e\u2c0f\u2c10\u2c11\u2c12\u2c13\u2c14\u2c15\u2c16\u2c17\u2c18\u2c19\u2c1a\u2c1b\u2c1c\u2c1d\u2c1e\u2c1f\u2c20\u2c21\u2c22\u2c23\u2c24\u2c25\u2c26\u2c27\u2c28\u2c29\u2c2a\u2c2b\u2c2c\u2c2d\u2c2e\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\uff21\uff22\uff23\uff24\uff25\uff26\uff27\uff28\uff29\uff2a\uff2b\uff2c\uff2d\uff2e\uff2f\uff30\uff31\uff32\uff33\uff34\uff35\uff36\uff37\uff38\uff39\uff3a' + +Mc = 
u'\u0903\u093e\u093f\u0940\u0949\u094a\u094b\u094c\u0982\u0983\u09be\u09bf\u09c0\u09c7\u09c8\u09cb\u09cc\u09d7\u0a03\u0a3e\u0a3f\u0a40\u0a83\u0abe\u0abf\u0ac0\u0ac9\u0acb\u0acc\u0b02\u0b03\u0b3e\u0b40\u0b47\u0b48\u0b4b\u0b4c\u0b57\u0bbe\u0bbf\u0bc1\u0bc2\u0bc6\u0bc7\u0bc8\u0bca\u0bcb\u0bcc\u0bd7\u0c01\u0c02\u0c03\u0c41\u0c42\u0c43\u0c44\u0c82\u0c83\u0cbe\u0cc0\u0cc1\u0cc2\u0cc3\u0cc4\u0cc7\u0cc8\u0cca\u0ccb\u0cd5\u0cd6\u0d02\u0d03\u0d3e\u0d3f\u0d40\u0d46\u0d47\u0d48\u0d4a\u0d4b\u0d4c\u0d57\u0d82\u0d83\u0dcf\u0dd0\u0dd1\u0dd8\u0dd9\u0dda\u0ddb\u0ddc\u0ddd\u0dde\u0ddf\u0df2\u0df3\u0f3e\u0f3f\u0f7f\u102c\u1031\u1038\u1056\u1057\u17b6\u17be\u17bf\u17c0\u17c1\u17c2\u17c3\u17c4\u17c5\u17c7\u17c8\u1923\u1924\u1925\u1926\u1929\u192a\u192b\u1930\u1931\u1933\u1934\u1935\u1936\u1937\u1938\u19b0\u19b1\u19b2\u19b3\u19b4\u19b5\u19b6\u19b7\u19b8\u19b9\u19ba\u19bb\u19bc\u19bd\u19be\u19bf\u19c0\u19c8\u19c9\u1a19\u1a1a\u1a1b\ua802\ua823\ua824\ua827' + +Me = u'\u0488\u0489\u06de\u20dd\u20de\u20df\u20e0\u20e2\u20e3\u20e4' + +Mn = 
u'\u0300\u0301\u0302\u0303\u0304\u0305\u0306\u0307\u0308\u0309\u030a\u030b\u030c\u030d\u030e\u030f\u0310\u0311\u0312\u0313\u0314\u0315\u0316\u0317\u0318\u0319\u031a\u031b\u031c\u031d\u031e\u031f\u0320\u0321\u0322\u0323\u0324\u0325\u0326\u0327\u0328\u0329\u032a\u032b\u032c\u032d\u032e\u032f\u0330\u0331\u0332\u0333\u0334\u0335\u0336\u0337\u0338\u0339\u033a\u033b\u033c\u033d\u033e\u033f\u0340\u0341\u0342\u0343\u0344\u0345\u0346\u0347\u0348\u0349\u034a\u034b\u034c\u034d\u034e\u034f\u0350\u0351\u0352\u0353\u0354\u0355\u0356\u0357\u0358\u0359\u035a\u035b\u035c\u035d\u035e\u035f\u0360\u0361\u0362\u0363\u0364\u0365\u0366\u0367\u0368\u0369\u036a\u036b\u036c\u036d\u036e\u036f\u0483\u0484\u0485\u0486\u0591\u0592\u0593\u0594\u0595\u0596\u0597\u0598\u0599\u059a\u059b\u059c\u059d\u059e\u059f\u05a0\u05a1\u05a2\u05a3\u05a4\u05a5\u05a6\u05a7\u05a8\u05a9\u05aa\u05ab\u05ac\u05ad\u05ae\u05af\u05b0\u05b1\u05b2\u05b3\u05b4\u05b5\u05b6\u05b7\u05b8\u05b9\u05bb\u05bc\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610\u0611\u0612\u0613\u0614\u0615\u064b\u064c\u064d\u064e\u064f\u0650\u0651\u0652\u0653\u0654\u0655\u0656\u0657\u0658\u0659\u065a\u065b\u065c\u065d\u065e\u0670\u06d6\u06d7\u06d8\u06d9\u06da\u06db\u06dc\u06df\u06e0\u06e1\u06e2\u06e3\u06e4\u06e7\u06e8\u06ea\u06eb\u06ec\u06ed\u0711\u0730\u0731\u0732\u0733\u0734\u0735\u0736\u0737\u0738\u0739\u073a\u073b\u073c\u073d\u073e\u073f\u0740\u0741\u0742\u0743\u0744\u0745\u0746\u0747\u0748\u0749\u074a\u07a6\u07a7\u07a8\u07a9\u07aa\u07ab\u07ac\u07ad\u07ae\u07af\u07b0\u0901\u0902\u093c\u0941\u0942\u0943\u0944\u0945\u0946\u0947\u0948\u094d\u0951\u0952\u0953\u0954\u0962\u0963\u0981\u09bc\u09c1\u09c2\u09c3\u09c4\u09cd\u09e2\u09e3\u0a01\u0a02\u0a3c\u0a41\u0a42\u0a47\u0a48\u0a4b\u0a4c\u0a4d\u0a70\u0a71\u0a81\u0a82\u0abc\u0ac1\u0ac2\u0ac3\u0ac4\u0ac5\u0ac7\u0ac8\u0acd\u0ae2\u0ae3\u0b01\u0b3c\u0b3f\u0b41\u0b42\u0b43\u0b4d\u0b56\u0b82\u0bc0\u0bcd\u0c3e\u0c3f\u0c40\u0c46\u0c47\u0c48\u0c4a\u0c4b\u0c4c\u0c4d\u0c55\u0c56\u0cbc\u0cbf\u0cc6\u0ccc\u0ccd\u0d41\u0d42
\u0d43\u0d4d\u0dca\u0dd2\u0dd3\u0dd4\u0dd6\u0e31\u0e34\u0e35\u0e36\u0e37\u0e38\u0e39\u0e3a\u0e47\u0e48\u0e49\u0e4a\u0e4b\u0e4c\u0e4d\u0e4e\u0eb1\u0eb4\u0eb5\u0eb6\u0eb7\u0eb8\u0eb9\u0ebb\u0ebc\u0ec8\u0ec9\u0eca\u0ecb\u0ecc\u0ecd\u0f18\u0f19\u0f35\u0f37\u0f39\u0f71\u0f72\u0f73\u0f74\u0f75\u0f76\u0f77\u0f78\u0f79\u0f7a\u0f7b\u0f7c\u0f7d\u0f7e\u0f80\u0f81\u0f82\u0f83\u0f84\u0f86\u0f87\u0f90\u0f91\u0f92\u0f93\u0f94\u0f95\u0f96\u0f97\u0f99\u0f9a\u0f9b\u0f9c\u0f9d\u0f9e\u0f9f\u0fa0\u0fa1\u0fa2\u0fa3\u0fa4\u0fa5\u0fa6\u0fa7\u0fa8\u0fa9\u0faa\u0fab\u0fac\u0fad\u0fae\u0faf\u0fb0\u0fb1\u0fb2\u0fb3\u0fb4\u0fb5\u0fb6\u0fb7\u0fb8\u0fb9\u0fba\u0fbb\u0fbc\u0fc6\u102d\u102e\u102f\u1030\u1032\u1036\u1037\u1039\u1058\u1059\u135f\u1712\u1713\u1714\u1732\u1733\u1734\u1752\u1753\u1772\u1773\u17b7\u17b8\u17b9\u17ba\u17bb\u17bc\u17bd\u17c6\u17c9\u17ca\u17cb\u17cc\u17cd\u17ce\u17cf\u17d0\u17d1\u17d2\u17d3\u17dd\u180b\u180c\u180d\u18a9\u1920\u1921\u1922\u1927\u1928\u1932\u1939\u193a\u193b\u1a17\u1a18\u1dc0\u1dc1\u1dc2\u1dc3\u20d0\u20d1\u20d2\u20d3\u20d4\u20d5\u20d6\u20d7\u20d8\u20d9\u20da\u20db\u20dc\u20e1\u20e5\u20e6\u20e7\u20e8\u20e9\u20ea\u20eb\u302a\u302b\u302c\u302d\u302e\u302f\u3099\u309a\ua806\ua80b\ua825\ua826\ufb1e\ufe00\ufe01\ufe02\ufe03\ufe04\ufe05\ufe06\ufe07\ufe08\ufe09\ufe0a\ufe0b\ufe0c\ufe0d\ufe0e\ufe0f\ufe20\ufe21\ufe22\ufe23' + +Nd = 
u'0123456789\u0660\u0661\u0662\u0663\u0664\u0665\u0666\u0667\u0668\u0669\u06f0\u06f1\u06f2\u06f3\u06f4\u06f5\u06f6\u06f7\u06f8\u06f9\u0966\u0967\u0968\u0969\u096a\u096b\u096c\u096d\u096e\u096f\u09e6\u09e7\u09e8\u09e9\u09ea\u09eb\u09ec\u09ed\u09ee\u09ef\u0a66\u0a67\u0a68\u0a69\u0a6a\u0a6b\u0a6c\u0a6d\u0a6e\u0a6f\u0ae6\u0ae7\u0ae8\u0ae9\u0aea\u0aeb\u0aec\u0aed\u0aee\u0aef\u0b66\u0b67\u0b68\u0b69\u0b6a\u0b6b\u0b6c\u0b6d\u0b6e\u0b6f\u0be6\u0be7\u0be8\u0be9\u0bea\u0beb\u0bec\u0bed\u0bee\u0bef\u0c66\u0c67\u0c68\u0c69\u0c6a\u0c6b\u0c6c\u0c6d\u0c6e\u0c6f\u0ce6\u0ce7\u0ce8\u0ce9\u0cea\u0ceb\u0cec\u0ced\u0cee\u0cef\u0d66\u0d67\u0d68\u0d69\u0d6a\u0d6b\u0d6c\u0d6d\u0d6e\u0d6f\u0e50\u0e51\u0e52\u0e53\u0e54\u0e55\u0e56\u0e57\u0e58\u0e59\u0ed0\u0ed1\u0ed2\u0ed3\u0ed4\u0ed5\u0ed6\u0ed7\u0ed8\u0ed9\u0f20\u0f21\u0f22\u0f23\u0f24\u0f25\u0f26\u0f27\u0f28\u0f29\u1040\u1041\u1042\u1043\u1044\u1045\u1046\u1047\u1048\u1049\u17e0\u17e1\u17e2\u17e3\u17e4\u17e5\u17e6\u17e7\u17e8\u17e9\u1810\u1811\u1812\u1813\u1814\u1815\u1816\u1817\u1818\u1819\u1946\u1947\u1948\u1949\u194a\u194b\u194c\u194d\u194e\u194f\u19d0\u19d1\u19d2\u19d3\u19d4\u19d5\u19d6\u19d7\u19d8\u19d9\uff10\uff11\uff12\uff13\uff14\uff15\uff16\uff17\uff18\uff19' + +Nl = u'\u16ee\u16ef\u16f0\u2160\u2161\u2162\u2163\u2164\u2165\u2166\u2167\u2168\u2169\u216a\u216b\u216c\u216d\u216e\u216f\u2170\u2171\u2172\u2173\u2174\u2175\u2176\u2177\u2178\u2179\u217a\u217b\u217c\u217d\u217e\u217f\u2180\u2181\u2182\u2183\u3007\u3021\u3022\u3023\u3024\u3025\u3026\u3027\u3028\u3029\u3038\u3039\u303a' + +No = 
u'\xb2\xb3\xb9\xbc\xbd\xbe\u09f4\u09f5\u09f6\u09f7\u09f8\u09f9\u0bf0\u0bf1\u0bf2\u0f2a\u0f2b\u0f2c\u0f2d\u0f2e\u0f2f\u0f30\u0f31\u0f32\u0f33\u1369\u136a\u136b\u136c\u136d\u136e\u136f\u1370\u1371\u1372\u1373\u1374\u1375\u1376\u1377\u1378\u1379\u137a\u137b\u137c\u17f0\u17f1\u17f2\u17f3\u17f4\u17f5\u17f6\u17f7\u17f8\u17f9\u2070\u2074\u2075\u2076\u2077\u2078\u2079\u2080\u2081\u2082\u2083\u2084\u2085\u2086\u2087\u2088\u2089\u2153\u2154\u2155\u2156\u2157\u2158\u2159\u215a\u215b\u215c\u215d\u215e\u215f\u2460\u2461\u2462\u2463\u2464\u2465\u2466\u2467\u2468\u2469\u246a\u246b\u246c\u246d\u246e\u246f\u2470\u2471\u2472\u2473\u2474\u2475\u2476\u2477\u2478\u2479\u247a\u247b\u247c\u247d\u247e\u247f\u2480\u2481\u2482\u2483\u2484\u2485\u2486\u2487\u2488\u2489\u248a\u248b\u248c\u248d\u248e\u248f\u2490\u2491\u2492\u2493\u2494\u2495\u2496\u2497\u2498\u2499\u249a\u249b\u24ea\u24eb\u24ec\u24ed\u24ee\u24ef\u24f0\u24f1\u24f2\u24f3\u24f4\u24f5\u24f6\u24f7\u24f8\u24f9\u24fa\u24fb\u24fc\u24fd\u24fe\u24ff\u2776\u2777\u2778\u2779\u277a\u277b\u277c\u277d\u277e\u277f\u2780\u2781\u2782\u2783\u2784\u2785\u2786\u2787\u2788\u2789\u278a\u278b\u278c\u278d\u278e\u278f\u2790\u2791\u2792\u2793\u2cfd\u3192\u3193\u3194\u3195\u3220\u3221\u3222\u3223\u3224\u3225\u3226\u3227\u3228\u3229\u3251\u3252\u3253\u3254\u3255\u3256\u3257\u3258\u3259\u325a\u325b\u325c\u325d\u325e\u325f\u3280\u3281\u3282\u3283\u3284\u3285\u3286\u3287\u3288\u3289\u32b1\u32b2\u32b3\u32b4\u32b5\u32b6\u32b7\u32b8\u32b9\u32ba\u32bb\u32bc\u32bd\u32be\u32bf' + +Pc = u'_\u203f\u2040\u2054\ufe33\ufe34\ufe4d\ufe4e\ufe4f\uff3f' + +Pd = u'-\u058a\u1806\u2010\u2011\u2012\u2013\u2014\u2015\u2e17\u301c\u3030\u30a0\ufe31\ufe32\ufe58\ufe63\uff0d' + +Pe = 
u')]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u232a\u23b5\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63' + +Pf = u'\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d' + +Pi = u'\xab\u2018\u201b\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c' + +Po = u'!"#%&\'*,./:;?@\\\xa1\xb7\xbf\u037e\u0387\u055a\u055b\u055c\u055d\u055e\u055f\u0589\u05be\u05c0\u05c3\u05c6\u05f3\u05f4\u060c\u060d\u061b\u061e\u061f\u066a\u066b\u066c\u066d\u06d4\u0700\u0701\u0702\u0703\u0704\u0705\u0706\u0707\u0708\u0709\u070a\u070b\u070c\u070d\u0964\u0965\u0970\u0df4\u0e4f\u0e5a\u0e5b\u0f04\u0f05\u0f06\u0f07\u0f08\u0f09\u0f0a\u0f0b\u0f0c\u0f0d\u0f0e\u0f0f\u0f10\u0f11\u0f12\u0f85\u0fd0\u0fd1\u104a\u104b\u104c\u104d\u104e\u104f\u10fb\u1361\u1362\u1363\u1364\u1365\u1366\u1367\u1368\u166d\u166e\u16eb\u16ec\u16ed\u1735\u1736\u17d4\u17d5\u17d6\u17d8\u17d9\u17da\u1800\u1801\u1802\u1803\u1804\u1805\u1807\u1808\u1809\u180a\u1944\u1945\u19de\u19df\u1a1e\u1a1f\u2016\u2017\u2020\u2021\u2022\u2023\u2024\u2025\u2026\u2027\u2030\u2031\u2032\u2033\u2034\u2035\u2036\u2037\u2038\u203b\u203c\u203d\u203e\u2041\u2042\u2043\u2047\u2048\u2049\u204a\u204b\u204c\u204d\u204e\u204f\u2050\u2051\u2053\u2055\u2056\u2057\u2058\u2059\u205a\u205b\u205c\u205d\u205e\u23b6\u2cf9\u2cfa\u2cfb\u2cfc\u2cfe\u2cff\u2e00\u2e01\u2e06\u2e07\u2e08\u2e0b\u2e0e\u2e0f\u2e10\u2e11\u2e12\u2e13\u2e14\u2e15\u2e16\u3001\u3002\u3003\u303d\u30fb\ufe10\ufe11\ufe12\ufe13\ufe14\ufe15\ufe16\ufe19\ufe30\ufe45\ufe46\ufe49\ufe4a\ufe4b\ufe4c\ufe50\ufe51\ufe52\ufe54\ufe55\ufe56\ufe57\ufe5f\ufe60\ufe61\ufe68\ufe6a\ufe6b\uff01\uff02\uff03\uff05\uff06\uff07\uff0a\uff0c\uff0e\uff0f\uff1a\uff1b\uff1f\uff20\uff3c\uff61\uff64\uff65' + +Ps = 
u'([{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2329\u23b4\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62' + +Sc = u'$\xa2\xa3\xa4\xa5\u060b\u09f2\u09f3\u0af1\u0bf9\u0e3f\u17db\u20a0\u20a1\u20a2\u20a3\u20a4\u20a5\u20a6\u20a7\u20a8\u20a9\u20aa\u20ab\u20ac\u20ad\u20ae\u20af\u20b0\u20b1\u20b2\u20b3\u20b4\u20b5\ufdfc\ufe69\uff04\uffe0\uffe1\uffe5\uffe6' + +Sk = u'^`\xa8\xaf\xb4\xb8\u02c2\u02c3\u02c4\u02c5\u02d2\u02d3\u02d4\u02d5\u02d6\u02d7\u02d8\u02d9\u02da\u02db\u02dc\u02dd\u02de\u02df\u02e5\u02e6\u02e7\u02e8\u02e9\u02ea\u02eb\u02ec\u02ed\u02ef\u02f0\u02f1\u02f2\u02f3\u02f4\u02f5\u02f6\u02f7\u02f8\u02f9\u02fa\u02fb\u02fc\u02fd\u02fe\u02ff\u0374\u0375\u0384\u0385\u1fbd\u1fbf\u1fc0\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed\u1fee\u1fef\u1ffd\u1ffe\u309b\u309c\ua700\ua701\ua702\ua703\ua704\ua705\ua706\ua707\ua708\ua709\ua70a\ua70b\ua70c\ua70d\ua70e\ua70f\ua710\ua711\ua712\ua713\ua714\ua715\ua716\uff3e\uff40\uffe3' + +Sm = 
u'+<=>|~\xac\xb1\xd7\xf7\u03f6\u2044\u2052\u207a\u207b\u207c\u208a\u208b\u208c\u2140\u2141\u2142\u2143\u2144\u214b\u2190\u2191\u2192\u2193\u2194\u219a\u219b\u21a0\u21a3\u21a6\u21ae\u21ce\u21cf\u21d2\u21d4\u21f4\u21f5\u21f6\u21f7\u21f8\u21f9\u21fa\u21fb\u21fc\u21fd\u21fe\u21ff\u2200\u2201\u2202\u2203\u2204\u2205\u2206\u2207\u2208\u2209\u220a\u220b\u220c\u220d\u220e\u220f\u2210\u2211\u2212\u2213\u2214\u2215\u2216\u2217\u2218\u2219\u221a\u221b\u221c\u221d\u221e\u221f\u2220\u2221\u2222\u2223\u2224\u2225\u2226\u2227\u2228\u2229\u222a\u222b\u222c\u222d\u222e\u222f\u2230\u2231\u2232\u2233\u2234\u2235\u2236\u2237\u2238\u2239\u223a\u223b\u223c\u223d\u223e\u223f\u2240\u2241\u2242\u2243\u2244\u2245\u2246\u2247\u2248\u2249\u224a\u224b\u224c\u224d\u224e\u224f\u2250\u2251\u2252\u2253\u2254\u2255\u2256\u2257\u2258\u2259\u225a\u225b\u225c\u225d\u225e\u225f\u2260\u2261\u2262\u2263\u2264\u2265\u2266\u2267\u2268\u2269\u226a\u226b\u226c\u226d\u226e\u226f\u2270\u2271\u2272\u2273\u2274\u2275\u2276\u2277\u2278\u2279\u227a\u227b\u227c\u227d\u227e\u227f\u2280\u2281\u2282\u2283\u2284\u2285\u2286\u2287\u2288\u2289\u228a\u228b\u228c\u228d\u228e\u228f\u2290\u2291\u2292\u2293\u2294\u2295\u2296\u2297\u2298\u2299\u229a\u229b\u229c\u229d\u229e\u229f\u22a0\u22a1\u22a2\u22a3\u22a4\u22a5\u22a6\u22a7\u22a8\u22a9\u22aa\u22ab\u22ac\u22ad\u22ae\u22af\u22b0\u22b1\u22b2\u22b3\u22b4\u22b5\u22b6\u22b7\u22b8\u22b9\u22ba\u22bb\u22bc\u22bd\u22be\u22bf\u22c0\u22c1\u22c2\u22c3\u22c4\u22c5\u22c6\u22c7\u22c8\u22c9\u22ca\u22cb\u22cc\u22cd\u22ce\u22cf\u22d0\u22d1\u22d2\u22d3\u22d4\u22d5\u22d6\u22d7\u22d8\u22d9\u22da\u22db\u22dc\u22dd\u22de\u22df\u22e0\u22e1\u22e2\u22e3\u22e4\u22e5\u22e6\u22e7\u22e8\u22e9\u22ea\u22eb\u22ec\u22ed\u22ee\u22ef\u22f0\u22f1\u22f2\u22f3\u22f4\u22f5\u22f6\u22f7\u22f8\u22f9\u22fa\u22fb\u22fc\u22fd\u22fe\u22ff\u2308\u2309\u230a\u230b\u2320\u2321\u237c\u239b\u239c\u239d\u239e\u239f\u23a0\u23a1\u23a2\u23a3\u23a4\u23a5\u23a6\u23a7\u23a8\u23a9\u23aa\u23ab\u23ac\u23ad\u23ae\u23af\u23b0\u23b1\u23b2\u
23b3\u25b7\u25c1\u25f8\u25f9\u25fa\u25fb\u25fc\u25fd\u25fe\u25ff\u266f\u27c0\u27c1\u27c2\u27c3\u27c4\u27d0\u27d1\u27d2\u27d3\u27d4\u27d5\u27d6\u27d7\u27d8\u27d9\u27da\u27db\u27dc\u27dd\u27de\u27df\u27e0\u27e1\u27e2\u27e3\u27e4\u27e5\u27f0\u27f1\u27f2\u27f3\u27f4\u27f5\u27f6\u27f7\u27f8\u27f9\u27fa\u27fb\u27fc\u27fd\u27fe\u27ff\u2900\u2901\u2902\u2903\u2904\u2905\u2906\u2907\u2908\u2909\u290a\u290b\u290c\u290d\u290e\u290f\u2910\u2911\u2912\u2913\u2914\u2915\u2916\u2917\u2918\u2919\u291a\u291b\u291c\u291d\u291e\u291f\u2920\u2921\u2922\u2923\u2924\u2925\u2926\u2927\u2928\u2929\u292a\u292b\u292c\u292d\u292e\u292f\u2930\u2931\u2932\u2933\u2934\u2935\u2936\u2937\u2938\u2939\u293a\u293b\u293c\u293d\u293e\u293f\u2940\u2941\u2942\u2943\u2944\u2945\u2946\u2947\u2948\u2949\u294a\u294b\u294c\u294d\u294e\u294f\u2950\u2951\u2952\u2953\u2954\u2955\u2956\u2957\u2958\u2959\u295a\u295b\u295c\u295d\u295e\u295f\u2960\u2961\u2962\u2963\u2964\u2965\u2966\u2967\u2968\u2969\u296a\u296b\u296c\u296d\u296e\u296f\u2970\u2971\u2972\u2973\u2974\u2975\u2976\u2977\u2978\u2979\u297a\u297b\u297c\u297d\u297e\u297f\u2980\u2981\u2982\u2999\u299a\u299b\u299c\u299d\u299e\u299f\u29a0\u29a1\u29a2\u29a3\u29a4\u29a5\u29a6\u29a7\u29a8\u29a9\u29aa\u29ab\u29ac\u29ad\u29ae\u29af\u29b0\u29b1\u29b2\u29b3\u29b4\u29b5\u29b6\u29b7\u29b8\u29b9\u29ba\u29bb\u29bc\u29bd\u29be\u29bf\u29c0\u29c1\u29c2\u29c3\u29c4\u29c5\u29c6\u29c7\u29c8\u29c9\u29ca\u29cb\u29cc\u29cd\u29ce\u29cf\u29d0\u29d1\u29d2\u29d3\u29d4\u29d5\u29d6\u29d7\u29dc\u29dd\u29de\u29df\u29e0\u29e1\u29e2\u29e3\u29e4\u29e5\u29e6\u29e7\u29e8\u29e9\u29ea\u29eb\u29ec\u29ed\u29ee\u29ef\u29f0\u29f1\u29f2\u29f3\u29f4\u29f5\u29f6\u29f7\u29f8\u29f9\u29fa\u29fb\u29fe\u29ff\u2a00\u2a01\u2a02\u2a03\u2a04\u2a05\u2a06\u2a07\u2a08\u2a09\u2a0a\u2a0b\u2a0c\u2a0d\u2a0e\u2a0f\u2a10\u2a11\u2a12\u2a13\u2a14\u2a15\u2a16\u2a17\u2a18\u2a19\u2a1a\u2a1b\u2a1c\u2a1d\u2a1e\u2a1f\u2a20\u2a21\u2a22\u2a23\u2a24\u2a25\u2a26\u2a27\u2a28\u2a29\u2a2a\u2a2b\u2a2c\u2a2d\u2a2e\u2a2f\u2a30\u2a31\u2a
32\u2a33\u2a34\u2a35\u2a36\u2a37\u2a38\u2a39\u2a3a\u2a3b\u2a3c\u2a3d\u2a3e\u2a3f\u2a40\u2a41\u2a42\u2a43\u2a44\u2a45\u2a46\u2a47\u2a48\u2a49\u2a4a\u2a4b\u2a4c\u2a4d\u2a4e\u2a4f\u2a50\u2a51\u2a52\u2a53\u2a54\u2a55\u2a56\u2a57\u2a58\u2a59\u2a5a\u2a5b\u2a5c\u2a5d\u2a5e\u2a5f\u2a60\u2a61\u2a62\u2a63\u2a64\u2a65\u2a66\u2a67\u2a68\u2a69\u2a6a\u2a6b\u2a6c\u2a6d\u2a6e\u2a6f\u2a70\u2a71\u2a72\u2a73\u2a74\u2a75\u2a76\u2a77\u2a78\u2a79\u2a7a\u2a7b\u2a7c\u2a7d\u2a7e\u2a7f\u2a80\u2a81\u2a82\u2a83\u2a84\u2a85\u2a86\u2a87\u2a88\u2a89\u2a8a\u2a8b\u2a8c\u2a8d\u2a8e\u2a8f\u2a90\u2a91\u2a92\u2a93\u2a94\u2a95\u2a96\u2a97\u2a98\u2a99\u2a9a\u2a9b\u2a9c\u2a9d\u2a9e\u2a9f\u2aa0\u2aa1\u2aa2\u2aa3\u2aa4\u2aa5\u2aa6\u2aa7\u2aa8\u2aa9\u2aaa\u2aab\u2aac\u2aad\u2aae\u2aaf\u2ab0\u2ab1\u2ab2\u2ab3\u2ab4\u2ab5\u2ab6\u2ab7\u2ab8\u2ab9\u2aba\u2abb\u2abc\u2abd\u2abe\u2abf\u2ac0\u2ac1\u2ac2\u2ac3\u2ac4\u2ac5\u2ac6\u2ac7\u2ac8\u2ac9\u2aca\u2acb\u2acc\u2acd\u2ace\u2acf\u2ad0\u2ad1\u2ad2\u2ad3\u2ad4\u2ad5\u2ad6\u2ad7\u2ad8\u2ad9\u2ada\u2adb\u2adc\u2add\u2ade\u2adf\u2ae0\u2ae1\u2ae2\u2ae3\u2ae4\u2ae5\u2ae6\u2ae7\u2ae8\u2ae9\u2aea\u2aeb\u2aec\u2aed\u2aee\u2aef\u2af0\u2af1\u2af2\u2af3\u2af4\u2af5\u2af6\u2af7\u2af8\u2af9\u2afa\u2afb\u2afc\u2afd\u2afe\u2aff\ufb29\ufe62\ufe64\ufe65\ufe66\uff0b\uff1c\uff1d\uff1e\uff5c\uff5e\uffe2\uffe9\uffea\uffeb\uffec' + +So = 
u'\xa6\xa7\xa9\xae\xb0\xb6\u0482\u060e\u060f\u06e9\u06fd\u06fe\u09fa\u0b70\u0bf3\u0bf4\u0bf5\u0bf6\u0bf7\u0bf8\u0bfa\u0f01\u0f02\u0f03\u0f13\u0f14\u0f15\u0f16\u0f17\u0f1a\u0f1b\u0f1c\u0f1d\u0f1e\u0f1f\u0f34\u0f36\u0f38\u0fbe\u0fbf\u0fc0\u0fc1\u0fc2\u0fc3\u0fc4\u0fc5\u0fc7\u0fc8\u0fc9\u0fca\u0fcb\u0fcc\u0fcf\u1360\u1390\u1391\u1392\u1393\u1394\u1395\u1396\u1397\u1398\u1399\u1940\u19e0\u19e1\u19e2\u19e3\u19e4\u19e5\u19e6\u19e7\u19e8\u19e9\u19ea\u19eb\u19ec\u19ed\u19ee\u19ef\u19f0\u19f1\u19f2\u19f3\u19f4\u19f5\u19f6\u19f7\u19f8\u19f9\u19fa\u19fb\u19fc\u19fd\u19fe\u19ff\u2100\u2101\u2103\u2104\u2105\u2106\u2108\u2109\u2114\u2116\u2117\u2118\u211e\u211f\u2120\u2121\u2122\u2123\u2125\u2127\u2129\u212e\u2132\u213a\u213b\u214a\u214c\u2195\u2196\u2197\u2198\u2199\u219c\u219d\u219e\u219f\u21a1\u21a2\u21a4\u21a5\u21a7\u21a8\u21a9\u21aa\u21ab\u21ac\u21ad\u21af\u21b0\u21b1\u21b2\u21b3\u21b4\u21b5\u21b6\u21b7\u21b8\u21b9\u21ba\u21bb\u21bc\u21bd\u21be\u21bf\u21c0\u21c1\u21c2\u21c3\u21c4\u21c5\u21c6\u21c7\u21c8\u21c9\u21ca\u21cb\u21cc\u21cd\u21d0\u21d1\u21d3\u21d5\u21d6\u21d7\u21d8\u21d9\u21da\u21db\u21dc\u21dd\u21de\u21df\u21e0\u21e1\u21e2\u21e3\u21e4\u21e5\u21e6\u21e7\u21e8\u21e9\u21ea\u21eb\u21ec\u21ed\u21ee\u21ef\u21f0\u21f1\u21f2\u21f3\u2300\u2301\u2302\u2303\u2304\u2305\u2306\u2307\u230c\u230d\u230e\u230f\u2310\u2311\u2312\u2313\u2314\u2315\u2316\u2317\u2318\u2319\u231a\u231b\u231c\u231d\u231e\u231f\u2322\u2323\u2324\u2325\u2326\u2327\u2328\u232b\u232c\u232d\u232e\u232f\u2330\u2331\u2332\u2333\u2334\u2335\u2336\u2337\u2338\u2339\u233a\u233b\u233c\u233d\u233e\u233f\u2340\u2341\u2342\u2343\u2344\u2345\u2346\u2347\u2348\u2349\u234a\u234b\u234c\u234d\u234e\u234f\u2350\u2351\u2352\u2353\u2354\u2355\u2356\u2357\u2358\u2359\u235a\u235b\u235c\u235d\u235e\u235f\u2360\u2361\u2362\u2363\u2364\u2365\u2366\u2367\u2368\u2369\u236a\u236b\u236c\u236d\u236e\u236f\u2370\u2371\u2372\u2373\u2374\u2375\u2376\u2377\u2378\u2379\u237a\u237b\u237d\u237e\u237f\u2380\u2381\u2382\u2383\u2384\u2385\u2386
\u2387\u2388\u2389\u238a\u238b\u238c\u238d\u238e\u238f\u2390\u2391\u2392\u2393\u2394\u2395\u2396\u2397\u2398\u2399\u239a\u23b7\u23b8\u23b9\u23ba\u23bb\u23bc\u23bd\u23be\u23bf\u23c0\u23c1\u23c2\u23c3\u23c4\u23c5\u23c6\u23c7\u23c8\u23c9\u23ca\u23cb\u23cc\u23cd\u23ce\u23cf\u23d0\u23d1\u23d2\u23d3\u23d4\u23d5\u23d6\u23d7\u23d8\u23d9\u23da\u23db\u2400\u2401\u2402\u2403\u2404\u2405\u2406\u2407\u2408\u2409\u240a\u240b\u240c\u240d\u240e\u240f\u2410\u2411\u2412\u2413\u2414\u2415\u2416\u2417\u2418\u2419\u241a\u241b\u241c\u241d\u241e\u241f\u2420\u2421\u2422\u2423\u2424\u2425\u2426\u2440\u2441\u2442\u2443\u2444\u2445\u2446\u2447\u2448\u2449\u244a\u249c\u249d\u249e\u249f\u24a0\u24a1\u24a2\u24a3\u24a4\u24a5\u24a6\u24a7\u24a8\u24a9\u24aa\u24ab\u24ac\u24ad\u24ae\u24af\u24b0\u24b1\u24b2\u24b3\u24b4\u24b5\u24b6\u24b7\u24b8\u24b9\u24ba\u24bb\u24bc\u24bd\u24be\u24bf\u24c0\u24c1\u24c2\u24c3\u24c4\u24c5\u24c6\u24c7\u24c8\u24c9\u24ca\u24cb\u24cc\u24cd\u24ce\u24cf\u24d0\u24d1\u24d2\u24d3\u24d4\u24d5\u24d6\u24d7\u24d8\u24d9\u24da\u24db\u24dc\u24dd\u24de\u24df\u24e0\u24e1\u24e2\u24e3\u24e4\u24e5\u24e6\u24e7\u24e8\u24e9\u2500\u2501\u2502\u2503\u2504\u2505\u2506\u2507\u2508\u2509\u250a\u250b\u250c\u250d\u250e\u250f\u2510\u2511\u2512\u2513\u2514\u2515\u2516\u2517\u2518\u2519\u251a\u251b\u251c\u251d\u251e\u251f\u2520\u2521\u2522\u2523\u2524\u2525\u2526\u2527\u2528\u2529\u252a\u252b\u252c\u252d\u252e\u252f\u2530\u2531\u2532\u2533\u2534\u2535\u2536\u2537\u2538\u2539\u253a\u253b\u253c\u253d\u253e\u253f\u2540\u2541\u2542\u2543\u2544\u2545\u2546\u2547\u2548\u2549\u254a\u254b\u254c\u254d\u254e\u254f\u2550\u2551\u2552\u2553\u2554\u2555\u2556\u2557\u2558\u2559\u255a\u255b\u255c\u255d\u255e\u255f\u2560\u2561\u2562\u2563\u2564\u2565\u2566\u2567\u2568\u2569\u256a\u256b\u256c\u256d\u256e\u256f\u2570\u2571\u2572\u2573\u2574\u2575\u2576\u2577\u2578\u2579\u257a\u257b\u257c\u257d\u257e\u257f\u2580\u2581\u2582\u2583\u2584\u2585\u2586\u2587\u2588\u2589\u258a\u258b\u258c\u258d\u258e\u258f\u2590\u2591\u2592\u2593\u
2594\u2595\u2596\u2597\u2598\u2599\u259a\u259b\u259c\u259d\u259e\u259f\u25a0\u25a1\u25a2\u25a3\u25a4\u25a5\u25a6\u25a7\u25a8\u25a9\u25aa\u25ab\u25ac\u25ad\u25ae\u25af\u25b0\u25b1\u25b2\u25b3\u25b4\u25b5\u25b6\u25b8\u25b9\u25ba\u25bb\u25bc\u25bd\u25be\u25bf\u25c0\u25c2\u25c3\u25c4\u25c5\u25c6\u25c7\u25c8\u25c9\u25ca\u25cb\u25cc\u25cd\u25ce\u25cf\u25d0\u25d1\u25d2\u25d3\u25d4\u25d5\u25d6\u25d7\u25d8\u25d9\u25da\u25db\u25dc\u25dd\u25de\u25df\u25e0\u25e1\u25e2\u25e3\u25e4\u25e5\u25e6\u25e7\u25e8\u25e9\u25ea\u25eb\u25ec\u25ed\u25ee\u25ef\u25f0\u25f1\u25f2\u25f3\u25f4\u25f5\u25f6\u25f7\u2600\u2601\u2602\u2603\u2604\u2605\u2606\u2607\u2608\u2609\u260a\u260b\u260c\u260d\u260e\u260f\u2610\u2611\u2612\u2613\u2614\u2615\u2616\u2617\u2618\u2619\u261a\u261b\u261c\u261d\u261e\u261f\u2620\u2621\u2622\u2623\u2624\u2625\u2626\u2627\u2628\u2629\u262a\u262b\u262c\u262d\u262e\u262f\u2630\u2631\u2632\u2633\u2634\u2635\u2636\u2637\u2638\u2639\u263a\u263b\u263c\u263d\u263e\u263f\u2640\u2641\u2642\u2643\u2644\u2645\u2646\u2647\u2648\u2649\u264a\u264b\u264c\u264d\u264e\u264f\u2650\u2651\u2652\u2653\u2654\u2655\u2656\u2657\u2658\u2659\u265a\u265b\u265c\u265d\u265e\u265f\u2660\u2661\u2662\u2663\u2664\u2665\u2666\u2667\u2668\u2669\u266a\u266b\u266c\u266d\u266e\u2670\u2671\u2672\u2673\u2674\u2675\u2676\u2677\u2678\u2679\u267a\u267b\u267c\u267d\u267e\u267f\u2680\u2681\u2682\u2683\u2684\u2685\u2686\u2687\u2688\u2689\u268a\u268b\u268c\u268d\u268e\u268f\u2690\u2691\u2692\u2693\u2694\u2695\u2696\u2697\u2698\u2699\u269a\u269b\u269c\u26a0\u26a1\u26a2\u26a3\u26a4\u26a5\u26a6\u26a7\u26a8\u26a9\u26aa\u26ab\u26ac\u26ad\u26ae\u26af\u26b0\u26b1\u2701\u2702\u2703\u2704\u2706\u2707\u2708\u2709\u270c\u270d\u270e\u270f\u2710\u2711\u2712\u2713\u2714\u2715\u2716\u2717\u2718\u2719\u271a\u271b\u271c\u271d\u271e\u271f\u2720\u2721\u2722\u2723\u2724\u2725\u2726\u2727\u2729\u272a\u272b\u272c\u272d\u272e\u272f\u2730\u2731\u2732\u2733\u2734\u2735\u2736\u2737\u2738\u2739\u273a\u273b\u273c\u273d\u273e\u273f\u2740\u2741\u27
42\u2743\u2744\u2745\u2746\u2747\u2748\u2749\u274a\u274b\u274d\u274f\u2750\u2751\u2752\u2756\u2758\u2759\u275a\u275b\u275c\u275d\u275e\u2761\u2762\u2763\u2764\u2765\u2766\u2767\u2794\u2798\u2799\u279a\u279b\u279c\u279d\u279e\u279f\u27a0\u27a1\u27a2\u27a3\u27a4\u27a5\u27a6\u27a7\u27a8\u27a9\u27aa\u27ab\u27ac\u27ad\u27ae\u27af\u27b1\u27b2\u27b3\u27b4\u27b5\u27b6\u27b7\u27b8\u27b9\u27ba\u27bb\u27bc\u27bd\u27be\u2800\u2801\u2802\u2803\u2804\u2805\u2806\u2807\u2808\u2809\u280a\u280b\u280c\u280d\u280e\u280f\u2810\u2811\u2812\u2813\u2814\u2815\u2816\u2817\u2818\u2819\u281a\u281b\u281c\u281d\u281e\u281f\u2820\u2821\u2822\u2823\u2824\u2825\u2826\u2827\u2828\u2829\u282a\u282b\u282c\u282d\u282e\u282f\u2830\u2831\u2832\u2833\u2834\u2835\u2836\u2837\u2838\u2839\u283a\u283b\u283c\u283d\u283e\u283f\u2840\u2841\u2842\u2843\u2844\u2845\u2846\u2847\u2848\u2849\u284a\u284b\u284c\u284d\u284e\u284f\u2850\u2851\u2852\u2853\u2854\u2855\u2856\u2857\u2858\u2859\u285a\u285b\u285c\u285d\u285e\u285f\u2860\u2861\u2862\u2863\u2864\u2865\u2866\u2867\u2868\u2869\u286a\u286b\u286c\u286d\u286e\u286f\u2870\u2871\u2872\u2873\u2874\u2875\u2876\u2877\u2878\u2879\u287a\u287b\u287c\u287d\u287e\u287f\u2880\u2881\u2882\u2883\u2884\u2885\u2886\u2887\u2888\u2889\u288a\u288b\u288c\u288d\u288e\u288f\u2890\u2891\u2892\u2893\u2894\u2895\u2896\u2897\u2898\u2899\u289a\u289b\u289c\u289d\u289e\u289f\u28a0\u28a1\u28a2\u28a3\u28a4\u28a5\u28a6\u28a7\u28a8\u28a9\u28aa\u28ab\u28ac\u28ad\u28ae\u28af\u28b0\u28b1\u28b2\u28b3\u28b4\u28b5\u28b6\u28b7\u28b8\u28b9\u28ba\u28bb\u28bc\u28bd\u28be\u28bf\u28c0\u28c1\u28c2\u28c3\u28c4\u28c5\u28c6\u28c7\u28c8\u28c9\u28ca\u28cb\u28cc\u28cd\u28ce\u28cf\u28d0\u28d1\u28d2\u28d3\u28d4\u28d5\u28d6\u28d7\u28d8\u28d9\u28da\u28db\u28dc\u28dd\u28de\u28df\u28e0\u28e1\u28e2\u28e3\u28e4\u28e5\u28e6\u28e7\u28e8\u28e9\u28ea\u28eb\u28ec\u28ed\u28ee\u28ef\u28f0\u28f1\u28f2\u28f3\u28f4\u28f5\u28f6\u28f7\u28f8\u28f9\u28fa\u28fb\u28fc\u28fd\u28fe\u28ff\u2b00\u2b01\u2b02\u2b03\u2b04\u2b05\u2b06\u2b07\u2b08
\u2b09\u2b0a\u2b0b\u2b0c\u2b0d\u2b0e\u2b0f\u2b10\u2b11\u2b12\u2b13\u2ce5\u2ce6\u2ce7\u2ce8\u2ce9\u2cea\u2e80\u2e81\u2e82\u2e83\u2e84\u2e85\u2e86\u2e87\u2e88\u2e89\u2e8a\u2e8b\u2e8c\u2e8d\u2e8e\u2e8f\u2e90\u2e91\u2e92\u2e93\u2e94\u2e95\u2e96\u2e97\u2e98\u2e99\u2e9b\u2e9c\u2e9d\u2e9e\u2e9f\u2ea0\u2ea1\u2ea2\u2ea3\u2ea4\u2ea5\u2ea6\u2ea7\u2ea8\u2ea9\u2eaa\u2eab\u2eac\u2ead\u2eae\u2eaf\u2eb0\u2eb1\u2eb2\u2eb3\u2eb4\u2eb5\u2eb6\u2eb7\u2eb8\u2eb9\u2eba\u2ebb\u2ebc\u2ebd\u2ebe\u2ebf\u2ec0\u2ec1\u2ec2\u2ec3\u2ec4\u2ec5\u2ec6\u2ec7\u2ec8\u2ec9\u2eca\u2ecb\u2ecc\u2ecd\u2ece\u2ecf\u2ed0\u2ed1\u2ed2\u2ed3\u2ed4\u2ed5\u2ed6\u2ed7\u2ed8\u2ed9\u2eda\u2edb\u2edc\u2edd\u2ede\u2edf\u2ee0\u2ee1\u2ee2\u2ee3\u2ee4\u2ee5\u2ee6\u2ee7\u2ee8\u2ee9\u2eea\u2eeb\u2eec\u2eed\u2eee\u2eef\u2ef0\u2ef1\u2ef2\u2ef3\u2f00\u2f01\u2f02\u2f03\u2f04\u2f05\u2f06\u2f07\u2f08\u2f09\u2f0a\u2f0b\u2f0c\u2f0d\u2f0e\u2f0f\u2f10\u2f11\u2f12\u2f13\u2f14\u2f15\u2f16\u2f17\u2f18\u2f19\u2f1a\u2f1b\u2f1c\u2f1d\u2f1e\u2f1f\u2f20\u2f21\u2f22\u2f23\u2f24\u2f25\u2f26\u2f27\u2f28\u2f29\u2f2a\u2f2b\u2f2c\u2f2d\u2f2e\u2f2f\u2f30\u2f31\u2f32\u2f33\u2f34\u2f35\u2f36\u2f37\u2f38\u2f39\u2f3a\u2f3b\u2f3c\u2f3d\u2f3e\u2f3f\u2f40\u2f41\u2f42\u2f43\u2f44\u2f45\u2f46\u2f47\u2f48\u2f49\u2f4a\u2f4b\u2f4c\u2f4d\u2f4e\u2f4f\u2f50\u2f51\u2f52\u2f53\u2f54\u2f55\u2f56\u2f57\u2f58\u2f59\u2f5a\u2f5b\u2f5c\u2f5d\u2f5e\u2f5f\u2f60\u2f61\u2f62\u2f63\u2f64\u2f65\u2f66\u2f67\u2f68\u2f69\u2f6a\u2f6b\u2f6c\u2f6d\u2f6e\u2f6f\u2f70\u2f71\u2f72\u2f73\u2f74\u2f75\u2f76\u2f77\u2f78\u2f79\u2f7a\u2f7b\u2f7c\u2f7d\u2f7e\u2f7f\u2f80\u2f81\u2f82\u2f83\u2f84\u2f85\u2f86\u2f87\u2f88\u2f89\u2f8a\u2f8b\u2f8c\u2f8d\u2f8e\u2f8f\u2f90\u2f91\u2f92\u2f93\u2f94\u2f95\u2f96\u2f97\u2f98\u2f99\u2f9a\u2f9b\u2f9c\u2f9d\u2f9e\u2f9f\u2fa0\u2fa1\u2fa2\u2fa3\u2fa4\u2fa5\u2fa6\u2fa7\u2fa8\u2fa9\u2faa\u2fab\u2fac\u2fad\u2fae\u2faf\u2fb0\u2fb1\u2fb2\u2fb3\u2fb4\u2fb5\u2fb6\u2fb7\u2fb8\u2fb9\u2fba\u2fbb\u2fbc\u2fbd\u2fbe\u2fbf\u2fc0\u2fc1\u2fc2\u2fc3\u2fc4\u2fc5\u2fc6\u2fc7\u2fc8\u
2fc9\u2fca\u2fcb\u2fcc\u2fcd\u2fce\u2fcf\u2fd0\u2fd1\u2fd2\u2fd3\u2fd4\u2fd5\u2ff0\u2ff1\u2ff2\u2ff3\u2ff4\u2ff5\u2ff6\u2ff7\u2ff8\u2ff9\u2ffa\u2ffb\u3004\u3012\u3013\u3020\u3036\u3037\u303e\u303f\u3190\u3191\u3196\u3197\u3198\u3199\u319a\u319b\u319c\u319d\u319e\u319f\u31c0\u31c1\u31c2\u31c3\u31c4\u31c5\u31c6\u31c7\u31c8\u31c9\u31ca\u31cb\u31cc\u31cd\u31ce\u31cf\u3200\u3201\u3202\u3203\u3204\u3205\u3206\u3207\u3208\u3209\u320a\u320b\u320c\u320d\u320e\u320f\u3210\u3211\u3212\u3213\u3214\u3215\u3216\u3217\u3218\u3219\u321a\u321b\u321c\u321d\u321e\u322a\u322b\u322c\u322d\u322e\u322f\u3230\u3231\u3232\u3233\u3234\u3235\u3236\u3237\u3238\u3239\u323a\u323b\u323c\u323d\u323e\u323f\u3240\u3241\u3242\u3243\u3250\u3260\u3261\u3262\u3263\u3264\u3265\u3266\u3267\u3268\u3269\u326a\u326b\u326c\u326d\u326e\u326f\u3270\u3271\u3272\u3273\u3274\u3275\u3276\u3277\u3278\u3279\u327a\u327b\u327c\u327d\u327e\u327f\u328a\u328b\u328c\u328d\u328e\u328f\u3290\u3291\u3292\u3293\u3294\u3295\u3296\u3297\u3298\u3299\u329a\u329b\u329c\u329d\u329e\u329f\u32a0\u32a1\u32a2\u32a3\u32a4\u32a5\u32a6\u32a7\u32a8\u32a9\u32aa\u32ab\u32ac\u32ad\u32ae\u32af\u32b0\u32c0\u32c1\u32c2\u32c3\u32c4\u32c5\u32c6\u32c7\u32c8\u32c9\u32ca\u32cb\u32cc\u32cd\u32ce\u32cf\u32d0\u32d1\u32d2\u32d3\u32d4\u32d5\u32d6\u32d7\u32d8\u32d9\u32da\u32db\u32dc\u32dd\u32de\u32df\u32e0\u32e1\u32e2\u32e3\u32e4\u32e5\u32e6\u32e7\u32e8\u32e9\u32ea\u32eb\u32ec\u32ed\u32ee\u32ef\u32f0\u32f1\u32f2\u32f3\u32f4\u32f5\u32f6\u32f7\u32f8\u32f9\u32fa\u32fb\u32fc\u32fd\u32fe\u3300\u3301\u3302\u3303\u3304\u3305\u3306\u3307\u3308\u3309\u330a\u330b\u330c\u330d\u330e\u330f\u3310\u3311\u3312\u3313\u3314\u3315\u3316\u3317\u3318\u3319\u331a\u331b\u331c\u331d\u331e\u331f\u3320\u3321\u3322\u3323\u3324\u3325\u3326\u3327\u3328\u3329\u332a\u332b\u332c\u332d\u332e\u332f\u3330\u3331\u3332\u3333\u3334\u3335\u3336\u3337\u3338\u3339\u333a\u333b\u333c\u333d\u333e\u333f\u3340\u3341\u3342\u3343\u3344\u3345\u3346\u3347\u3348\u3349\u334a\u334b\u334c\u334d\u334e\u334f\u33
50\u3351\u3352\u3353\u3354\u3355\u3356\u3357\u3358\u3359\u335a\u335b\u335c\u335d\u335e\u335f\u3360\u3361\u3362\u3363\u3364\u3365\u3366\u3367\u3368\u3369\u336a\u336b\u336c\u336d\u336e\u336f\u3370\u3371\u3372\u3373\u3374\u3375\u3376\u3377\u3378\u3379\u337a\u337b\u337c\u337d\u337e\u337f\u3380\u3381\u3382\u3383\u3384\u3385\u3386\u3387\u3388\u3389\u338a\u338b\u338c\u338d\u338e\u338f\u3390\u3391\u3392\u3393\u3394\u3395\u3396\u3397\u3398\u3399\u339a\u339b\u339c\u339d\u339e\u339f\u33a0\u33a1\u33a2\u33a3\u33a4\u33a5\u33a6\u33a7\u33a8\u33a9\u33aa\u33ab\u33ac\u33ad\u33ae\u33af\u33b0\u33b1\u33b2\u33b3\u33b4\u33b5\u33b6\u33b7\u33b8\u33b9\u33ba\u33bb\u33bc\u33bd\u33be\u33bf\u33c0\u33c1\u33c2\u33c3\u33c4\u33c5\u33c6\u33c7\u33c8\u33c9\u33ca\u33cb\u33cc\u33cd\u33ce\u33cf\u33d0\u33d1\u33d2\u33d3\u33d4\u33d5\u33d6\u33d7\u33d8\u33d9\u33da\u33db\u33dc\u33dd\u33de\u33df\u33e0\u33e1\u33e2\u33e3\u33e4\u33e5\u33e6\u33e7\u33e8\u33e9\u33ea\u33eb\u33ec\u33ed\u33ee\u33ef\u33f0\u33f1\u33f2\u33f3\u33f4\u33f5\u33f6\u33f7\u33f8\u33f9\u33fa\u33fb\u33fc\u33fd\u33fe\u33ff\u4dc0\u4dc1\u4dc2\u4dc3\u4dc4\u4dc5\u4dc6\u4dc7\u4dc8\u4dc9\u4dca\u4dcb\u4dcc\u4dcd\u4dce\u4dcf\u4dd0\u4dd1\u4dd2\u4dd3\u4dd4\u4dd5\u4dd6\u4dd7\u4dd8\u4dd9\u4dda\u4ddb\u4ddc\u4ddd\u4dde\u4ddf\u4de0\u4de1\u4de2\u4de3\u4de4\u4de5\u4de6\u4de7\u4de8\u4de9\u4dea\u4deb\u4dec\u4ded\u4dee\u4def\u4df0\u4df1\u4df2\u4df3\u4df4\u4df5\u4df6\u4df7\u4df8\u4df9\u4dfa\u4dfb\u4dfc\u4dfd\u4dfe\u4dff\ua490\ua491\ua492\ua493\ua494\ua495\ua496\ua497\ua498\ua499\ua49a\ua49b\ua49c\ua49d\ua49e\ua49f\ua4a0\ua4a1\ua4a2\ua4a3\ua4a4\ua4a5\ua4a6\ua4a7\ua4a8\ua4a9\ua4aa\ua4ab\ua4ac\ua4ad\ua4ae\ua4af\ua4b0\ua4b1\ua4b2\ua4b3\ua4b4\ua4b5\ua4b6\ua4b7\ua4b8\ua4b9\ua4ba\ua4bb\ua4bc\ua4bd\ua4be\ua4bf\ua4c0\ua4c1\ua4c2\ua4c3\ua4c4\ua4c5\ua4c6\ua828\ua829\ua82a\ua82b\ufdfd\uffe4\uffe8\uffed\uffee\ufffc\ufffd' + +Zl = u'\u2028' + +Zp = u'\u2029' + +Zs = u' \xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000' + +cats = ['Cc', 
'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs'] + +def combine(*args): + return u''.join([globals()[cat] for cat in args]) + +xid_start = u'\u0041-\u005A\u005F\u0061-\u007A\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u01BA\u01BB\u01BC-\u01BF\u01C0-\u01C3\u01C4-\u0241\u0250-\u02AF\u02B0-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EE\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03F5\u03F7-\u0481\u048A-\u04CE\u04D0-\u04F9\u0500-\u050F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0621-\u063A\u0640\u0641-\u064A\u066E-\u066F\u0671-\u06D3\u06D5\u06E5-\u06E6\u06EE-\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u076D\u0780-\u07A5\u07B1\u0904-\u0939\u093D\u0950\u0958-\u0961\u097D\u0985-\u098C\u098F-\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC-\u09DD\u09DF-\u09E1\u09F0-\u09F1\u0A05-\u0A0A\u0A0F-\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32-\u0A33\u0A35-\u0A36\u0A38-\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2-\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0-\u0AE1\u0B05-\u0B0C\u0B0F-\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32-\u0B33\u0B35-\u0B39\u0B3D\u0B5C-\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99-\u0B9A\u0B9C\u0B9E-\u0B9F\u0BA3-\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C60-\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0-\u0CE1\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D60-\u0D61\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E40-\u0E45\u0E46\u0E81-\u0E82\u0E84\u0E87-\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA-\u0EAB\u0EAD-\u0EB0\u0EB2\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDD\u0F00\u0F40-\u0F47\u0F49-\u0F6A\u0F88-\u0F8B\u1000-\u1021\u1023-\u1027\u1029-\u102A\u1050
-\u1055\u10A0-\u10C5\u10D0-\u10FA\u10FC\u1100-\u1159\u115F-\u11A2\u11A8-\u11F9\u1200-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u1676\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F0\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1842\u1843\u1844-\u1877\u1880-\u18A8\u1900-\u191C\u1950-\u196D\u1970-\u1974\u1980-\u19A9\u19C1-\u19C7\u1A00-\u1A16\u1D00-\u1D2B\u1D2C-\u1D61\u1D62-\u1D77\u1D78\u1D79-\u1D9A\u1D9B-\u1DBF\u1E00-\u1E9B\u1EA0-\u1EF9\u1F00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u2094\u2102\u2107\u210A-\u2113\u2115\u2118\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212E\u212F-\u2131\u2133-\u2134\u2135-\u2138\u2139\u213C-\u213F\u2145-\u2149\u2160-\u2183\u2C00-\u2C2E\u2C30-\u2C5E\u2C80-\u2CE4\u2D00-\u2D25\u2D30-\u2D65\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005\u3006\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303A\u303B\u303C\u3041-\u3096\u309D-\u309E\u309F\u30A1-\u30FA\u30FC-\u30FE\u30FF\u3105-\u312C\u3131-\u318E\u31A0-\u31B7\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FBB\uA000-\uA014\uA015\uA016-\uA48C\uA800-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uAC00-\uD7A3\uF900-\uFA2D\uFA30-\uFA6A\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40-\uFB41\uFB43-\uFB44\uFB46-\uFBB1\uFBD3-\uFC5D\uFC64-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDF9\uFE71\uFE73\uFE77\uFE79\uFE7B\uFE7D\uFE7F-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFF6F\uFF70\uFF71-\uFF9D\uFFA0-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC' + 
+xid_continue = u'\u0030-\u0039\u0041-\u005A\u005F\u0061-\u007A\u00AA\u00B5\u00B7\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u01BA\u01BB\u01BC-\u01BF\u01C0-\u01C3\u01C4-\u0241\u0250-\u02AF\u02B0-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EE\u0300-\u036F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03F5\u03F7-\u0481\u0483-\u0486\u048A-\u04CE\u04D0-\u04F9\u0500-\u050F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05B9\u05BB-\u05BD\u05BF\u05C1-\u05C2\u05C4-\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2\u0610-\u0615\u0621-\u063A\u0640\u0641-\u064A\u064B-\u065E\u0660-\u0669\u066E-\u066F\u0670\u0671-\u06D3\u06D5\u06D6-\u06DC\u06DF-\u06E4\u06E5-\u06E6\u06E7-\u06E8\u06EA-\u06ED\u06EE-\u06EF\u06F0-\u06F9\u06FA-\u06FC\u06FF\u0710\u0711\u0712-\u072F\u0730-\u074A\u074D-\u076D\u0780-\u07A5\u07A6-\u07B0\u07B1\u0901-\u0902\u0903\u0904-\u0939\u093C\u093D\u093E-\u0940\u0941-\u0948\u0949-\u094C\u094D\u0950\u0951-\u0954\u0958-\u0961\u0962-\u0963\u0966-\u096F\u097D\u0981\u0982-\u0983\u0985-\u098C\u098F-\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BC\u09BD\u09BE-\u09C0\u09C1-\u09C4\u09C7-\u09C8\u09CB-\u09CC\u09CD\u09CE\u09D7\u09DC-\u09DD\u09DF-\u09E1\u09E2-\u09E3\u09E6-\u09EF\u09F0-\u09F1\u0A01-\u0A02\u0A03\u0A05-\u0A0A\u0A0F-\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32-\u0A33\u0A35-\u0A36\u0A38-\u0A39\u0A3C\u0A3E-\u0A40\u0A41-\u0A42\u0A47-\u0A48\u0A4B-\u0A4D\u0A59-\u0A5C\u0A5E\u0A66-\u0A6F\u0A70-\u0A71\u0A72-\u0A74\u0A81-\u0A82\u0A83\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2-\u0AB3\u0AB5-\u0AB9\u0ABC\u0ABD\u0ABE-\u0AC0\u0AC1-\u0AC5\u0AC7-\u0AC8\u0AC9\u0ACB-\u0ACC\u0ACD\u0AD0\u0AE0-\u0AE1\u0AE2-\u0AE3\u0AE6-\u0AEF\u0B01\u0B02-\u0B03\u0B05-\u0B0C\u0B0F-\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32-\u0B33\u0B35-\u0B39\u0B3C\u0B3D\u0B3E\u0B3F\u0B40\u0B41-\u0B43\u0B47-\u0B48\u0B4B-\u0B4C\u0B4D\u0B56\u0B57\u0B5C-\u0B5D\u0B5F-\u0B61\u0B66-\u0B6F\u0B71\u0B82\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99-\u0B9A\u0B9C\u0B9E-\u0B9F\u0BA3-\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BBE-\u0BBF\u0BC0\
u0BC1-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCC\u0BCD\u0BD7\u0BE6-\u0BEF\u0C01-\u0C03\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3E-\u0C40\u0C41-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55-\u0C56\u0C60-\u0C61\u0C66-\u0C6F\u0C82-\u0C83\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBC\u0CBD\u0CBE\u0CBF\u0CC0-\u0CC4\u0CC6\u0CC7-\u0CC8\u0CCA-\u0CCB\u0CCC-\u0CCD\u0CD5-\u0CD6\u0CDE\u0CE0-\u0CE1\u0CE6-\u0CEF\u0D02-\u0D03\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D3E-\u0D40\u0D41-\u0D43\u0D46-\u0D48\u0D4A-\u0D4C\u0D4D\u0D57\u0D60-\u0D61\u0D66-\u0D6F\u0D82-\u0D83\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0DCA\u0DCF-\u0DD1\u0DD2-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DF2-\u0DF3\u0E01-\u0E30\u0E31\u0E32-\u0E33\u0E34-\u0E3A\u0E40-\u0E45\u0E46\u0E47-\u0E4E\u0E50-\u0E59\u0E81-\u0E82\u0E84\u0E87-\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA-\u0EAB\u0EAD-\u0EB0\u0EB1\u0EB2-\u0EB3\u0EB4-\u0EB9\u0EBB-\u0EBC\u0EBD\u0EC0-\u0EC4\u0EC6\u0EC8-\u0ECD\u0ED0-\u0ED9\u0EDC-\u0EDD\u0F00\u0F18-\u0F19\u0F20-\u0F29\u0F35\u0F37\u0F39\u0F3E-\u0F3F\u0F40-\u0F47\u0F49-\u0F6A\u0F71-\u0F7E\u0F7F\u0F80-\u0F84\u0F86-\u0F87\u0F88-\u0F8B\u0F90-\u0F97\u0F99-\u0FBC\u0FC6\u1000-\u1021\u1023-\u1027\u1029-\u102A\u102C\u102D-\u1030\u1031\u1032\u1036-\u1037\u1038\u1039\u1040-\u1049\u1050-\u1055\u1056-\u1057\u1058-\u1059\u10A0-\u10C5\u10D0-\u10FA\u10FC\u1100-\u1159\u115F-\u11A2\u11A8-\u11F9\u1200-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u135F\u1369-\u1371\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u1676\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F0\u1700-\u170C\u170E-\u1711\u1712-\u1714\u1720-\u1731\u1732-\u1734\u1740-\u1751\u1752-\u1753\u1760-\u176C\u176E-\u1770\u1772-\u1773\u1780-\u17B3\u17B6\u17B7-\u17BD\u17BE-\u17C5\u17C6\u17C7-\u17C8\u17C9-\u17D3\u17D7\u17DC\u17DD\u17E0-\u17E9\u180B-\u180D\
u1810-\u1819\u1820-\u1842\u1843\u1844-\u1877\u1880-\u18A8\u18A9\u1900-\u191C\u1920-\u1922\u1923-\u1926\u1927-\u1928\u1929-\u192B\u1930-\u1931\u1932\u1933-\u1938\u1939-\u193B\u1946-\u194F\u1950-\u196D\u1970-\u1974\u1980-\u19A9\u19B0-\u19C0\u19C1-\u19C7\u19C8-\u19C9\u19D0-\u19D9\u1A00-\u1A16\u1A17-\u1A18\u1A19-\u1A1B\u1D00-\u1D2B\u1D2C-\u1D61\u1D62-\u1D77\u1D78\u1D79-\u1D9A\u1D9B-\u1DBF\u1DC0-\u1DC3\u1E00-\u1E9B\u1EA0-\u1EF9\u1F00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u203F-\u2040\u2054\u2071\u207F\u2090-\u2094\u20D0-\u20DC\u20E1\u20E5-\u20EB\u2102\u2107\u210A-\u2113\u2115\u2118\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212E\u212F-\u2131\u2133-\u2134\u2135-\u2138\u2139\u213C-\u213F\u2145-\u2149\u2160-\u2183\u2C00-\u2C2E\u2C30-\u2C5E\u2C80-\u2CE4\u2D00-\u2D25\u2D30-\u2D65\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005\u3006\u3007\u3021-\u3029\u302A-\u302F\u3031-\u3035\u3038-\u303A\u303B\u303C\u3041-\u3096\u3099-\u309A\u309D-\u309E\u309F\u30A1-\u30FA\u30FC-\u30FE\u30FF\u3105-\u312C\u3131-\u318E\u31A0-\u31B7\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FBB\uA000-\uA014\uA015\uA016-\uA48C\uA800-\uA801\uA802\uA803-\uA805\uA806\uA807-\uA80A\uA80B\uA80C-\uA822\uA823-\uA824\uA825-\uA826\uA827\uAC00-\uD7A3\uF900-\uFA2D\uFA30-\uFA6A\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1E\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40-\uFB41\uFB43-\uFB44\uFB46-\uFBB1\uFBD3-\uFC5D\uFC64-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDF9\uFE00-\uFE0F\uFE20-\uFE23\uFE33-\uFE34\uFE4D-\uFE4F\uFE71\uFE73\uFE77\uFE79\uFE7B\uFE7D\uFE7F-\uFEFC\uFF10-\uFF19\uFF21-\uFF3A\uFF3F\uFF41-\uFF5A\uFF66-\uFF6F\uFF70\uFF71-\uFF9D\uFF9E-\uFF9F\uFFA0-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC' + +def allexcept(*args): + newcats = cats[:] + for 
arg in args: + newcats.remove(arg) + return u''.join([globals()[cat] for cat in newcats]) + +if __name__ == '__main__': + import unicodedata + + categories = {} + + f = open(__file__.rstrip('co')) + try: + content = f.read() + finally: + f.close() + + header = content[:content.find('Cc =')] + footer = content[content.find("def combine("):] + + for code in range(65535): + c = unichr(code) + cat = unicodedata.category(c) + categories.setdefault(cat, []).append(c) + + f = open(__file__, 'w') + f.write(header) + + for cat in sorted(categories): + val = u''.join(categories[cat]) + if cat == 'Cs': + # Jython can't handle isolated surrogates + f.write("""\ +try: + Cs = eval(r"%r") +except UnicodeDecodeError: + Cs = '' # Jython can't handle isolated surrogates\n\n""" % val) + else: + f.write('%s = %r\n\n' % (cat, val)) + f.write('cats = %r\n\n' % sorted(categories.keys())) + + f.write(footer) + f.close() diff --git a/libs/jinja2/bccache.py b/libs/jinja2/bccache.py new file mode 100644 index 0000000..0f7f566 --- /dev/null +++ b/libs/jinja2/bccache.py @@ -0,0 +1,289 @@ +# -*- coding: utf-8 -*- +""" + jinja2.bccache + ~~~~~~~~~~~~~~ + + This module implements the bytecode cache system Jinja is optionally + using. This is useful if you have very complex template situations and + the compiliation of all those templates slow down your application too + much. + + Situations where this is useful are often forking web applications that + are initialized on the first request. + + :copyright: (c) 2010 by the Jinja Team. + :license: BSD. +""" +from os import path, listdir +import sys +import marshal +import tempfile +import cPickle as pickle +import fnmatch +from cStringIO import StringIO +try: + from hashlib import sha1 +except ImportError: + from sha import new as sha1 +from jinja2.utils import open_if_exists + + +bc_version = 2 + +# magic version used to only change with new jinja versions. With 2.6 +# we change this to also take Python version changes into account. 
The +# reason for this is that Python tends to segfault if fed earlier bytecode +# versions because someone thought it would be a good idea to reuse opcodes +# or make Python incompatible with earlier versions. +bc_magic = 'j2'.encode('ascii') + \ + pickle.dumps(bc_version, 2) + \ + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1]) + + +class Bucket(object): + """Buckets are used to store the bytecode for one template. It's created + and initialized by the bytecode cache and passed to the loading functions. + + The buckets get an internal checksum from the cache assigned and use this + to automatically reject outdated cache material. Individual bytecode + cache subclasses don't have to care about cache invalidation. + """ + + def __init__(self, environment, key, checksum): + self.environment = environment + self.key = key + self.checksum = checksum + self.reset() + + def reset(self): + """Resets the bucket (unloads the bytecode).""" + self.code = None + + def load_bytecode(self, f): + """Loads bytecode from a file or file like object.""" + # make sure the magic header is correct + magic = f.read(len(bc_magic)) + if magic != bc_magic: + self.reset() + return + # the source code of the file changed, we need to reload + checksum = pickle.load(f) + if self.checksum != checksum: + self.reset() + return + # now load the code. 
Because marshal is not able to load + # from arbitrary streams we have to work around that + if isinstance(f, file): + self.code = marshal.load(f) + else: + self.code = marshal.loads(f.read()) + + def write_bytecode(self, f): + """Dump the bytecode into the file or file like object passed.""" + if self.code is None: + raise TypeError('can\'t write empty bucket') + f.write(bc_magic) + pickle.dump(self.checksum, f, 2) + if isinstance(f, file): + marshal.dump(self.code, f) + else: + f.write(marshal.dumps(self.code)) + + def bytecode_from_string(self, string): + """Load bytecode from a string.""" + self.load_bytecode(StringIO(string)) + + def bytecode_to_string(self): + """Return the bytecode as string.""" + out = StringIO() + self.write_bytecode(out) + return out.getvalue() + + +class BytecodeCache(object): + """To implement your own bytecode cache you have to subclass this class + and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of + these methods are passed a :class:`~jinja2.bccache.Bucket`. + + A very basic bytecode cache that saves the bytecode on the file system:: + + from os import path + + class MyCache(BytecodeCache): + + def __init__(self, directory): + self.directory = directory + + def load_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + if path.exists(filename): + with open(filename, 'rb') as f: + bucket.load_bytecode(f) + + def dump_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + with open(filename, 'wb') as f: + bucket.write_bytecode(f) + + A more advanced version of a filesystem based bytecode cache is part of + Jinja2. + """ + + def load_bytecode(self, bucket): + """Subclasses have to override this method to load bytecode into a + bucket. If they are not able to find code in the cache for the + bucket, it must not do anything. 
+ """ + raise NotImplementedError() + + def dump_bytecode(self, bucket): + """Subclasses have to override this method to write the bytecode + from a bucket back to the cache. If it unable to do so it must not + fail silently but raise an exception. + """ + raise NotImplementedError() + + def clear(self): + """Clears the cache. This method is not used by Jinja2 but should be + implemented to allow applications to clear the bytecode cache used + by a particular environment. + """ + + def get_cache_key(self, name, filename=None): + """Returns the unique hash key for this template name.""" + hash = sha1(name.encode('utf-8')) + if filename is not None: + if isinstance(filename, unicode): + filename = filename.encode('utf-8') + hash.update('|' + filename) + return hash.hexdigest() + + def get_source_checksum(self, source): + """Returns a checksum for the source.""" + return sha1(source.encode('utf-8')).hexdigest() + + def get_bucket(self, environment, name, filename, source): + """Return a cache bucket for the given template. All arguments are + mandatory but filename may be `None`. + """ + key = self.get_cache_key(name, filename) + checksum = self.get_source_checksum(source) + bucket = Bucket(environment, key, checksum) + self.load_bytecode(bucket) + return bucket + + def set_bucket(self, bucket): + """Put the bucket into the cache.""" + self.dump_bytecode(bucket) + + +class FileSystemBytecodeCache(BytecodeCache): + """A bytecode cache that stores bytecode on the filesystem. It accepts + two arguments: The directory where the cache items are stored and a + pattern string that is used to build the filename. + + If no directory is specified the system temporary items folder is used. + + The pattern can be used to have multiple separate caches operate on the + same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` + is replaced with the cache key. 
+ + >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') + + This bytecode cache supports clearing of the cache using the clear method. + """ + + def __init__(self, directory=None, pattern='__jinja2_%s.cache'): + if directory is None: + directory = tempfile.gettempdir() + self.directory = directory + self.pattern = pattern + + def _get_cache_filename(self, bucket): + return path.join(self.directory, self.pattern % bucket.key) + + def load_bytecode(self, bucket): + f = open_if_exists(self._get_cache_filename(bucket), 'rb') + if f is not None: + try: + bucket.load_bytecode(f) + finally: + f.close() + + def dump_bytecode(self, bucket): + f = open(self._get_cache_filename(bucket), 'wb') + try: + bucket.write_bytecode(f) + finally: + f.close() + + def clear(self): + # imported lazily here because google app-engine doesn't support + # write access on the file system and the function does not exist + # normally. + from os import remove + files = fnmatch.filter(listdir(self.directory), self.pattern % '*') + for filename in files: + try: + remove(path.join(self.directory, filename)) + except OSError: + pass + + +class MemcachedBytecodeCache(BytecodeCache): + """This class implements a bytecode cache that uses a memcache cache for + storing the information. It does not enforce a specific memcache library + (tummy's memcache or cmemcache) but will accept any class that provides + the minimal interface required. + + Libraries compatible with this class: + + - `werkzeug `_.contrib.cache + - `python-memcached `_ + - `cmemcache `_ + + (Unfortunately the django cache interface is not compatible because it + does not support storing binary data, only unicode. You can however pass + the underlying cache client to the bytecode cache which is available + as `django.core.cache.cache._client`.) + + The minimal interface for the client passed to the constructor is this: + + .. class:: MinimalClientInterface + + .. 
method:: set(key, value[, timeout]) + + Stores the bytecode in the cache. `value` is a string and + `timeout` the timeout of the key. If timeout is not provided + a default timeout or no timeout should be assumed, if it's + provided it's an integer with the number of seconds the cache + item should exist. + + .. method:: get(key) + + Returns the value for the cache key. If the item does not + exist in the cache the return value must be `None`. + + The other arguments to the constructor are the prefix for all keys that + is added before the actual cache key and the timeout for the bytecode in + the cache system. We recommend a high (or no) timeout. + + This bytecode cache does not support clearing of used items in the cache. + The clear method is a no-operation function. + """ + + def __init__(self, client, prefix='jinja2/bytecode/', timeout=None): + self.client = client + self.prefix = prefix + self.timeout = timeout + + def load_bytecode(self, bucket): + code = self.client.get(self.prefix + bucket.key) + if code is not None: + bucket.bytecode_from_string(code) + + def dump_bytecode(self, bucket): + args = (self.prefix + bucket.key, bucket.bytecode_to_string()) + if self.timeout is not None: + args += (self.timeout,) + self.client.set(*args) diff --git a/libs/jinja2/compiler.py b/libs/jinja2/compiler.py new file mode 100644 index 0000000..d0aadad --- /dev/null +++ b/libs/jinja2/compiler.py @@ -0,0 +1,1652 @@ +# -*- coding: utf-8 -*- +""" + jinja2.compiler + ~~~~~~~~~~~~~~~ + + Compiles nodes into python code. + + :copyright: (c) 2010 by the Jinja Team. + :license: BSD, see LICENSE for more details. 
+""" +from cStringIO import StringIO +from itertools import chain +from copy import deepcopy +from jinja2 import nodes +from jinja2.nodes import EvalContext +from jinja2.visitor import NodeVisitor +from jinja2.exceptions import TemplateAssertionError +from jinja2.utils import Markup, concat, escape, is_python_keyword, next + + +operators = { + 'eq': '==', + 'ne': '!=', + 'gt': '>', + 'gteq': '>=', + 'lt': '<', + 'lteq': '<=', + 'in': 'in', + 'notin': 'not in' +} + +try: + exec '(0 if 0 else 0)' +except SyntaxError: + have_condexpr = False +else: + have_condexpr = True + + +# what method to iterate over items do we want to use for dict iteration +# in generated code? on 2.x let's go with iteritems, on 3.x with items +if hasattr(dict, 'iteritems'): + dict_item_iter = 'iteritems' +else: + dict_item_iter = 'items' + + +# does if 0: dummy(x) get us x into the scope? +def unoptimize_before_dead_code(): + x = 42 + def f(): + if 0: dummy(x) + return f +unoptimize_before_dead_code = bool(unoptimize_before_dead_code().func_closure) + + +def generate(node, environment, name, filename, stream=None, + defer_init=False): + """Generate the python source for a node tree.""" + if not isinstance(node, nodes.Template): + raise TypeError('Can\'t compile non template nodes') + generator = CodeGenerator(environment, name, filename, stream, defer_init) + generator.visit(node) + if stream is None: + return generator.stream.getvalue() + + +def has_safe_repr(value): + """Does the node have a safe representation?""" + if value is None or value is NotImplemented or value is Ellipsis: + return True + if isinstance(value, (bool, int, long, float, complex, basestring, + xrange, Markup)): + return True + if isinstance(value, (tuple, list, set, frozenset)): + for item in value: + if not has_safe_repr(item): + return False + return True + elif isinstance(value, dict): + for key, value in value.iteritems(): + if not has_safe_repr(key): + return False + if not has_safe_repr(value): + return False + 
return True + return False + + +def find_undeclared(nodes, names): + """Check if the names passed are accessed undeclared. The return value + is a set of all the undeclared names from the sequence of names found. + """ + visitor = UndeclaredNameVisitor(names) + try: + for node in nodes: + visitor.visit(node) + except VisitorExit: + pass + return visitor.undeclared + + +class Identifiers(object): + """Tracks the status of identifiers in frames.""" + + def __init__(self): + # variables that are known to be declared (probably from outer + # frames or because they are special for the frame) + self.declared = set() + + # undeclared variables from outer scopes + self.outer_undeclared = set() + + # names that are accessed without being explicitly declared by + # this one or any of the outer scopes. Names can appear both in + # declared and undeclared. + self.undeclared = set() + + # names that are declared locally + self.declared_locally = set() + + # names that are declared by parameters + self.declared_parameter = set() + + def add_special(self, name): + """Register a special name like `loop`.""" + self.undeclared.discard(name) + self.declared.add(name) + + def is_declared(self, name, local_only=False): + """Check if a name is declared in this or an outer scope.""" + if name in self.declared_locally or name in self.declared_parameter: + return True + if local_only: + return False + return name in self.declared + + def copy(self): + return deepcopy(self) + + +class Frame(object): + """Holds compile time information for us.""" + + def __init__(self, eval_ctx, parent=None): + self.eval_ctx = eval_ctx + self.identifiers = Identifiers() + + # a toplevel frame is the root + soft frames such as if conditions. + self.toplevel = False + + # the root frame is basically just the outermost frame, so no if + # conditions. This information is used to optimize inheritance + # situations. 
+ self.rootlevel = False + + # in some dynamic inheritance situations the compiler needs to add + # write tests around output statements. + self.require_output_check = parent and parent.require_output_check + + # inside some tags we are using a buffer rather than yield statements. + # this for example affects {% filter %} or {% macro %}. If a frame + # is buffered this variable points to the name of the list used as + # buffer. + self.buffer = None + + # the name of the block we're in, otherwise None. + self.block = parent and parent.block or None + + # a set of actually assigned names + self.assigned_names = set() + + # the parent of this frame + self.parent = parent + + if parent is not None: + self.identifiers.declared.update( + parent.identifiers.declared | + parent.identifiers.declared_parameter | + parent.assigned_names + ) + self.identifiers.outer_undeclared.update( + parent.identifiers.undeclared - + self.identifiers.declared + ) + self.buffer = parent.buffer + + def copy(self): + """Create a copy of the current one.""" + rv = object.__new__(self.__class__) + rv.__dict__.update(self.__dict__) + rv.identifiers = object.__new__(self.identifiers.__class__) + rv.identifiers.__dict__.update(self.identifiers.__dict__) + return rv + + def inspect(self, nodes, hard_scope=False): + """Walk the node and check for identifiers. If the scope is hard (eg: + enforce on a python level) overrides from outer scopes are tracked + differently. + """ + visitor = FrameIdentifierVisitor(self.identifiers, hard_scope) + for node in nodes: + visitor.visit(node) + + def find_shadowed(self, extra=()): + """Find all the shadowed names. extra is an iterable of variables + that may be defined with `add_special` which may occour scoped. 
+ """ + i = self.identifiers + return (i.declared | i.outer_undeclared) & \ + (i.declared_locally | i.declared_parameter) | \ + set(x for x in extra if i.is_declared(x)) + + def inner(self): + """Return an inner frame.""" + return Frame(self.eval_ctx, self) + + def soft(self): + """Return a soft frame. A soft frame may not be modified as + standalone thing as it shares the resources with the frame it + was created of, but it's not a rootlevel frame any longer. + """ + rv = self.copy() + rv.rootlevel = False + return rv + + __copy__ = copy + + +class VisitorExit(RuntimeError): + """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" + + +class DependencyFinderVisitor(NodeVisitor): + """A visitor that collects filter and test calls.""" + + def __init__(self): + self.filters = set() + self.tests = set() + + def visit_Filter(self, node): + self.generic_visit(node) + self.filters.add(node.name) + + def visit_Test(self, node): + self.generic_visit(node) + self.tests.add(node.name) + + def visit_Block(self, node): + """Stop visiting at blocks.""" + + +class UndeclaredNameVisitor(NodeVisitor): + """A visitor that checks if a name is accessed without being + declared. This is different from the frame visitor as it will + not stop at closure frames. 
+ """ + + def __init__(self, names): + self.names = set(names) + self.undeclared = set() + + def visit_Name(self, node): + if node.ctx == 'load' and node.name in self.names: + self.undeclared.add(node.name) + if self.undeclared == self.names: + raise VisitorExit() + else: + self.names.discard(node.name) + + def visit_Block(self, node): + """Stop visiting a blocks.""" + + +class FrameIdentifierVisitor(NodeVisitor): + """A visitor for `Frame.inspect`.""" + + def __init__(self, identifiers, hard_scope): + self.identifiers = identifiers + self.hard_scope = hard_scope + + def visit_Name(self, node): + """All assignments to names go through this function.""" + if node.ctx == 'store': + self.identifiers.declared_locally.add(node.name) + elif node.ctx == 'param': + self.identifiers.declared_parameter.add(node.name) + elif node.ctx == 'load' and not \ + self.identifiers.is_declared(node.name, self.hard_scope): + self.identifiers.undeclared.add(node.name) + + def visit_If(self, node): + self.visit(node.test) + real_identifiers = self.identifiers + + old_names = real_identifiers.declared_locally | \ + real_identifiers.declared_parameter + + def inner_visit(nodes): + if not nodes: + return set() + self.identifiers = real_identifiers.copy() + for subnode in nodes: + self.visit(subnode) + rv = self.identifiers.declared_locally - old_names + # we have to remember the undeclared variables of this branch + # because we will have to pull them. + real_identifiers.undeclared.update(self.identifiers.undeclared) + self.identifiers = real_identifiers + return rv + + body = inner_visit(node.body) + else_ = inner_visit(node.else_ or ()) + + # the differences between the two branches are also pulled as + # undeclared variables + real_identifiers.undeclared.update(body.symmetric_difference(else_) - + real_identifiers.declared) + + # remember those that are declared. 
+ real_identifiers.declared_locally.update(body | else_) + + def visit_Macro(self, node): + self.identifiers.declared_locally.add(node.name) + + def visit_Import(self, node): + self.generic_visit(node) + self.identifiers.declared_locally.add(node.target) + + def visit_FromImport(self, node): + self.generic_visit(node) + for name in node.names: + if isinstance(name, tuple): + self.identifiers.declared_locally.add(name[1]) + else: + self.identifiers.declared_locally.add(name) + + def visit_Assign(self, node): + """Visit assignments in the correct order.""" + self.visit(node.node) + self.visit(node.target) + + def visit_For(self, node): + """Visiting stops at for blocks. However the block sequence + is visited as part of the outer scope. + """ + self.visit(node.iter) + + def visit_CallBlock(self, node): + self.visit(node.call) + + def visit_FilterBlock(self, node): + self.visit(node.filter) + + def visit_Scope(self, node): + """Stop visiting at scopes.""" + + def visit_Block(self, node): + """Stop visiting at blocks.""" + + +class CompilerExit(Exception): + """Raised if the compiler encountered a situation where it just + doesn't make sense to further process the code. Any block that + raises such an exception is not further processed. + """ + + +class CodeGenerator(NodeVisitor): + + def __init__(self, environment, name, filename, stream=None, + defer_init=False): + if stream is None: + stream = StringIO() + self.environment = environment + self.name = name + self.filename = filename + self.stream = stream + self.created_block_context = False + self.defer_init = defer_init + + # aliases for imports + self.import_aliases = {} + + # a registry for all blocks. Because blocks are moved out + # into the global python scope they are registered here + self.blocks = {} + + # the number of extends statements so far + self.extends_so_far = 0 + + # some templates have a rootlevel extends. 
In this case we + # can safely assume that we're a child template and do some + # more optimizations. + self.has_known_extends = False + + # the current line number + self.code_lineno = 1 + + # registry of all filters and tests (global, not block local) + self.tests = {} + self.filters = {} + + # the debug information + self.debug_info = [] + self._write_debug_info = None + + # the number of new lines before the next write() + self._new_lines = 0 + + # the line number of the last written statement + self._last_line = 0 + + # true if nothing was written so far. + self._first_write = True + + # used by the `temporary_identifier` method to get new + # unique, temporary identifier + self._last_identifier = 0 + + # the current indentation + self._indentation = 0 + + # -- Various compilation helpers + + def fail(self, msg, lineno): + """Fail with a :exc:`TemplateAssertionError`.""" + raise TemplateAssertionError(msg, lineno, self.name, self.filename) + + def temporary_identifier(self): + """Get a new unique identifier.""" + self._last_identifier += 1 + return 't_%d' % self._last_identifier + + def buffer(self, frame): + """Enable buffering for the frame from that point onwards.""" + frame.buffer = self.temporary_identifier() + self.writeline('%s = []' % frame.buffer) + + def return_buffer_contents(self, frame): + """Return the buffer contents of the frame.""" + if frame.eval_ctx.volatile: + self.writeline('if context.eval_ctx.autoescape:') + self.indent() + self.writeline('return Markup(concat(%s))' % frame.buffer) + self.outdent() + self.writeline('else:') + self.indent() + self.writeline('return concat(%s)' % frame.buffer) + self.outdent() + elif frame.eval_ctx.autoescape: + self.writeline('return Markup(concat(%s))' % frame.buffer) + else: + self.writeline('return concat(%s)' % frame.buffer) + + def indent(self): + """Indent by one.""" + self._indentation += 1 + + def outdent(self, step=1): + """Outdent by step.""" + self._indentation -= step + + def start_write(self, 
frame, node=None): + """Yield or write into the frame buffer.""" + if frame.buffer is None: + self.writeline('yield ', node) + else: + self.writeline('%s.append(' % frame.buffer, node) + + def end_write(self, frame): + """End the writing process started by `start_write`.""" + if frame.buffer is not None: + self.write(')') + + def simple_write(self, s, frame, node=None): + """Simple shortcut for start_write + write + end_write.""" + self.start_write(frame, node) + self.write(s) + self.end_write(frame) + + def blockvisit(self, nodes, frame): + """Visit a list of nodes as block in a frame. If the current frame + is no buffer a dummy ``if 0: yield None`` is written automatically + unless the force_generator parameter is set to False. + """ + if frame.buffer is None: + self.writeline('if 0: yield None') + else: + self.writeline('pass') + try: + for node in nodes: + self.visit(node, frame) + except CompilerExit: + pass + + def write(self, x): + """Write a string into the output stream.""" + if self._new_lines: + if not self._first_write: + self.stream.write('\n' * self._new_lines) + self.code_lineno += self._new_lines + if self._write_debug_info is not None: + self.debug_info.append((self._write_debug_info, + self.code_lineno)) + self._write_debug_info = None + self._first_write = False + self.stream.write(' ' * self._indentation) + self._new_lines = 0 + self.stream.write(x) + + def writeline(self, x, node=None, extra=0): + """Combination of newline and write.""" + self.newline(node, extra) + self.write(x) + + def newline(self, node=None, extra=0): + """Add one or more newlines before the next write.""" + self._new_lines = max(self._new_lines, 1 + extra) + if node is not None and node.lineno != self._last_line: + self._write_debug_info = node.lineno + self._last_line = node.lineno + + def signature(self, node, frame, extra_kwargs=None): + """Writes a function call to the stream for the current node. + A leading comma is added automatically. 
The extra keyword + arguments may not include python keywords otherwise a syntax + error could occour. The extra keyword arguments should be given + as python dict. + """ + # if any of the given keyword arguments is a python keyword + # we have to make sure that no invalid call is created. + kwarg_workaround = False + for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): + if is_python_keyword(kwarg): + kwarg_workaround = True + break + + for arg in node.args: + self.write(', ') + self.visit(arg, frame) + + if not kwarg_workaround: + for kwarg in node.kwargs: + self.write(', ') + self.visit(kwarg, frame) + if extra_kwargs is not None: + for key, value in extra_kwargs.iteritems(): + self.write(', %s=%s' % (key, value)) + if node.dyn_args: + self.write(', *') + self.visit(node.dyn_args, frame) + + if kwarg_workaround: + if node.dyn_kwargs is not None: + self.write(', **dict({') + else: + self.write(', **{') + for kwarg in node.kwargs: + self.write('%r: ' % kwarg.key) + self.visit(kwarg.value, frame) + self.write(', ') + if extra_kwargs is not None: + for key, value in extra_kwargs.iteritems(): + self.write('%r: %s, ' % (key, value)) + if node.dyn_kwargs is not None: + self.write('}, **') + self.visit(node.dyn_kwargs, frame) + self.write(')') + else: + self.write('}') + + elif node.dyn_kwargs is not None: + self.write(', **') + self.visit(node.dyn_kwargs, frame) + + def pull_locals(self, frame): + """Pull all the references identifiers into the local scope.""" + for name in frame.identifiers.undeclared: + self.writeline('l_%s = context.resolve(%r)' % (name, name)) + + def pull_dependencies(self, nodes): + """Pull all the dependencies.""" + visitor = DependencyFinderVisitor() + for node in nodes: + visitor.visit(node) + for dependency in 'filters', 'tests': + mapping = getattr(self, dependency) + for name in getattr(visitor, dependency): + if name not in mapping: + mapping[name] = self.temporary_identifier() + self.writeline('%s = environment.%s[%r]' % 
+ (mapping[name], dependency, name)) + + def unoptimize_scope(self, frame): + """Disable Python optimizations for the frame.""" + # XXX: this is not that nice but it has no real overhead. It + # mainly works because python finds the locals before dead code + # is removed. If that breaks we have to add a dummy function + # that just accepts the arguments and does nothing. + if frame.identifiers.declared: + self.writeline('%sdummy(%s)' % ( + unoptimize_before_dead_code and 'if 0: ' or '', + ', '.join('l_' + name for name in frame.identifiers.declared) + )) + + def push_scope(self, frame, extra_vars=()): + """This function returns all the shadowed variables in a dict + in the form name: alias and will write the required assignments + into the current scope. No indentation takes place. + + This also predefines locally declared variables from the loop + body because under some circumstances it may be the case that + + `extra_vars` is passed to `Frame.find_shadowed`. + """ + aliases = {} + for name in frame.find_shadowed(extra_vars): + aliases[name] = ident = self.temporary_identifier() + self.writeline('%s = l_%s' % (ident, name)) + to_declare = set() + for name in frame.identifiers.declared_locally: + if name not in aliases: + to_declare.add('l_' + name) + if to_declare: + self.writeline(' = '.join(to_declare) + ' = missing') + return aliases + + def pop_scope(self, aliases, frame): + """Restore all aliases and delete unused variables.""" + for name, alias in aliases.iteritems(): + self.writeline('l_%s = %s' % (name, alias)) + to_delete = set() + for name in frame.identifiers.declared_locally: + if name not in aliases: + to_delete.add('l_' + name) + if to_delete: + # we cannot use the del statement here because enclosed + # scopes can trigger a SyntaxError: + # a = 42; b = lambda: a; del a + self.writeline(' = '.join(to_delete) + ' = missing') + + def function_scoping(self, node, frame, children=None, + find_special=True): + """In Jinja a few statements require the 
help of anonymous + functions. Those are currently macros and call blocks and in + the future also recursive loops. As there is currently + technical limitation that doesn't allow reading and writing a + variable in a scope where the initial value is coming from an + outer scope, this function tries to fall back with a common + error message. Additionally the frame passed is modified so + that the argumetns are collected and callers are looked up. + + This will return the modified frame. + """ + # we have to iterate twice over it, make sure that works + if children is None: + children = node.iter_child_nodes() + children = list(children) + func_frame = frame.inner() + func_frame.inspect(children, hard_scope=True) + + # variables that are undeclared (accessed before declaration) and + # declared locally *and* part of an outside scope raise a template + # assertion error. Reason: we can't generate reasonable code from + # it without aliasing all the variables. + # this could be fixed in Python 3 where we have the nonlocal + # keyword or if we switch to bytecode generation + overriden_closure_vars = ( + func_frame.identifiers.undeclared & + func_frame.identifiers.declared & + (func_frame.identifiers.declared_locally | + func_frame.identifiers.declared_parameter) + ) + if overriden_closure_vars: + self.fail('It\'s not possible to set and access variables ' + 'derived from an outer scope! (affects: %s)' % + ', '.join(sorted(overriden_closure_vars)), node.lineno) + + # remove variables from a closure from the frame's undeclared + # identifiers. 
+ func_frame.identifiers.undeclared -= ( + func_frame.identifiers.undeclared & + func_frame.identifiers.declared + ) + + # no special variables for this scope, abort early + if not find_special: + return func_frame + + func_frame.accesses_kwargs = False + func_frame.accesses_varargs = False + func_frame.accesses_caller = False + func_frame.arguments = args = ['l_' + x.name for x in node.args] + + undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs')) + + if 'caller' in undeclared: + func_frame.accesses_caller = True + func_frame.identifiers.add_special('caller') + args.append('l_caller') + if 'kwargs' in undeclared: + func_frame.accesses_kwargs = True + func_frame.identifiers.add_special('kwargs') + args.append('l_kwargs') + if 'varargs' in undeclared: + func_frame.accesses_varargs = True + func_frame.identifiers.add_special('varargs') + args.append('l_varargs') + return func_frame + + def macro_body(self, node, frame, children=None): + """Dump the function def of a macro or call block.""" + frame = self.function_scoping(node, frame, children) + # macros are delayed, they never require output checks + frame.require_output_check = False + args = frame.arguments + # XXX: this is an ugly fix for the loop nesting bug + # (tests.test_old_bugs.test_loop_call_bug). This works around + # a identifier nesting problem we have in general. It's just more + # likely to happen in loops which is why we work around it. The + # real solution would be "nonlocal" all the identifiers that are + # leaking into a new python frame and might be used both unassigned + # and assigned. 
+ if 'loop' in frame.identifiers.declared: + args = args + ['l_loop=l_loop'] + self.writeline('def macro(%s):' % ', '.join(args), node) + self.indent() + self.buffer(frame) + self.pull_locals(frame) + self.blockvisit(node.body, frame) + self.return_buffer_contents(frame) + self.outdent() + return frame + + def macro_def(self, node, frame): + """Dump the macro definition for the def created by macro_body.""" + arg_tuple = ', '.join(repr(x.name) for x in node.args) + name = getattr(node, 'name', None) + if len(node.args) == 1: + arg_tuple += ',' + self.write('Macro(environment, macro, %r, (%s), (' % + (name, arg_tuple)) + for arg in node.defaults: + self.visit(arg, frame) + self.write(', ') + self.write('), %r, %r, %r)' % ( + bool(frame.accesses_kwargs), + bool(frame.accesses_varargs), + bool(frame.accesses_caller) + )) + + def position(self, node): + """Return a human readable position for the node.""" + rv = 'line %d' % node.lineno + if self.name is not None: + rv += ' in ' + repr(self.name) + return rv + + # -- Statement Visitors + + def visit_Template(self, node, frame=None): + assert frame is None, 'no root frame allowed' + eval_ctx = EvalContext(self.environment, self.name) + + from jinja2.runtime import __all__ as exported + self.writeline('from __future__ import division') + self.writeline('from jinja2.runtime import ' + ', '.join(exported)) + if not unoptimize_before_dead_code: + self.writeline('dummy = lambda *x: None') + + # if we want a deferred initialization we cannot move the + # environment into a local name + envenv = not self.defer_init and ', environment=environment' or '' + + # do we have an extends tag at all? If not, we can save some + # overhead by just not processing any inheritance code. 
+ have_extends = node.find(nodes.Extends) is not None + + # find all blocks + for block in node.find_all(nodes.Block): + if block.name in self.blocks: + self.fail('block %r defined twice' % block.name, block.lineno) + self.blocks[block.name] = block + + # find all imports and import them + for import_ in node.find_all(nodes.ImportedName): + if import_.importname not in self.import_aliases: + imp = import_.importname + self.import_aliases[imp] = alias = self.temporary_identifier() + if '.' in imp: + module, obj = imp.rsplit('.', 1) + self.writeline('from %s import %s as %s' % + (module, obj, alias)) + else: + self.writeline('import %s as %s' % (imp, alias)) + + # add the load name + self.writeline('name = %r' % self.name) + + # generate the root render function. + self.writeline('def root(context%s):' % envenv, extra=1) + + # process the root + frame = Frame(eval_ctx) + frame.inspect(node.body) + frame.toplevel = frame.rootlevel = True + frame.require_output_check = have_extends and not self.has_known_extends + self.indent() + if have_extends: + self.writeline('parent_template = None') + if 'self' in find_undeclared(node.body, ('self',)): + frame.identifiers.add_special('self') + self.writeline('l_self = TemplateReference(context)') + self.pull_locals(frame) + self.pull_dependencies(node.body) + self.blockvisit(node.body, frame) + self.outdent() + + # make sure that the parent root is called. + if have_extends: + if not self.has_known_extends: + self.indent() + self.writeline('if parent_template is not None:') + self.indent() + self.writeline('for event in parent_template.' + 'root_render_func(context):') + self.indent() + self.writeline('yield event') + self.outdent(2 + (not self.has_known_extends)) + + # at this point we now have the blocks collected and can visit them too. 
+ for name, block in self.blocks.iteritems(): + block_frame = Frame(eval_ctx) + block_frame.inspect(block.body) + block_frame.block = name + self.writeline('def block_%s(context%s):' % (name, envenv), + block, 1) + self.indent() + undeclared = find_undeclared(block.body, ('self', 'super')) + if 'self' in undeclared: + block_frame.identifiers.add_special('self') + self.writeline('l_self = TemplateReference(context)') + if 'super' in undeclared: + block_frame.identifiers.add_special('super') + self.writeline('l_super = context.super(%r, ' + 'block_%s)' % (name, name)) + self.pull_locals(block_frame) + self.pull_dependencies(block.body) + self.blockvisit(block.body, block_frame) + self.outdent() + + self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x) + for x in self.blocks), + extra=1) + + # add a function that returns the debug info + self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x + in self.debug_info)) + + def visit_Block(self, node, frame): + """Call a block and register it for the template.""" + level = 1 + if frame.toplevel: + # if we know that we are a child template, there is no need to + # check if we are one + if self.has_known_extends: + return + if self.extends_so_far > 0: + self.writeline('if parent_template is None:') + self.indent() + level += 1 + context = node.scoped and 'context.derived(locals())' or 'context' + self.writeline('for event in context.blocks[%r][0](%s):' % ( + node.name, context), node) + self.indent() + self.simple_write('event', frame) + self.outdent(level) + + def visit_Extends(self, node, frame): + """Calls the extender.""" + if not frame.toplevel: + self.fail('cannot use extend from a non top-level scope', + node.lineno) + + # if the number of extends statements in general is zero so + # far, we don't have to add a check if something extended + # the template before this one. + if self.extends_so_far > 0: + + # if we have a known extends we just add a template runtime + # error into the generated code. 
We could catch that at compile + # time too, but i welcome it not to confuse users by throwing the + # same error at different times just "because we can". + if not self.has_known_extends: + self.writeline('if parent_template is not None:') + self.indent() + self.writeline('raise TemplateRuntimeError(%r)' % + 'extended multiple times') + self.outdent() + + # if we have a known extends already we don't need that code here + # as we know that the template execution will end here. + if self.has_known_extends: + raise CompilerExit() + + self.writeline('parent_template = environment.get_template(', node) + self.visit(node.template, frame) + self.write(', %r)' % self.name) + self.writeline('for name, parent_block in parent_template.' + 'blocks.%s():' % dict_item_iter) + self.indent() + self.writeline('context.blocks.setdefault(name, []).' + 'append(parent_block)') + self.outdent() + + # if this extends statement was in the root level we can take + # advantage of that information and simplify the generated code + # in the top level from this point onwards + if frame.rootlevel: + self.has_known_extends = True + + # and now we have one more + self.extends_so_far += 1 + + def visit_Include(self, node, frame): + """Handles includes.""" + if node.with_context: + self.unoptimize_scope(frame) + if node.ignore_missing: + self.writeline('try:') + self.indent() + + func_name = 'get_or_select_template' + if isinstance(node.template, nodes.Const): + if isinstance(node.template.value, basestring): + func_name = 'get_template' + elif isinstance(node.template.value, (tuple, list)): + func_name = 'select_template' + elif isinstance(node.template, (nodes.Tuple, nodes.List)): + func_name = 'select_template' + + self.writeline('template = environment.%s(' % func_name, node) + self.visit(node.template, frame) + self.write(', %r)' % self.name) + if node.ignore_missing: + self.outdent() + self.writeline('except TemplateNotFound:') + self.indent() + self.writeline('pass') + self.outdent() + 
self.writeline('else:') + self.indent() + + if node.with_context: + self.writeline('for event in template.root_render_func(' + 'template.new_context(context.parent, True, ' + 'locals())):') + else: + self.writeline('for event in template.module._body_stream:') + + self.indent() + self.simple_write('event', frame) + self.outdent() + + if node.ignore_missing: + self.outdent() + + def visit_Import(self, node, frame): + """Visit regular imports.""" + if node.with_context: + self.unoptimize_scope(frame) + self.writeline('l_%s = ' % node.target, node) + if frame.toplevel: + self.write('context.vars[%r] = ' % node.target) + self.write('environment.get_template(') + self.visit(node.template, frame) + self.write(', %r).' % self.name) + if node.with_context: + self.write('make_module(context.parent, True, locals())') + else: + self.write('module') + if frame.toplevel and not node.target.startswith('_'): + self.writeline('context.exported_vars.discard(%r)' % node.target) + frame.assigned_names.add(node.target) + + def visit_FromImport(self, node, frame): + """Visit named imports.""" + self.newline(node) + self.write('included_template = environment.get_template(') + self.visit(node.template, frame) + self.write(', %r).' 
% self.name) + if node.with_context: + self.write('make_module(context.parent, True)') + else: + self.write('module') + + var_names = [] + discarded_names = [] + for name in node.names: + if isinstance(name, tuple): + name, alias = name + else: + alias = name + self.writeline('l_%s = getattr(included_template, ' + '%r, missing)' % (alias, name)) + self.writeline('if l_%s is missing:' % alias) + self.indent() + self.writeline('l_%s = environment.undefined(%r %% ' + 'included_template.__name__, ' + 'name=%r)' % + (alias, 'the template %%r (imported on %s) does ' + 'not export the requested name %s' % ( + self.position(node), + repr(name) + ), name)) + self.outdent() + if frame.toplevel: + var_names.append(alias) + if not alias.startswith('_'): + discarded_names.append(alias) + frame.assigned_names.add(alias) + + if var_names: + if len(var_names) == 1: + name = var_names[0] + self.writeline('context.vars[%r] = l_%s' % (name, name)) + else: + self.writeline('context.vars.update({%s})' % ', '.join( + '%r: l_%s' % (name, name) for name in var_names + )) + if discarded_names: + if len(discarded_names) == 1: + self.writeline('context.exported_vars.discard(%r)' % + discarded_names[0]) + else: + self.writeline('context.exported_vars.difference_' + 'update((%s))' % ', '.join(map(repr, discarded_names))) + + def visit_For(self, node, frame): + # when calculating the nodes for the inner frame we have to exclude + # the iterator contents from it + children = node.iter_child_nodes(exclude=('iter',)) + if node.recursive: + loop_frame = self.function_scoping(node, frame, children, + find_special=False) + else: + loop_frame = frame.inner() + loop_frame.inspect(children) + + # try to figure out if we have an extended loop. An extended loop + # is necessary if the loop is in recursive mode if the special loop + # variable is accessed in the body. 
+ extended_loop = node.recursive or 'loop' in \ + find_undeclared(node.iter_child_nodes( + only=('body',)), ('loop',)) + + # if we don't have an recursive loop we have to find the shadowed + # variables at that point. Because loops can be nested but the loop + # variable is a special one we have to enforce aliasing for it. + if not node.recursive: + aliases = self.push_scope(loop_frame, ('loop',)) + + # otherwise we set up a buffer and add a function def + else: + self.writeline('def loop(reciter, loop_render_func):', node) + self.indent() + self.buffer(loop_frame) + aliases = {} + + # make sure the loop variable is a special one and raise a template + # assertion error if a loop tries to write to loop + if extended_loop: + loop_frame.identifiers.add_special('loop') + for name in node.find_all(nodes.Name): + if name.ctx == 'store' and name.name == 'loop': + self.fail('Can\'t assign to special loop variable ' + 'in for-loop target', name.lineno) + + self.pull_locals(loop_frame) + if node.else_: + iteration_indicator = self.temporary_identifier() + self.writeline('%s = 1' % iteration_indicator) + + # Create a fake parent loop if the else or test section of a + # loop is accessing the special loop variable and no parent loop + # exists. + if 'loop' not in aliases and 'loop' in find_undeclared( + node.iter_child_nodes(only=('else_', 'test')), ('loop',)): + self.writeline("l_loop = environment.undefined(%r, name='loop')" % + ("'loop' is undefined. the filter section of a loop as well " + "as the else block don't have access to the special 'loop'" + " variable of the current loop. Because there is no parent " + "loop it's undefined. Happened in loop on %s" % + self.position(node))) + + self.writeline('for ', node) + self.visit(node.target, loop_frame) + self.write(extended_loop and ', l_loop in LoopContext(' or ' in ') + + # if we have an extened loop and a node test, we filter in the + # "outer frame". 
+ if extended_loop and node.test is not None: + self.write('(') + self.visit(node.target, loop_frame) + self.write(' for ') + self.visit(node.target, loop_frame) + self.write(' in ') + if node.recursive: + self.write('reciter') + else: + self.visit(node.iter, loop_frame) + self.write(' if (') + test_frame = loop_frame.copy() + self.visit(node.test, test_frame) + self.write('))') + + elif node.recursive: + self.write('reciter') + else: + self.visit(node.iter, loop_frame) + + if node.recursive: + self.write(', recurse=loop_render_func):') + else: + self.write(extended_loop and '):' or ':') + + # tests in not extended loops become a continue + if not extended_loop and node.test is not None: + self.indent() + self.writeline('if not ') + self.visit(node.test, loop_frame) + self.write(':') + self.indent() + self.writeline('continue') + self.outdent(2) + + self.indent() + self.blockvisit(node.body, loop_frame) + if node.else_: + self.writeline('%s = 0' % iteration_indicator) + self.outdent() + + if node.else_: + self.writeline('if %s:' % iteration_indicator) + self.indent() + self.blockvisit(node.else_, loop_frame) + self.outdent() + + # reset the aliases if there are any. 
+ if not node.recursive: + self.pop_scope(aliases, loop_frame) + + # if the node was recursive we have to return the buffer contents + # and start the iteration code + if node.recursive: + self.return_buffer_contents(loop_frame) + self.outdent() + self.start_write(frame, node) + self.write('loop(') + self.visit(node.iter, frame) + self.write(', loop)') + self.end_write(frame) + + def visit_If(self, node, frame): + if_frame = frame.soft() + self.writeline('if ', node) + self.visit(node.test, if_frame) + self.write(':') + self.indent() + self.blockvisit(node.body, if_frame) + self.outdent() + if node.else_: + self.writeline('else:') + self.indent() + self.blockvisit(node.else_, if_frame) + self.outdent() + + def visit_Macro(self, node, frame): + macro_frame = self.macro_body(node, frame) + self.newline() + if frame.toplevel: + if not node.name.startswith('_'): + self.write('context.exported_vars.add(%r)' % node.name) + self.writeline('context.vars[%r] = ' % node.name) + self.write('l_%s = ' % node.name) + self.macro_def(node, macro_frame) + frame.assigned_names.add(node.name) + + def visit_CallBlock(self, node, frame): + children = node.iter_child_nodes(exclude=('call',)) + call_frame = self.macro_body(node, frame, children) + self.writeline('caller = ') + self.macro_def(node, call_frame) + self.start_write(frame, node) + self.visit_Call(node.call, call_frame, forward_caller=True) + self.end_write(frame) + + def visit_FilterBlock(self, node, frame): + filter_frame = frame.inner() + filter_frame.inspect(node.iter_child_nodes()) + aliases = self.push_scope(filter_frame) + self.pull_locals(filter_frame) + self.buffer(filter_frame) + self.blockvisit(node.body, filter_frame) + self.start_write(frame, node) + self.visit_Filter(node.filter, filter_frame) + self.end_write(frame) + self.pop_scope(aliases, filter_frame) + + def visit_ExprStmt(self, node, frame): + self.newline(node) + self.visit(node.node, frame) + + def visit_Output(self, node, frame): + # if we have a known 
extends statement, we don't output anything + # if we are in a require_output_check section + if self.has_known_extends and frame.require_output_check: + return + + if self.environment.finalize: + finalize = lambda x: unicode(self.environment.finalize(x)) + else: + finalize = unicode + + # if we are inside a frame that requires output checking, we do so + outdent_later = False + if frame.require_output_check: + self.writeline('if parent_template is None:') + self.indent() + outdent_later = True + + # try to evaluate as many chunks as possible into a static + # string at compile time. + body = [] + for child in node.nodes: + try: + const = child.as_const(frame.eval_ctx) + except nodes.Impossible: + body.append(child) + continue + # the frame can't be volatile here, becaus otherwise the + # as_const() function would raise an Impossible exception + # at that point. + try: + if frame.eval_ctx.autoescape: + if hasattr(const, '__html__'): + const = const.__html__() + else: + const = escape(const) + const = finalize(const) + except Exception: + # if something goes wrong here we evaluate the node + # at runtime for easier debugging + body.append(child) + continue + if body and isinstance(body[-1], list): + body[-1].append(const) + else: + body.append([const]) + + # if we have less than 3 nodes or a buffer we yield or extend/append + if len(body) < 3 or frame.buffer is not None: + if frame.buffer is not None: + # for one item we append, for more we extend + if len(body) == 1: + self.writeline('%s.append(' % frame.buffer) + else: + self.writeline('%s.extend((' % frame.buffer) + self.indent() + for item in body: + if isinstance(item, list): + val = repr(concat(item)) + if frame.buffer is None: + self.writeline('yield ' + val) + else: + self.writeline(val + ', ') + else: + if frame.buffer is None: + self.writeline('yield ', item) + else: + self.newline(item) + close = 1 + if frame.eval_ctx.volatile: + self.write('(context.eval_ctx.autoescape and' + ' escape or to_string)(') + 
elif frame.eval_ctx.autoescape: + self.write('escape(') + else: + self.write('to_string(') + if self.environment.finalize is not None: + self.write('environment.finalize(') + close += 1 + self.visit(item, frame) + self.write(')' * close) + if frame.buffer is not None: + self.write(', ') + if frame.buffer is not None: + # close the open parentheses + self.outdent() + self.writeline(len(body) == 1 and ')' or '))') + + # otherwise we create a format string as this is faster in that case + else: + format = [] + arguments = [] + for item in body: + if isinstance(item, list): + format.append(concat(item).replace('%', '%%')) + else: + format.append('%s') + arguments.append(item) + self.writeline('yield ') + self.write(repr(concat(format)) + ' % (') + idx = -1 + self.indent() + for argument in arguments: + self.newline(argument) + close = 0 + if frame.eval_ctx.volatile: + self.write('(context.eval_ctx.autoescape and' + ' escape or to_string)(') + close += 1 + elif frame.eval_ctx.autoescape: + self.write('escape(') + close += 1 + if self.environment.finalize is not None: + self.write('environment.finalize(') + close += 1 + self.visit(argument, frame) + self.write(')' * close + ', ') + self.outdent() + self.writeline(')') + + if outdent_later: + self.outdent() + + def visit_Assign(self, node, frame): + self.newline(node) + # toplevel assignments however go into the local namespace and + # the current template's context. We create a copy of the frame + # here and add a set so that the Name visitor can add the assigned + # names here. + if frame.toplevel: + assignment_frame = frame.copy() + assignment_frame.toplevel_assignments = set() + else: + assignment_frame = frame + self.visit(node.target, assignment_frame) + self.write(' = ') + self.visit(node.node, frame) + + # make sure toplevel assignments are added to the context. 
+ if frame.toplevel: + public_names = [x for x in assignment_frame.toplevel_assignments + if not x.startswith('_')] + if len(assignment_frame.toplevel_assignments) == 1: + name = next(iter(assignment_frame.toplevel_assignments)) + self.writeline('context.vars[%r] = l_%s' % (name, name)) + else: + self.writeline('context.vars.update({') + for idx, name in enumerate(assignment_frame.toplevel_assignments): + if idx: + self.write(', ') + self.write('%r: l_%s' % (name, name)) + self.write('})') + if public_names: + if len(public_names) == 1: + self.writeline('context.exported_vars.add(%r)' % + public_names[0]) + else: + self.writeline('context.exported_vars.update((%s))' % + ', '.join(map(repr, public_names))) + + # -- Expression Visitors + + def visit_Name(self, node, frame): + if node.ctx == 'store' and frame.toplevel: + frame.toplevel_assignments.add(node.name) + self.write('l_' + node.name) + frame.assigned_names.add(node.name) + + def visit_Const(self, node, frame): + val = node.value + if isinstance(val, float): + self.write(str(val)) + else: + self.write(repr(val)) + + def visit_TemplateData(self, node, frame): + try: + self.write(repr(node.as_const(frame.eval_ctx))) + except nodes.Impossible: + self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)' + % node.data) + + def visit_Tuple(self, node, frame): + self.write('(') + idx = -1 + for idx, item in enumerate(node.items): + if idx: + self.write(', ') + self.visit(item, frame) + self.write(idx == 0 and ',)' or ')') + + def visit_List(self, node, frame): + self.write('[') + for idx, item in enumerate(node.items): + if idx: + self.write(', ') + self.visit(item, frame) + self.write(']') + + def visit_Dict(self, node, frame): + self.write('{') + for idx, item in enumerate(node.items): + if idx: + self.write(', ') + self.visit(item.key, frame) + self.write(': ') + self.visit(item.value, frame) + self.write('}') + + def binop(operator, interceptable=True): + def visitor(self, node, frame): + if 
self.environment.sandboxed and \ + operator in self.environment.intercepted_binops: + self.write('environment.call_binop(context, %r, ' % operator) + self.visit(node.left, frame) + self.write(', ') + self.visit(node.right, frame) + else: + self.write('(') + self.visit(node.left, frame) + self.write(' %s ' % operator) + self.visit(node.right, frame) + self.write(')') + return visitor + + def uaop(operator, interceptable=True): + def visitor(self, node, frame): + if self.environment.sandboxed and \ + operator in self.environment.intercepted_unops: + self.write('environment.call_unop(context, %r, ' % operator) + self.visit(node.node, frame) + else: + self.write('(' + operator) + self.visit(node.node, frame) + self.write(')') + return visitor + + visit_Add = binop('+') + visit_Sub = binop('-') + visit_Mul = binop('*') + visit_Div = binop('/') + visit_FloorDiv = binop('//') + visit_Pow = binop('**') + visit_Mod = binop('%') + visit_And = binop('and', interceptable=False) + visit_Or = binop('or', interceptable=False) + visit_Pos = uaop('+') + visit_Neg = uaop('-') + visit_Not = uaop('not ', interceptable=False) + del binop, uaop + + def visit_Concat(self, node, frame): + if frame.eval_ctx.volatile: + func_name = '(context.eval_ctx.volatile and' \ + ' markup_join or unicode_join)' + elif frame.eval_ctx.autoescape: + func_name = 'markup_join' + else: + func_name = 'unicode_join' + self.write('%s((' % func_name) + for arg in node.nodes: + self.visit(arg, frame) + self.write(', ') + self.write('))') + + def visit_Compare(self, node, frame): + self.visit(node.expr, frame) + for op in node.ops: + self.visit(op, frame) + + def visit_Operand(self, node, frame): + self.write(' %s ' % operators[node.op]) + self.visit(node.expr, frame) + + def visit_Getattr(self, node, frame): + self.write('environment.getattr(') + self.visit(node.node, frame) + self.write(', %r)' % node.attr) + + def visit_Getitem(self, node, frame): + # slices bypass the environment getitem method. 
+ if isinstance(node.arg, nodes.Slice): + self.visit(node.node, frame) + self.write('[') + self.visit(node.arg, frame) + self.write(']') + else: + self.write('environment.getitem(') + self.visit(node.node, frame) + self.write(', ') + self.visit(node.arg, frame) + self.write(')') + + def visit_Slice(self, node, frame): + if node.start is not None: + self.visit(node.start, frame) + self.write(':') + if node.stop is not None: + self.visit(node.stop, frame) + if node.step is not None: + self.write(':') + self.visit(node.step, frame) + + def visit_Filter(self, node, frame): + self.write(self.filters[node.name] + '(') + func = self.environment.filters.get(node.name) + if func is None: + self.fail('no filter named %r' % node.name, node.lineno) + if getattr(func, 'contextfilter', False): + self.write('context, ') + elif getattr(func, 'evalcontextfilter', False): + self.write('context.eval_ctx, ') + elif getattr(func, 'environmentfilter', False): + self.write('environment, ') + + # if the filter node is None we are inside a filter block + # and want to write to the current buffer + if node.node is not None: + self.visit(node.node, frame) + elif frame.eval_ctx.volatile: + self.write('(context.eval_ctx.autoescape and' + ' Markup(concat(%s)) or concat(%s))' % + (frame.buffer, frame.buffer)) + elif frame.eval_ctx.autoescape: + self.write('Markup(concat(%s))' % frame.buffer) + else: + self.write('concat(%s)' % frame.buffer) + self.signature(node, frame) + self.write(')') + + def visit_Test(self, node, frame): + self.write(self.tests[node.name] + '(') + if node.name not in self.environment.tests: + self.fail('no test named %r' % node.name, node.lineno) + self.visit(node.node, frame) + self.signature(node, frame) + self.write(')') + + def visit_CondExpr(self, node, frame): + def write_expr2(): + if node.expr2 is not None: + return self.visit(node.expr2, frame) + self.write('environment.undefined(%r)' % ('the inline if-' + 'expression on %s evaluated to false and ' + 'no else 
section was defined.' % self.position(node))) + + if not have_condexpr: + self.write('((') + self.visit(node.test, frame) + self.write(') and (') + self.visit(node.expr1, frame) + self.write(',) or (') + write_expr2() + self.write(',))[0]') + else: + self.write('(') + self.visit(node.expr1, frame) + self.write(' if ') + self.visit(node.test, frame) + self.write(' else ') + write_expr2() + self.write(')') + + def visit_Call(self, node, frame, forward_caller=False): + if self.environment.sandboxed: + self.write('environment.call(context, ') + else: + self.write('context.call(') + self.visit(node.node, frame) + extra_kwargs = forward_caller and {'caller': 'caller'} or None + self.signature(node, frame, extra_kwargs) + self.write(')') + + def visit_Keyword(self, node, frame): + self.write(node.key + '=') + self.visit(node.value, frame) + + # -- Unused nodes for extensions + + def visit_MarkSafe(self, node, frame): + self.write('Markup(') + self.visit(node.expr, frame) + self.write(')') + + def visit_MarkSafeIfAutoescape(self, node, frame): + self.write('(context.eval_ctx.autoescape and Markup or identity)(') + self.visit(node.expr, frame) + self.write(')') + + def visit_EnvironmentAttribute(self, node, frame): + self.write('environment.' 
+ node.name) + + def visit_ExtensionAttribute(self, node, frame): + self.write('environment.extensions[%r].%s' % (node.identifier, node.name)) + + def visit_ImportedName(self, node, frame): + self.write(self.import_aliases[node.importname]) + + def visit_InternalName(self, node, frame): + self.write(node.name) + + def visit_ContextReference(self, node, frame): + self.write('context') + + def visit_Continue(self, node, frame): + self.writeline('continue', node) + + def visit_Break(self, node, frame): + self.writeline('break', node) + + def visit_Scope(self, node, frame): + scope_frame = frame.inner() + scope_frame.inspect(node.iter_child_nodes()) + aliases = self.push_scope(scope_frame) + self.pull_locals(scope_frame) + self.blockvisit(node.body, scope_frame) + self.pop_scope(aliases, scope_frame) + + def visit_EvalContextModifier(self, node, frame): + for keyword in node.options: + self.writeline('context.eval_ctx.%s = ' % keyword.key) + self.visit(keyword.value, frame) + try: + val = keyword.value.as_const(frame.eval_ctx) + except nodes.Impossible: + frame.eval_ctx.volatile = True + else: + setattr(frame.eval_ctx, keyword.key, val) + + def visit_ScopedEvalContextModifier(self, node, frame): + old_ctx_name = self.temporary_identifier() + safed_ctx = frame.eval_ctx.save() + self.writeline('%s = context.eval_ctx.save()' % old_ctx_name) + self.visit_EvalContextModifier(node, frame) + for child in node.body: + self.visit(child, frame) + frame.eval_ctx.revert(safed_ctx) + self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name) diff --git a/libs/jinja2/constants.py b/libs/jinja2/constants.py new file mode 100644 index 0000000..cab203c --- /dev/null +++ b/libs/jinja2/constants.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +""" + jinja.constants + ~~~~~~~~~~~~~~~ + + Various constants. + + :copyright: (c) 2010 by the Jinja Team. + :license: BSD, see LICENSE for more details. 
+""" + + +#: list of lorem ipsum words used by the lipsum() helper function +LOREM_IPSUM_WORDS = u'''\ +a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at +auctor augue bibendum blandit class commodo condimentum congue consectetuer +consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus +diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend +elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames +faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac +hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum +justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem +luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie +mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non +nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque +penatibus per pharetra phasellus placerat platea porta porttitor posuere +potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus +ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit +sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor +tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices +ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus +viverra volutpat vulputate''' diff --git a/libs/jinja2/debug.py b/libs/jinja2/debug.py new file mode 100644 index 0000000..f1cc3bc --- /dev/null +++ b/libs/jinja2/debug.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +""" + jinja2.debug + ~~~~~~~~~~~~ + + Implements the debug interface for Jinja. This module does some pretty + ugly stuff with the Python traceback system in order to achieve tracebacks + with correct line numbers, locals and contents. + + :copyright: (c) 2010 by the Jinja Team. + :license: BSD, see LICENSE for more details. 
+""" +import sys +import traceback +from types import TracebackType +from jinja2.utils import CodeType, missing, internal_code +from jinja2.exceptions import TemplateSyntaxError + +# on pypy we can take advantage of transparent proxies +try: + from __pypy__ import tproxy +except ImportError: + tproxy = None + + +# how does the raise helper look like? +try: + exec "raise TypeError, 'foo'" +except SyntaxError: + raise_helper = 'raise __jinja_exception__[1]' +except TypeError: + raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]' + + +class TracebackFrameProxy(object): + """Proxies a traceback frame.""" + + def __init__(self, tb): + self.tb = tb + self._tb_next = None + + @property + def tb_next(self): + return self._tb_next + + def set_next(self, next): + if tb_set_next is not None: + tb_set_next(self.tb, next and next.tb or None) + self._tb_next = next + + @property + def is_jinja_frame(self): + return '__jinja_template__' in self.tb.tb_frame.f_globals + + def __getattr__(self, name): + return getattr(self.tb, name) + + +def make_frame_proxy(frame): + proxy = TracebackFrameProxy(frame) + if tproxy is None: + return proxy + def operation_handler(operation, *args, **kwargs): + if operation in ('__getattribute__', '__getattr__'): + return getattr(proxy, args[0]) + elif operation == '__setattr__': + proxy.__setattr__(*args, **kwargs) + else: + return getattr(proxy, operation)(*args, **kwargs) + return tproxy(TracebackType, operation_handler) + + +class ProcessedTraceback(object): + """Holds a Jinja preprocessed traceback for priting or reraising.""" + + def __init__(self, exc_type, exc_value, frames): + assert frames, 'no frames for this traceback?' 
+ self.exc_type = exc_type + self.exc_value = exc_value + self.frames = frames + + # newly concatenate the frames (which are proxies) + prev_tb = None + for tb in self.frames: + if prev_tb is not None: + prev_tb.set_next(tb) + prev_tb = tb + prev_tb.set_next(None) + + def render_as_text(self, limit=None): + """Return a string with the traceback.""" + lines = traceback.format_exception(self.exc_type, self.exc_value, + self.frames[0], limit=limit) + return ''.join(lines).rstrip() + + def render_as_html(self, full=False): + """Return a unicode string with the traceback as rendered HTML.""" + from jinja2.debugrenderer import render_traceback + return u'%s\n\n' % ( + render_traceback(self, full=full), + self.render_as_text().decode('utf-8', 'replace') + ) + + @property + def is_template_syntax_error(self): + """`True` if this is a template syntax error.""" + return isinstance(self.exc_value, TemplateSyntaxError) + + @property + def exc_info(self): + """Exception info tuple with a proxy around the frame objects.""" + return self.exc_type, self.exc_value, self.frames[0] + + @property + def standard_exc_info(self): + """Standard python exc_info for re-raising""" + tb = self.frames[0] + # the frame will be an actual traceback (or transparent proxy) if + # we are on pypy or a python implementation with support for tproxy + if type(tb) is not TracebackType: + tb = tb.tb + return self.exc_type, self.exc_value, tb + + +def make_traceback(exc_info, source_hint=None): + """Creates a processed traceback object from the exc_info.""" + exc_type, exc_value, tb = exc_info + if isinstance(exc_value, TemplateSyntaxError): + exc_info = translate_syntax_error(exc_value, source_hint) + initial_skip = 0 + else: + initial_skip = 1 + return translate_exception(exc_info, initial_skip) + + +def translate_syntax_error(error, source=None): + """Rewrites a syntax error to please traceback systems.""" + error.source = source + error.translated = True + exc_info = (error.__class__, error, None) + 
filename = error.filename + if filename is None: + filename = '' + return fake_exc_info(exc_info, filename, error.lineno) + + +def translate_exception(exc_info, initial_skip=0): + """If passed an exc_info it will automatically rewrite the exceptions + all the way down to the correct line numbers and frames. + """ + tb = exc_info[2] + frames = [] + + # skip some internal frames if wanted + for x in xrange(initial_skip): + if tb is not None: + tb = tb.tb_next + initial_tb = tb + + while tb is not None: + # skip frames decorated with @internalcode. These are internal + # calls we can't avoid and that are useless in template debugging + # output. + if tb.tb_frame.f_code in internal_code: + tb = tb.tb_next + continue + + # save a reference to the next frame if we override the current + # one with a faked one. + next = tb.tb_next + + # fake template exceptions + template = tb.tb_frame.f_globals.get('__jinja_template__') + if template is not None: + lineno = template.get_corresponding_lineno(tb.tb_lineno) + tb = fake_exc_info(exc_info[:2] + (tb,), template.filename, + lineno)[2] + + frames.append(make_frame_proxy(tb)) + tb = next + + # if we don't have any exceptions in the frames left, we have to + # reraise it unchanged. + # XXX: can we backup here? when could this happen? + if not frames: + raise exc_info[0], exc_info[1], exc_info[2] + + return ProcessedTraceback(exc_info[0], exc_info[1], frames) + + +def fake_exc_info(exc_info, filename, lineno): + """Helper for `translate_exception`.""" + exc_type, exc_value, tb = exc_info + + # figure the real context out + if tb is not None: + real_locals = tb.tb_frame.f_locals.copy() + ctx = real_locals.get('context') + if ctx: + locals = ctx.get_all() + else: + locals = {} + for name, value in real_locals.iteritems(): + if name.startswith('l_') and value is not missing: + locals[name[2:]] = value + + # if there is a local called __jinja_exception__, we get + # rid of it to not break the debug functionality. 
+ locals.pop('__jinja_exception__', None) + else: + locals = {} + + # assamble fake globals we need + globals = { + '__name__': filename, + '__file__': filename, + '__jinja_exception__': exc_info[:2], + + # we don't want to keep the reference to the template around + # to not cause circular dependencies, but we mark it as Jinja + # frame for the ProcessedTraceback + '__jinja_template__': None + } + + # and fake the exception + code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec') + + # if it's possible, change the name of the code. This won't work + # on some python environments such as google appengine + try: + if tb is None: + location = 'template' + else: + function = tb.tb_frame.f_code.co_name + if function == 'root': + location = 'top-level template code' + elif function.startswith('block_'): + location = 'block "%s"' % function[6:] + else: + location = 'template' + code = CodeType(0, code.co_nlocals, code.co_stacksize, + code.co_flags, code.co_code, code.co_consts, + code.co_names, code.co_varnames, filename, + location, code.co_firstlineno, + code.co_lnotab, (), ()) + except: + pass + + # execute the code and catch the new traceback + try: + exec code in globals, locals + except: + exc_info = sys.exc_info() + new_tb = exc_info[2].tb_next + + # return without this frame + return exc_info[:2] + (new_tb,) + + +def _init_ugly_crap(): + """This function implements a few ugly things so that we can patch the + traceback objects. The function returned allows resetting `tb_next` on + any python traceback object. 
Do not attempt to use this on non cpython + interpreters + """ + import ctypes + from types import TracebackType + + # figure out side of _Py_ssize_t + if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): + _Py_ssize_t = ctypes.c_int64 + else: + _Py_ssize_t = ctypes.c_int + + # regular python + class _PyObject(ctypes.Structure): + pass + _PyObject._fields_ = [ + ('ob_refcnt', _Py_ssize_t), + ('ob_type', ctypes.POINTER(_PyObject)) + ] + + # python with trace + if hasattr(sys, 'getobjects'): + class _PyObject(ctypes.Structure): + pass + _PyObject._fields_ = [ + ('_ob_next', ctypes.POINTER(_PyObject)), + ('_ob_prev', ctypes.POINTER(_PyObject)), + ('ob_refcnt', _Py_ssize_t), + ('ob_type', ctypes.POINTER(_PyObject)) + ] + + class _Traceback(_PyObject): + pass + _Traceback._fields_ = [ + ('tb_next', ctypes.POINTER(_Traceback)), + ('tb_frame', ctypes.POINTER(_PyObject)), + ('tb_lasti', ctypes.c_int), + ('tb_lineno', ctypes.c_int) + ] + + def tb_set_next(tb, next): + """Set the tb_next attribute of a traceback object.""" + if not (isinstance(tb, TracebackType) and + (next is None or isinstance(next, TracebackType))): + raise TypeError('tb_set_next arguments must be traceback objects') + obj = _Traceback.from_address(id(tb)) + if tb.tb_next is not None: + old = _Traceback.from_address(id(tb.tb_next)) + old.ob_refcnt -= 1 + if next is None: + obj.tb_next = ctypes.POINTER(_Traceback)() + else: + next = _Traceback.from_address(id(next)) + next.ob_refcnt += 1 + obj.tb_next = ctypes.pointer(next) + + return tb_set_next + + +# try to get a tb_set_next implementation if we don't have transparent +# proxies. 
+tb_set_next = None +if tproxy is None: + try: + from jinja2._debugsupport import tb_set_next + except ImportError: + try: + tb_set_next = _init_ugly_crap() + except: + pass + del _init_ugly_crap diff --git a/libs/jinja2/defaults.py b/libs/jinja2/defaults.py new file mode 100644 index 0000000..d2d4544 --- /dev/null +++ b/libs/jinja2/defaults.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +""" + jinja2.defaults + ~~~~~~~~~~~~~~~ + + Jinja default filters and tags. + + :copyright: (c) 2010 by the Jinja Team. + :license: BSD, see LICENSE for more details. +""" +from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner + + +# defaults for the parser / lexer +BLOCK_START_STRING = '{%' +BLOCK_END_STRING = '%}' +VARIABLE_START_STRING = '{{' +VARIABLE_END_STRING = '}}' +COMMENT_START_STRING = '{#' +COMMENT_END_STRING = '#}' +LINE_STATEMENT_PREFIX = None +LINE_COMMENT_PREFIX = None +TRIM_BLOCKS = False +NEWLINE_SEQUENCE = '\n' + + +# default filters, tests and namespace +from jinja2.filters import FILTERS as DEFAULT_FILTERS +from jinja2.tests import TESTS as DEFAULT_TESTS +DEFAULT_NAMESPACE = { + 'range': xrange, + 'dict': lambda **kw: kw, + 'lipsum': generate_lorem_ipsum, + 'cycler': Cycler, + 'joiner': Joiner +} + + +# export all constants +__all__ = tuple(x for x in locals().keys() if x.isupper()) diff --git a/libs/jinja2/environment.py b/libs/jinja2/environment.py new file mode 100644 index 0000000..7a9a59f --- /dev/null +++ b/libs/jinja2/environment.py @@ -0,0 +1,1121 @@ +# -*- coding: utf-8 -*- +""" + jinja2.environment + ~~~~~~~~~~~~~~~~~~ + + Provides a class that holds runtime and parsing time options. + + :copyright: (c) 2010 by the Jinja Team. + :license: BSD, see LICENSE for more details. 
+""" +import os +import sys +from jinja2 import nodes +from jinja2.defaults import * +from jinja2.lexer import get_lexer, TokenStream +from jinja2.parser import Parser +from jinja2.optimizer import optimize +from jinja2.compiler import generate +from jinja2.runtime import Undefined, new_context +from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ + TemplatesNotFound +from jinja2.utils import import_string, LRUCache, Markup, missing, \ + concat, consume, internalcode, _encode_filename + + +# for direct template usage we have up to ten living environments +_spontaneous_environments = LRUCache(10) + +# the function to create jinja traceback objects. This is dynamically +# imported on the first exception in the exception handler. +_make_traceback = None + + +def get_spontaneous_environment(*args): + """Return a new spontaneous environment. A spontaneous environment is an + unnamed and unaccessible (in theory) environment that is used for + templates generated from a string and not from the file system. + """ + try: + env = _spontaneous_environments.get(args) + except TypeError: + return Environment(*args) + if env is not None: + return env + _spontaneous_environments[args] = env = Environment(*args) + env.shared = True + return env + + +def create_cache(size): + """Return the cache class for the given size.""" + if size == 0: + return None + if size < 0: + return {} + return LRUCache(size) + + +def copy_cache(cache): + """Create an empty copy of the given cache.""" + if cache is None: + return None + elif type(cache) is dict: + return {} + return LRUCache(cache.capacity) + + +def load_extensions(environment, extensions): + """Load the extensions from the list and bind it to the environment. + Returns a dict of instanciated environments. 
+ """ + result = {} + for extension in extensions: + if isinstance(extension, basestring): + extension = import_string(extension) + result[extension.identifier] = extension(environment) + return result + + +def _environment_sanity_check(environment): + """Perform a sanity check on the environment.""" + assert issubclass(environment.undefined, Undefined), 'undefined must ' \ + 'be a subclass of undefined because filters depend on it.' + assert environment.block_start_string != \ + environment.variable_start_string != \ + environment.comment_start_string, 'block, variable and comment ' \ + 'start strings must be different' + assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ + 'newline_sequence set to unknown line ending string.' + return environment + + +class Environment(object): + r"""The core component of Jinja is the `Environment`. It contains + important shared variables like configuration, filters, tests, + globals and others. Instances of this class may be modified if + they are not shared and if no template was loaded so far. + Modifications on environments after the first template was loaded + will lead to surprising effects and undefined behavior. + + Here the possible initialization parameters: + + `block_start_string` + The string marking the begin of a block. Defaults to ``'{%'``. + + `block_end_string` + The string marking the end of a block. Defaults to ``'%}'``. + + `variable_start_string` + The string marking the begin of a print statement. + Defaults to ``'{{'``. + + `variable_end_string` + The string marking the end of a print statement. Defaults to + ``'}}'``. + + `comment_start_string` + The string marking the begin of a comment. Defaults to ``'{#'``. + + `comment_end_string` + The string marking the end of a comment. Defaults to ``'#}'``. + + `line_statement_prefix` + If given and a string, this will be used as prefix for line based + statements. See also :ref:`line-statements`. 
+ + `line_comment_prefix` + If given and a string, this will be used as prefix for line based + based comments. See also :ref:`line-statements`. + + .. versionadded:: 2.2 + + `trim_blocks` + If this is set to ``True`` the first newline after a block is + removed (block, not variable tag!). Defaults to `False`. + + `newline_sequence` + The sequence that starts a newline. Must be one of ``'\r'``, + ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a + useful default for Linux and OS X systems as well as web + applications. + + `extensions` + List of Jinja extensions to use. This can either be import paths + as strings or extension classes. For more information have a + look at :ref:`the extensions documentation `. + + `optimized` + should the optimizer be enabled? Default is `True`. + + `undefined` + :class:`Undefined` or a subclass of it that is used to represent + undefined values in the template. + + `finalize` + A callable that can be used to process the result of a variable + expression before it is output. For example one can convert + `None` implicitly into an empty string here. + + `autoescape` + If set to true the XML/HTML autoescaping feature is enabled by + default. For more details about auto escaping see + :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also + be a callable that is passed the template name and has to + return `True` or `False` depending on autoescape should be + enabled by default. + + .. versionchanged:: 2.4 + `autoescape` can now be a function + + `loader` + The template loader for this environment. + + `cache_size` + The size of the cache. Per default this is ``50`` which means + that if more than 50 templates are loaded the loader will clean + out the least recently used template. If the cache size is set to + ``0`` templates are recompiled all the time, if the cache size is + ``-1`` the cache will not be cleaned. 
+ + `auto_reload` + Some loaders load templates from locations where the template + sources may change (ie: file system or database). If + `auto_reload` is set to `True` (default) every time a template is + requested the loader checks if the source changed and if yes, it + will reload the template. For higher performance it's possible to + disable that. + + `bytecode_cache` + If set to a bytecode cache object, this object will provide a + cache for the internal Jinja bytecode so that templates don't + have to be parsed if they were not changed. + + See :ref:`bytecode-cache` for more information. + """ + + #: if this environment is sandboxed. Modifying this variable won't make + #: the environment sandboxed though. For a real sandboxed environment + #: have a look at jinja2.sandbox. This flag alone controls the code + #: generation by the compiler. + sandboxed = False + + #: True if the environment is just an overlay + overlayed = False + + #: the environment this environment is linked to if it is an overlay + linked_to = None + + #: shared environments have this set to `True`. A shared environment + #: must not be modified + shared = False + + #: these are currently EXPERIMENTAL undocumented features. + exception_handler = None + exception_formatter = None + + def __init__(self, + block_start_string=BLOCK_START_STRING, + block_end_string=BLOCK_END_STRING, + variable_start_string=VARIABLE_START_STRING, + variable_end_string=VARIABLE_END_STRING, + comment_start_string=COMMENT_START_STRING, + comment_end_string=COMMENT_END_STRING, + line_statement_prefix=LINE_STATEMENT_PREFIX, + line_comment_prefix=LINE_COMMENT_PREFIX, + trim_blocks=TRIM_BLOCKS, + newline_sequence=NEWLINE_SEQUENCE, + extensions=(), + optimized=True, + undefined=Undefined, + finalize=None, + autoescape=False, + loader=None, + cache_size=50, + auto_reload=True, + bytecode_cache=None): + # !!Important notice!! 
+ # The constructor accepts quite a few arguments that should be + # passed by keyword rather than position. However it's important to + # not change the order of arguments because it's used at least + # internally in those cases: + # - spontaneus environments (i18n extension and Template) + # - unittests + # If parameter changes are required only add parameters at the end + # and don't change the arguments (or the defaults!) of the arguments + # existing already. + + # lexer / parser information + self.block_start_string = block_start_string + self.block_end_string = block_end_string + self.variable_start_string = variable_start_string + self.variable_end_string = variable_end_string + self.comment_start_string = comment_start_string + self.comment_end_string = comment_end_string + self.line_statement_prefix = line_statement_prefix + self.line_comment_prefix = line_comment_prefix + self.trim_blocks = trim_blocks + self.newline_sequence = newline_sequence + + # runtime information + self.undefined = undefined + self.optimized = optimized + self.finalize = finalize + self.autoescape = autoescape + + # defaults + self.filters = DEFAULT_FILTERS.copy() + self.tests = DEFAULT_TESTS.copy() + self.globals = DEFAULT_NAMESPACE.copy() + + # set the loader provided + self.loader = loader + self.bytecode_cache = None + self.cache = create_cache(cache_size) + self.bytecode_cache = bytecode_cache + self.auto_reload = auto_reload + + # load extensions + self.extensions = load_extensions(self, extensions) + + _environment_sanity_check(self) + + def add_extension(self, extension): + """Adds an extension after the environment was created. + + .. versionadded:: 2.5 + """ + self.extensions.update(load_extensions(self, [extension])) + + def extend(self, **attributes): + """Add the items to the instance of the environment if they do not exist + yet. This is used by :ref:`extensions ` to register + callbacks and configuration values without breaking inheritance. 
+ """ + for key, value in attributes.iteritems(): + if not hasattr(self, key): + setattr(self, key, value) + + def overlay(self, block_start_string=missing, block_end_string=missing, + variable_start_string=missing, variable_end_string=missing, + comment_start_string=missing, comment_end_string=missing, + line_statement_prefix=missing, line_comment_prefix=missing, + trim_blocks=missing, extensions=missing, optimized=missing, + undefined=missing, finalize=missing, autoescape=missing, + loader=missing, cache_size=missing, auto_reload=missing, + bytecode_cache=missing): + """Create a new overlay environment that shares all the data with the + current environment except of cache and the overridden attributes. + Extensions cannot be removed for an overlayed environment. An overlayed + environment automatically gets all the extensions of the environment it + is linked to plus optional extra extensions. + + Creating overlays should happen after the initial environment was set + up completely. Not all attributes are truly linked, some are just + copied over so modifications on the original environment may not shine + through. 
+ """ + args = dict(locals()) + del args['self'], args['cache_size'], args['extensions'] + + rv = object.__new__(self.__class__) + rv.__dict__.update(self.__dict__) + rv.overlayed = True + rv.linked_to = self + + for key, value in args.iteritems(): + if value is not missing: + setattr(rv, key, value) + + if cache_size is not missing: + rv.cache = create_cache(cache_size) + else: + rv.cache = copy_cache(self.cache) + + rv.extensions = {} + for key, value in self.extensions.iteritems(): + rv.extensions[key] = value.bind(rv) + if extensions is not missing: + rv.extensions.update(load_extensions(rv, extensions)) + + return _environment_sanity_check(rv) + + lexer = property(get_lexer, doc="The lexer for this environment.") + + def iter_extensions(self): + """Iterates over the extensions by priority.""" + return iter(sorted(self.extensions.values(), + key=lambda x: x.priority)) + + def getitem(self, obj, argument): + """Get an item or attribute of an object but prefer the item.""" + try: + return obj[argument] + except (TypeError, LookupError): + if isinstance(argument, basestring): + try: + attr = str(argument) + except Exception: + pass + else: + try: + return getattr(obj, attr) + except AttributeError: + pass + return self.undefined(obj=obj, name=argument) + + def getattr(self, obj, attribute): + """Get an item or attribute of an object but prefer the attribute. + Unlike :meth:`getitem` the attribute *must* be a bytestring. + """ + try: + return getattr(obj, attribute) + except AttributeError: + pass + try: + return obj[attribute] + except (TypeError, LookupError, AttributeError): + return self.undefined(obj=obj, name=attribute) + + @internalcode + def parse(self, source, name=None, filename=None): + """Parse the sourcecode and return the abstract syntax tree. This + tree of nodes is used by the compiler to convert the template into + executable source- or bytecode. This is useful for debugging or to + extract information from templates. 
+ + If you are :ref:`developing Jinja2 extensions ` + this gives you a good overview of the node tree generated. + """ + try: + return self._parse(source, name, filename) + except TemplateSyntaxError: + exc_info = sys.exc_info() + self.handle_exception(exc_info, source_hint=source) + + def _parse(self, source, name, filename): + """Internal parsing function used by `parse` and `compile`.""" + return Parser(self, source, name, _encode_filename(filename)).parse() + + def lex(self, source, name=None, filename=None): + """Lex the given sourcecode and return a generator that yields + tokens as tuples in the form ``(lineno, token_type, value)``. + This can be useful for :ref:`extension development ` + and debugging templates. + + This does not perform preprocessing. If you want the preprocessing + of the extensions to be applied you have to filter source through + the :meth:`preprocess` method. + """ + source = unicode(source) + try: + return self.lexer.tokeniter(source, name, filename) + except TemplateSyntaxError: + exc_info = sys.exc_info() + self.handle_exception(exc_info, source_hint=source) + + def preprocess(self, source, name=None, filename=None): + """Preprocesses the source with all extensions. This is automatically + called for all parsing and compiling methods but *not* for :meth:`lex` + because there you usually only want the actual source tokenized. + """ + return reduce(lambda s, e: e.preprocess(s, name, filename), + self.iter_extensions(), unicode(source)) + + def _tokenize(self, source, name, filename=None, state=None): + """Called by the parser to do the preprocessing and filtering + for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. 
+ """ + source = self.preprocess(source, name, filename) + stream = self.lexer.tokenize(source, name, filename, state) + for ext in self.iter_extensions(): + stream = ext.filter_stream(stream) + if not isinstance(stream, TokenStream): + stream = TokenStream(stream, name, filename) + return stream + + def _generate(self, source, name, filename, defer_init=False): + """Internal hook that can be overriden to hook a different generate + method in. + + .. versionadded:: 2.5 + """ + return generate(source, self, name, filename, defer_init=defer_init) + + def _compile(self, source, filename): + """Internal hook that can be overriden to hook a different compile + method in. + + .. versionadded:: 2.5 + """ + return compile(source, filename, 'exec') + + @internalcode + def compile(self, source, name=None, filename=None, raw=False, + defer_init=False): + """Compile a node or template source code. The `name` parameter is + the load name of the template after it was joined using + :meth:`join_path` if necessary, not the filename on the file system. + the `filename` parameter is the estimated filename of the template on + the file system. If the template came from a database or memory this + can be omitted. + + The return value of this method is a python code object. If the `raw` + parameter is `True` the return value will be a string with python + code equivalent to the bytecode returned otherwise. This method is + mainly used internally. + + `defer_init` is use internally to aid the module code generator. This + causes the generated code to be able to import without the global + environment variable to be set. + + .. versionadded:: 2.4 + `defer_init` parameter added. 
+ """ + source_hint = None + try: + if isinstance(source, basestring): + source_hint = source + source = self._parse(source, name, filename) + if self.optimized: + source = optimize(source, self) + source = self._generate(source, name, filename, + defer_init=defer_init) + if raw: + return source + if filename is None: + filename = '