
@terrancesnyder
Created August 8, 2024 15:21
JSON encode Lua ConfigMap for Istio
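This ConfigMap packages Jeffrey Friedl's JSON.lua (plus a uuid.lua helper) so that Lua code running in the Istio gateway can encode and decode JSON. A minimal usage sketch, assuming the lua-libs ConfigMap is mounted into the gateway pod at /var/lib/lua and the script runs as an Envoy Lua filter configured through an EnvoyFilter (the mount path and the filter wiring are illustrative, not part of this gist):

  -- hypothetical mount path; adjust to wherever the lua-libs ConfigMap is mounted
  local JSON = assert(loadfile("/var/lib/lua/JSON.lua"))()  -- one-time load, per the library docs

  function envoy_on_response(response_handle)
    -- encode a Lua table to JSON and log it
    response_handle:logInfo(JSON:encode({ status = "ok", gateway = "istio" }))
  end

The ConfigMap itself follows.
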
apiVersion: v1
kind: ConfigMap
metadata:
  annotations:
  name: lua-libs
  namespace: gateways
data:
JSON.lua: "-- -*- coding: utf-8 -*-\n--\n-- Simple JSON encoding and decoding in
pure Lua.\n--\n-- Copyright 2010-2017 Jeffrey Friedl\n-- http://regex.info/blog/\n--
Latest version: http://regex.info/blog/lua/json\n--\n-- This code is released
under a Creative Commons CC-BY \"Attribution\" License:\n-- http://creativecommons.org/licenses/by/3.0/deed.en_US\n--\n--
It can be used for any purpose so long as:\n-- 1) the copyright notice above
is maintained\n-- 2) the web-page links above are maintained\n-- 3) the
'AUTHOR_NOTE' string below is maintained\n--\nlocal VERSION = '20170927.26' --
version history at end of file\nlocal AUTHOR_NOTE = \"-[ JSON.lua package by Jeffrey
Friedl (http://regex.info/blog/lua/json) version 20170927.26 ]-\"\n\n--\n-- The
'AUTHOR_NOTE' variable exists so that information about the source\n-- of the
package is maintained even in compiled versions. It's also\n-- included in OBJDEF
below mostly to quiet warnings about unused variables.\n--\nlocal OBJDEF = {\n
\ VERSION = VERSION,\n AUTHOR_NOTE = AUTHOR_NOTE,\n}\n\n\n--\n-- Simple
JSON encoding and decoding in pure Lua.\n-- JSON definition: http://www.json.org/\n--\n--\n--
\ JSON = assert(loadfile \"JSON.lua\")() -- one-time load of the routines\n--\n--
\ local lua_value = JSON:decode(raw_json_text)\n--\n-- local raw_json_text
\ = JSON:encode(lua_table_or_value)\n-- local pretty_json_text = JSON:encode_pretty(lua_table_or_value)
-- \"pretty printed\" version for human readability\n--\n--\n--\n-- DECODING (from
a JSON string to a Lua table)\n--\n--\n-- JSON = assert(loadfile \"JSON.lua\")()
-- one-time load of the routines\n--\n-- local lua_value = JSON:decode(raw_json_text)\n--\n--
\ If the JSON text is for an object or an array, e.g.\n-- { \"what\": \"books\",
\"count\": 3 }\n-- or\n-- [ \"Larry\", \"Curly\", \"Moe\" ]\n--\n-- the
result is a Lua table, e.g.\n-- { what = \"books\", count = 3 }\n-- or\n--
\ { \"Larry\", \"Curly\", \"Moe\" }\n--\n--\n-- The encode and decode routines
accept an optional second argument,\n-- \"etc\", which is not used during encoding
or decoding, but upon error\n-- is passed along to error handlers. It can be
of any type (including nil).\n--\n--\n--\n-- ERROR HANDLING DURING DECODE\n--\n--
\ With most errors during decoding, this code calls\n--\n-- JSON:onDecodeError(message,
text, location, etc)\n--\n-- with a message about the error, and if known, the
JSON text being\n-- parsed and the byte count where the problem was discovered.
You can\n-- replace the default JSON:onDecodeError() with your own function.\n--\n--
\ The default onDecodeError() merely augments the message with data\n-- about
the text and the location (and, if an 'etc' argument had been\n-- provided to decode(),
its value is tacked onto the message as well),\n-- and then calls JSON.assert(),
which itself defaults to Lua's built-in\n-- assert(), and can also be overridden.\n--\n--
\ For example, in an Adobe Lightroom plugin, you might use something like\n--\n--
\ function JSON:onDecodeError(message, text, location, etc)\n-- LrErrors.throwUserError(\"Internal
Error: invalid JSON data\")\n-- end\n--\n-- or even just\n--\n-- function
JSON.assert(message)\n-- LrErrors.throwUserError(\"Internal Error:
\" .. message)\n-- end\n--\n-- If JSON:decode() is passed a nil, this
is called instead:\n--\n-- JSON:onDecodeOfNilError(message, nil, nil, etc)\n--\n--
\ and if JSON:decode() is passed HTML instead of JSON, this is called:\n--\n--
\ JSON:onDecodeOfHTMLError(message, text, nil, etc)\n--\n-- The use of the
'etc' argument allows stronger coordination between\n-- decoding and error reporting,
especially when you provide your own\n-- error-handling routines. Continuing
with the Adobe Lightroom\n-- plugin example:\n--\n-- function JSON:onDecodeError(message,
text, location, etc)\n-- local note = \"Internal Error: invalid JSON
data\"\n-- if type(etc) = 'table' and etc.photo then\n-- note
= note .. \" while processing for \" .. etc.photo:getFormattedMetadata('fileName')\n--
\ end\n-- LrErrors.throwUserError(note)\n-- end\n--\n--
\ :\n-- :\n--\n-- for i, photo in ipairs(photosToProcess)
do\n-- : \n-- : \n-- local
data = JSON:decode(someJsonText, { photo = photo })\n-- : \n--
\ : \n-- end\n--\n--\n--\n-- If the JSON text
passed to decode() has trailing garbage (e.g. as with the JSON \"[123]xyzzy\"),\n--
\ the method\n--\n-- JSON:onTrailingGarbage(json_text, location, parsed_value,
etc)\n--\n-- is invoked, where:\n--\n-- 'json_text' is the original JSON
text being parsed,\n-- 'location' is the count of bytes into 'json_text'
where the garbage starts (6 in the example),\n-- 'parsed_value' is the Lua
result of what was successfully parsed ({123} in the example),\n-- 'etc'
is as above.\n--\n-- If JSON:onTrailingGarbage() does not abort, it should return
the value decode() should return,\n-- or nil + an error message.\n--\n-- local
new_value, error_message = JSON:onTrailingGarbage()\n--\n-- The default JSON:onTrailingGarbage()
simply invokes JSON:onDecodeError(\"trailing garbage\"...),\n-- but you can
have this package ignore trailing garbage via\n--\n-- function JSON:onTrailingGarbage(json_text,
location, parsed_value, etc)\n-- return parsed_value\n-- end\n--\n--\n--
DECODING AND STRICT TYPES\n--\n-- Because both JSON objects and JSON arrays
are converted to Lua tables,\n-- it's not normally possible to tell which original
JSON type a\n-- particular Lua table was derived from, or guarantee decode-encode\n--
\ round-trip equivalency.\n--\n-- However, if you enable strictTypes, e.g.\n--\n--
\ JSON = assert(loadfile \"JSON.lua\")() --load the routines\n-- JSON.strictTypes
= true\n--\n-- then the Lua table resulting from the decoding of a JSON object
or\n-- JSON array is marked via Lua metatable, so that when re-encoded with\n--
\ JSON:encode() it ends up as the appropriate JSON type.\n--\n-- (This is not
the default because other routines may not work well with\n-- tables that have
a metatable set, for example, Lightroom API calls.)\n--\n--\n-- ENCODING (from
a lua table to a JSON string)\n--\n-- JSON = assert(loadfile \"JSON.lua\")()
-- one-time load of the routines\n--\n-- local raw_json_text = JSON:encode(lua_table_or_value)\n--
\ local pretty_json_text = JSON:encode_pretty(lua_table_or_value) -- \"pretty
printed\" version for human readability\n-- local custom_pretty = JSON:encode(lua_table_or_value,
etc, { pretty = true, indent = \"| \", align_keys = false })\n--\n-- On error
during encoding, this code calls:\n--\n-- JSON:onEncodeError(message, etc)\n--\n--
\ which you can override in your local JSON object. Also see \"HANDLING UNSUPPORTED
VALUE TYPES\" below.\n--\n-- The 'etc' in the error call is the second argument
to encode() and encode_pretty(), or nil if it wasn't provided.\n--\n--\n--\n--\n--
ENCODING OPTIONS\n--\n-- An optional third argument, a table of options, can
be provided to encode().\n--\n-- encode_options = {\n-- -- options
for making \"pretty\" human-readable JSON (see \"PRETTY-PRINTING\" below)\n--
\ pretty = true, -- turn pretty formatting on\n-- indent
\ = \" \", -- use this indent for each level of an array/object\n--
\ align_keys = false, -- if true, align the keys in a way that sounds
like it should be nice, but is actually ugly\n-- array_newline = false,
\ -- if true, array elements become one to a line rather than inline\n-- \n--
\ -- other output-related options\n-- null = \"\\0\",
\ -- see \"ENCODING JSON NULL VALUES\" below\n-- stringsAreUtf8 = false,
\ -- see \"HANDLING UNICODE LINE AND PARAGRAPH SEPARATORS FOR JAVA\" below\n--
\ }\n-- \n-- json_string = JSON:encode(mytable, etc, encode_options)\n--\n--\n--\n--
For reference, the defaults are:\n--\n-- pretty = false\n--
\ null = nil,\n-- stringsAreUtf8 = false,\n--\n--\n--\n--
PRETTY-PRINTING\n--\n-- Enabling the 'pretty' encode option helps generate human-readable
JSON.\n--\n-- pretty = JSON:encode(val, etc, {\n-- pretty
= true,\n-- indent = \" \",\n-- align_keys
= false,\n-- })\n--\n-- encode_pretty()
is also provided: it's identical to encode() except\n-- that encode_pretty()
provides a default options table if none given in the call:\n--\n-- { pretty
= true, indent = \" \", align_keys = false, array_newline = false }\n--\n-- For
example, if\n--\n-- JSON:encode(data)\n--\n-- produces:\n--\n-- {\"city\":\"Kyoto\",\"climate\":{\"avg_temp\":16,\"humidity\":\"high\",\"snowfall\":\"minimal\"},\"country\":\"Japan\",\"wards\":11}\n--\n--
\ then\n--\n-- JSON:encode_pretty(data)\n--\n-- produces:\n--\n-- {\n--
\ \"city\": \"Kyoto\",\n-- \"climate\": {\n-- \"avg_temp\":
16,\n-- \"humidity\": \"high\",\n-- \"snowfall\": \"minimal\"\n--
\ },\n-- \"country\": \"Japan\",\n-- \"wards\": 11\n-- }\n--\n--
\ The following lines all return identical strings:\n-- JSON:encode_pretty(data)\n--
\ JSON:encode_pretty(data, nil, { pretty = true, indent = \" \", align_keys
= false, array_newline = false})\n-- JSON:encode_pretty(data, nil, { pretty
= true, indent = \" \" })\n-- JSON:encode (data, nil, { pretty =
true, indent = \" \" })\n--\n-- An example of setting your own indent string:\n--\n--
\ JSON:encode_pretty(data, nil, { pretty = true, indent = \"| \" })\n--\n--
\ produces:\n--\n-- {\n-- | \"city\": \"Kyoto\",\n-- | \"climate\":
{\n-- | | \"avg_temp\": 16,\n-- | | \"humidity\": \"high\",\n--
\ | | \"snowfall\": \"minimal\"\n-- | },\n-- | \"country\":
\"Japan\",\n-- | \"wards\": 11\n-- }\n--\n-- An example of setting
align_keys to true:\n--\n-- JSON:encode_pretty(data, nil, { pretty = true,
indent = \" \", align_keys = true })\n-- \n-- produces:\n-- \n-- {\n--
\ \"city\": \"Kyoto\",\n-- \"climate\": {\n-- \"avg_temp\":
16,\n-- \"humidity\": \"high\",\n-- \"snowfall\":
\"minimal\"\n-- },\n-- \"country\": \"Japan\",\n-- \"wards\":
11\n-- }\n--\n-- which I must admit is kinda ugly, sorry. This was the
default for\n-- encode_pretty() prior to version 20141223.14.\n--\n--\n-- HANDLING
UNICODE LINE AND PARAGRAPH SEPARATORS FOR JAVA\n--\n-- If the 'stringsAreUtf8'
encode option is set to true, consider Lua strings not as a sequence of bytes,\n--
\ but as a sequence of UTF-8 characters.\n--\n-- Currently, the only practical
effect of setting this option is that Unicode LINE and PARAGRAPH\n-- separators,
if found in a string, are encoded with a JSON escape instead of being dumped as
is.\n-- The JSON is valid either way, but encoding this way, apparently, allows
the resulting JSON\n-- to also be valid Java.\n--\n-- AMBIGUOUS SITUATIONS
DURING THE ENCODING\n--\n-- During the encode, if a Lua table being encoded
contains both string\n-- and numeric keys, it fits neither JSON's idea of an
object, nor its\n-- idea of an array. To get around this, when any string key
exists (or\n-- when non-positive numeric keys exist), numeric keys are converted
to\n-- strings.\n--\n-- For example, \n-- JSON:encode({ \"one\", \"two\",
\"three\", SOMESTRING = \"some string\" }))\n-- produces the JSON object\n--
\ {\"1\":\"one\",\"2\":\"two\",\"3\":\"three\",\"SOMESTRING\":\"some string\"}\n--\n--
\ To prohibit this conversion and instead make it an error condition, set\n--
\ JSON.noKeyConversion = true\n--\n--\n-- ENCODING JSON NULL VALUES\n--\n--
\ Lua tables completely omit keys whose value is nil, so without special handling
there's\n-- no way to represent JSON object's null value in a Lua table. For
example\n-- JSON:encode({ username = \"admin\", password = nil })\n--\n--
\ produces:\n--\n-- {\"username\":\"admin\"}\n--\n-- In order to actually
produce\n--\n-- {\"username\":\"admin\", \"password\":null}\n--\n\n-- one
can include a string value for a \"null\" field in the options table passed to
encode().... \n-- any Lua table entry with that value becomes null in the JSON
output:\n--\n-- JSON:encode({ username = \"admin\", password = \"xyzzy\"
}, -- First arg is the Lua table to encode as JSON.\n-- nil,
\ -- Second arg is the 'etc' value, ignored
here\n-- { null = \"xyzzy\" }) -- Third
arg is the options table\n--\n-- produces:\n--\n-- {\"username\":\"admin\",
\"password\":null}\n--\n-- Just be sure to use a string that is otherwise unlikely
to appear in your data.\n-- The string \"\\0\" (a string with one null byte)
may well be appropriate for many applications.\n--\n-- The \"null\" option
also applies to Lua tables that become JSON arrays.\n-- JSON:encode({ \"one\",
\"two\", nil, nil })\n--\n-- produces\n--\n-- [\"one\",\"two\"]\n--\n--
\ while\n--\n-- NullPlaceholder = \"\\0\"\n-- encode_options = { null
= NullPlaceholder }\n-- JSON:encode({ \"one\", \"two\", NullPlaceholder,
NullPlaceholder}, nil, encode_options)\n-- produces\n--\n-- [\"one\",\"two\",null,null]\n--\n--\n--\n--
HANDLING LARGE AND/OR PRECISE NUMBERS\n--\n--\n-- Without special handling,
numbers in JSON can lose precision in Lua.\n-- For example:\n-- \n-- T
= JSON:decode('{ \"small\":12345, \"big\":12345678901234567890123456789, \"precise\":9876.67890123456789012345
\ }')\n--\n-- print(\"small: \", type(T.small), T.small)\n-- print(\"big:
\ \", type(T.big), T.big)\n-- print(\"precise: \", type(T.precise),
\ T.precise)\n-- \n-- produces\n-- \n-- small: number 12345\n--
\ big: number 1.2345678901235e+28\n-- precise: number
\ 9876.6789012346\n--\n-- Precision is lost with both 'big' and 'precise'.\n--\n--
\ This package offers ways to try to handle this better (for some definitions
of \"better\")...\n--\n-- The most precise method is by setting the global:\n--
\ \n-- JSON.decodeNumbersAsObjects = true\n-- \n-- When this is set,
numeric JSON data is encoded into Lua in a form that preserves the exact\n-- JSON
numeric presentation when re-encoded back out to JSON, or accessed in Lua as a
string.\n--\n-- This is done by encoding the numeric data with a Lua table/metatable
that returns\n-- the possibly-imprecise numeric form when accessed numerically,
but the original precise\n-- representation when accessed as a string.\n--\n--
\ Consider the example above, with this option turned on:\n--\n-- JSON.decodeNumbersAsObjects
= true\n-- \n-- T = JSON:decode('{ \"small\":12345, \"big\":12345678901234567890123456789,
\"precise\":9876.67890123456789012345 }')\n--\n-- print(\"small: \", type(T.small),
\ T.small)\n-- print(\"big: \", type(T.big), T.big)\n-- print(\"precise:
\", type(T.precise), T.precise)\n-- \n-- This now produces:\n-- \n-- small:
\ table 12345\n-- big: table 12345678901234567890123456789\n--
\ precise: table 9876.67890123456789012345\n-- \n-- However,
within Lua you can still use the values (e.g. T.precise in the example above)
in numeric\n-- contexts. In such cases you'll get the possibly-imprecise numeric
version, but in string contexts\n-- and when the data finds its way to this
package's encode() function, the original full-precision\n-- representation
is used.\n--\n-- You can force access to the string or numeric version via\n--
\ JSON:forceString()\n-- JSON:forceNumber()\n-- For example,\n--
\ local probably_okay = JSON:forceNumber(T.small) -- 'probably_okay' is
a number\n--\n-- Code that inspects the JSON-turned-Lua data using type() can
run into troubles because what used to\n-- be a number can now be a table (e.g.
as the small/big/precise example above shows). Update these\n-- situations to
use JSON:isNumber(item), which returns nil if the item is neither a number nor
one\n-- of these number objects. If it is either, it returns the number itself.
For completeness there's\n-- also JSON:isString(item).\n--\n-- If you want
to try to avoid the hassles of this \"number as an object\" kludge for all but
really\n-- big numbers, you can set JSON.decodeNumbersAsObjects and then also
set one or both of\n-- JSON:decodeIntegerObjectificationLength\n--
\ JSON:decodeDecimalObjectificationLength\n-- They refer to the length
of the part of the number before and after a decimal point. If they are\n-- set
and their part is at least that number of digits, objectification occurs. If both
are set,\n-- objectification occurs when either length is met.\n--\n-- -----------------------\n--\n--
\ Even without using the JSON.decodeNumbersAsObjects option, you can encode numbers
in your Lua\n-- table that retain high precision upon encoding to JSON, by using
the JSON:asNumber() function:\n--\n-- T = {\n-- imprecise = 123456789123456789.123456789123456789,\n--
\ precise = JSON:asNumber(\"123456789123456789.123456789123456789\")\n--
\ }\n--\n-- print(JSON:encode_pretty(T))\n--\n-- This produces:\n--\n--
\ { \n-- \"precise\": 123456789123456789.123456789123456789,\n-- \"imprecise\":
1.2345678912346e+17\n-- }\n--\n--\n-- -----------------------\n--\n-- A
different way to handle big/precise JSON numbers is to have decode() merely return
the exact\n-- string representation of the number instead of the number itself.
This approach might be useful\n-- when the numbers are merely some kind of opaque
object identifier and you want to work with them\n-- in Lua as strings anyway.\n--
\ \n-- This approach is enabled by setting\n--\n-- JSON.decodeIntegerStringificationLength
= 10\n--\n-- The value is the number of digits (of the integer part of the number)
at which to stringify numbers.\n-- NOTE: this setting is ignored if JSON.decodeNumbersAsObjects
is true, as that takes precedence.\n--\n-- Consider our previous example with
this option set to 10:\n--\n-- JSON.decodeIntegerStringificationLength =
10\n-- \n-- T = JSON:decode('{ \"small\":12345, \"big\":12345678901234567890123456789,
\"precise\":9876.67890123456789012345 }')\n--\n-- print(\"small: \", type(T.small),
\ T.small)\n-- print(\"big: \", type(T.big), T.big)\n-- print(\"precise:
\", type(T.precise), T.precise)\n--\n-- This produces:\n--\n-- small:
\ number 12345\n-- big: string 12345678901234567890123456789\n--
\ precise: number 9876.6789012346\n--\n-- The long integer of the
'big' field is at least JSON.decodeIntegerStringificationLength digits\n-- in
length, so it's converted not to a Lua integer but to a Lua string. Using a value
of 0 or 1 ensures\n-- that all JSON numeric data becomes strings in Lua.\n--\n--
\ Note that unlike\n-- JSON.decodeNumbersAsObjects = true\n-- this stringification
is simple and unintelligent: the JSON number simply becomes a Lua string, and
that's the end of it.\n-- If the string is then converted back to JSON, it's
still a string. After running the code above, adding\n-- print(JSON:encode(T))\n--
\ produces\n-- {\"big\":\"12345678901234567890123456789\",\"precise\":9876.6789012346,\"small\":12345}\n--
\ which is unlikely to be desired.\n--\n-- There's a comparable option for
the length of the decimal part of a number:\n--\n-- JSON.decodeDecimalStringificationLength\n--\n--
\ This can be used alone or in conjunction with\n--\n-- JSON.decodeIntegerStringificationLength\n--\n--
\ to trip stringification on precise numbers with at least JSON.decodeIntegerStringificationLength
digits after\n-- the decimal point. (Both are ignored if JSON.decodeNumbersAsObjects
is true.)\n--\n-- This example:\n--\n-- JSON.decodeIntegerStringificationLength
= 10\n-- JSON.decodeDecimalStringificationLength = 5\n--\n-- T = JSON:decode('{
\ \"small\":12345, \"big\":12345678901234567890123456789, \"precise\":9876.67890123456789012345
\ }')\n-- \n-- print(\"small: \", type(T.small), T.small)\n--
\ print(\"big: \", type(T.big), T.big)\n-- print(\"precise:
\", type(T.precise), T.precise)\n--\n-- produces:\n--\n-- small: number
\ 12345\n-- big: string 12345678901234567890123456789\n-- precise:
\ string 9876.67890123456789012345\n--\n--\n-- HANDLING UNSUPPORTED VALUE
TYPES\n--\n-- Among the encoding errors that might be raised is an attempt to
convert a table value that has a type\n-- that this package hasn't accounted
for: a function, userdata, or a thread. You can handle these types as table\n--
\ values (but not as table keys) if you supply a JSON:unsupportedTypeEncoder()
method along the lines of the\n-- following example:\n-- \n-- function
JSON:unsupportedTypeEncoder(value_of_unsupported_type)\n-- if type(value_of_unsupported_type)
== 'function' then\n-- return \"a function value\"\n-- else\n--
\ return nil\n-- end\n-- end\n-- \n-- Your
unsupportedTypeEncoder() method is actually called with a bunch of arguments:\n--\n--
\ self:unsupportedTypeEncoder(value, parents, etc, options, indent, for_key)\n--\n--
\ The 'value' is the function, thread, or userdata to be converted to JSON.\n--\n--
\ The 'etc' and 'options' arguments are those passed to the original encode().
The other arguments are\n-- probably of little interest; see the source code.
(Note that 'for_key' is never true, as this function\n-- is invoked only on
table values; table keys of these types still trigger the onEncodeError method.)\n--\n--
\ If your unsupportedTypeEncoder() method returns a string, it's inserted into
the JSON as is.\n-- If it returns nil plus an error message, that error message
is passed through to an onEncodeError invocation.\n-- If it returns only nil,
processing falls through to a default onEncodeError invocation.\n--\n-- If you
want to handle everything in a simple way:\n--\n-- function JSON:unsupportedTypeEncoder(value)\n--
\ return tostring(value)\n-- end\n--\n--\n-- SUMMARY OF METHODS
YOU CAN OVERRIDE IN YOUR LOCAL LUA JSON OBJECT\n--\n-- assert\n-- onDecodeError\n--
\ onDecodeOfNilError\n-- onDecodeOfHTMLError\n-- onTrailingGarbage\n--
\ onEncodeError\n-- unsupportedTypeEncoder\n--\n-- If you want to create
a separate Lua JSON object with its own error handlers,\n-- you can reload JSON.lua
or use the :new() method.\n--\n---------------------------------------------------------------------------\n\nlocal
default_pretty_indent = \" \"\nlocal default_pretty_options = { pretty = true,
indent = default_pretty_indent, align_keys = false, array_newline = false }\n\nlocal
isArray = { __tostring = function() return \"JSON array\" end } isArray.__index
\ = isArray\nlocal isObject = { __tostring = function() return \"JSON object\"
\ end } isObject.__index = isObject\n\nfunction OBJDEF:newArray(tbl)\n
\ return setmetatable(tbl or {}, isArray)\nend\n\nfunction OBJDEF:newObject(tbl)\n
\ return setmetatable(tbl or {}, isObject)\nend\n\n\n\n\nlocal function getnum(op)\n
\ return type(op) == 'number' and op or op.N\nend\n\nlocal isNumber = {\n __tostring
= function(T) return T.S end,\n __unm = function(op) return getnum(op)
end,\n\n __concat = function(op1, op2) return tostring(op1) .. tostring(op2)
end,\n __add = function(op1, op2) return getnum(op1) + getnum(op2)
\ end,\n __sub = function(op1, op2) return getnum(op1) - getnum(op2)
\ end,\n __mul = function(op1, op2) return getnum(op1) * getnum(op2)
\ end,\n __div = function(op1, op2) return getnum(op1) / getnum(op2)
\ end,\n __mod = function(op1, op2) return getnum(op1) % getnum(op2)
\ end,\n __pow = function(op1, op2) return getnum(op1) ^ getnum(op2)
\ end,\n __lt = function(op1, op2) return getnum(op1) < getnum(op2)
\ end,\n __eq = function(op1, op2) return getnum(op1) == getnum(op2)
\ end,\n __le = function(op1, op2) return getnum(op1) <= getnum(op2)
\ end,\n}\nisNumber.__index = isNumber\n\nfunction OBJDEF:asNumber(item)\n\n if
getmetatable(item) == isNumber then\n -- it's already a JSON number object.\n
\ return item\n elseif type(item) == 'table' and type(item.S) == 'string'
and type(item.N) == 'number' then\n -- it's a number-object table that lost
its metatable, so give it one\n return setmetatable(item, isNumber)\n else\n
\ -- the normal situation... given a number or a string representation of
a number....\n local holder = {\n S = tostring(item), -- S is the
representation of the number as a string, which remains precise\n N =
tonumber(item), -- N is the number as a Lua number.\n }\n return setmetatable(holder,
isNumber)\n end\nend\n\n--\n-- Given an item that might be a normal string or
number, or might be an 'isNumber' object defined above,\n-- return the string
version. This shouldn't be needed often because the 'isNumber' object should autoconvert\n--
to a string in most cases, but it's here to allow it to be forced when needed.\n--\nfunction
OBJDEF:forceString(item)\n if type(item) == 'table' and type(item.S) == 'string'
then\n return item.S\n else\n return tostring(item)\n end\nend\n\n--\n--
Given an item that might be a normal string or number, or might be an 'isNumber'
object defined above,\n-- return the numeric version.\n--\nfunction OBJDEF:forceNumber(item)\n
\ if type(item) == 'table' and type(item.N) == 'number' then\n return item.N\n
\ else\n return tonumber(item)\n end\nend\n\n--\n-- If the given item
is a number, return it. Otherwise, return nil.\n-- Thus, this can be used both
in a conditional and to access the number when you're not sure its form.\n--\nfunction
OBJDEF:isNumber(item)\n if type(item) == 'number' then\n return item\n
\ elseif type(item) == 'table' and type(item.N) == 'number' then\n return
item.N\n else\n return nil\n end\nend\n\nfunction OBJDEF:isString(item)\n
\ if type(item) == 'string' then\n return item\n elseif type(item) ==
'table' and type(item.S) == 'string' then\n return item.S\n else\n return
nil\n end\nend\n\n\nlocal function unicode_codepoint_as_utf8(codepoint)\n --\n
\ -- codepoint is a number\n --\n if codepoint <= 127 then\n return
string.char(codepoint)\n\n elseif codepoint <= 2047 then\n --\n --
110yyyxx 10xxxxxx <-- useful notation from http://en.wikipedia.org/wiki/Utf8\n
\ --\n local highpart = math.floor(codepoint / 0x40)\n local lowpart
\ = codepoint - (0x40 * highpart)\n return string.char(0xC0 + highpart,\n
\ 0x80 + lowpart)\n\n elseif codepoint <= 65535 then\n
\ --\n -- 1110yyyy 10yyyyxx 10xxxxxx\n --\n local highpart
\ = math.floor(codepoint / 0x1000)\n local remainder = codepoint - 0x1000
* highpart\n local midpart = math.floor(remainder / 0x40)\n local
lowpart = remainder - 0x40 * midpart\n\n highpart = 0xE0 + highpart\n midpart
\ = 0x80 + midpart\n lowpart = 0x80 + lowpart\n\n --\n -- Check
for an invalid character (thanks Andy R. at Adobe).\n -- See table 3.7, page
93, in http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf#G28070\n --\n
\ if ( highpart == 0xE0 and midpart < 0xA0 ) or\n ( highpart == 0xED
and midpart > 0x9F ) or\n ( highpart == 0xF0 and midpart < 0x90 ) or\n
\ ( highpart == 0xF4 and midpart > 0x8F )\n then\n return
\"?\"\n else\n return string.char(highpart,\n midpart,\n
\ lowpart)\n end\n\n else\n --\n --
11110zzz 10zzyyyy 10yyyyxx 10xxxxxx\n --\n local highpart = math.floor(codepoint
/ 0x40000)\n local remainder = codepoint - 0x40000 * highpart\n local
midA = math.floor(remainder / 0x1000)\n remainder = remainder
- 0x1000 * midA\n local midB = math.floor(remainder / 0x40)\n local
lowpart = remainder - 0x40 * midB\n\n return string.char(0xF0 + highpart,\n
\ 0x80 + midA,\n 0x80 + midB,\n
\ 0x80 + lowpart)\n end\nend\n\nfunction OBJDEF:onDecodeError(message,
text, location, etc)\n if text then\n if location then\n message
= string.format(\"%s at byte %d of: %s\", message, location, text)\n else\n
\ message = string.format(\"%s: %s\", message, text)\n end\n end\n\n
\ if etc ~= nil then\n message = message .. \" (\" .. OBJDEF:encode(etc)
.. \")\"\n end\n\n if self.assert then\n self.assert(false, message)\n
\ else\n assert(false, message)\n end\nend\n\nfunction OBJDEF:onTrailingGarbage(json_text,
location, parsed_value, etc)\n return self:onDecodeError(\"trailing garbage\",
json_text, location, etc)\nend\n\nOBJDEF.onDecodeOfNilError = OBJDEF.onDecodeError\nOBJDEF.onDecodeOfHTMLError
= OBJDEF.onDecodeError\n\nfunction OBJDEF:onEncodeError(message, etc)\n if etc
~= nil then\n message = message .. \" (\" .. OBJDEF:encode(etc) .. \")\"\n
\ end\n\n if self.assert then\n self.assert(false, message)\n else\n
\ assert(false, message)\n end\nend\n\nlocal function grok_number(self,
text, start, options)\n --\n -- Grab the integer part\n --\n local integer_part
= text:match('^-?[1-9]%d*', start)\n or text:match(\"^-?0\",
\ start)\n\n if not integer_part then\n self:onDecodeError(\"expected
number\", text, start, options.etc)\n return nil, start -- in case the error
method doesn't abort, return something sensible\n end\n\n local i = start
+ integer_part:len()\n\n --\n -- Grab an optional decimal part\n --\n local
decimal_part = text:match('^%.%d+', i) or \"\"\n\n i = i + decimal_part:len()\n\n
\ --\n -- Grab an optional exponential part\n --\n local exponent_part
= text:match('^[eE][-+]?%d+', i) or \"\"\n\n i = i + exponent_part:len()\n\n
\ local full_number_text = integer_part .. decimal_part .. exponent_part\n\n
\ if options.decodeNumbersAsObjects then\n\n local objectify = false\n\n
\ if not options.decodeIntegerObjectificationLength and not options.decodeDecimalObjectificationLength
then\n -- no options, so objectify\n objectify = true\n\n elseif
(options.decodeIntegerObjectificationLength\n and\n (integer_part:len()
>= options.decodeIntegerObjectificationLength or exponent_part:len() > 0))\n\n
\ or\n (options.decodeDecimalObjectificationLength \n and\n
\ (decimal_part:len() >= options.decodeDecimalObjectificationLength or
exponent_part:len() > 0))\n then\n -- have options and they are triggered,
so objectify\n objectify = true\n end\n\n if objectify then\n
\ return OBJDEF:asNumber(full_number_text), i\n end\n -- else,
fall through to try to return as a straight-up number\n\n else\n\n -- Not
always decoding numbers as objects, so perhaps encode as strings?\n\n --\n
\ -- If we're told to stringify only under certain conditions, so do.\n --
We punt a bit when there's an exponent by just stringifying no matter what.\n
\ -- I suppose we should really look to see whether the exponent is actually
big enough one\n -- way or the other to trip stringification, but I'll be
lazy about it until someone asks.\n --\n if (options.decodeIntegerStringificationLength\n
\ and\n (integer_part:len() >= options.decodeIntegerStringificationLength
or exponent_part:len() > 0))\n\n or\n\n (options.decodeDecimalStringificationLength
\n and\n (decimal_part:len() >= options.decodeDecimalStringificationLength
or exponent_part:len() > 0))\n then\n return full_number_text, i
-- this returns the exact string representation seen in the original JSON\n end\n\n
\ end\n\n\n local as_number = tonumber(full_number_text)\n\n if not as_number
then\n self:onDecodeError(\"bad number\", text, start, options.etc)\n return
nil, start -- in case the error method doesn't abort, return something sensible\n
\ end\n\n return as_number, i\nend\n\n\nlocal function grok_string(self, text,
start, options)\n\n if text:sub(start,start) ~= '\"' then\n self:onDecodeError(\"expected
string's opening quote\", text, start, options.etc)\n return nil, start --
in case the error method doesn't abort, return something sensible\n end\n\n
\ local i = start + 1 -- +1 to bypass the initial quote\n local text_len =
text:len()\n local VALUE = \"\"\n while i <= text_len do\n local c =
text:sub(i,i)\n if c == '\"' then\n return VALUE, i + 1\n end\n
\ if c ~= '\\\\' then\n VALUE = VALUE .. c\n i = i + 1\n elseif
text:match('^\\\\b', i) then\n VALUE = VALUE .. \"\\b\"\n i =
i + 2\n elseif text:match('^\\\\f', i) then\n VALUE = VALUE .. \"\\f\"\n
\ i = i + 2\n elseif text:match('^\\\\n', i) then\n VALUE
= VALUE .. \"\\n\"\n i = i + 2\n elseif text:match('^\\\\r', i) then\n
\ VALUE = VALUE .. \"\\r\"\n i = i + 2\n elseif text:match('^\\\\t',
i) then\n VALUE = VALUE .. \"\\t\"\n i = i + 2\n else\n local
hex = text:match('^\\\\u([0123456789aAbBcCdDeEfF][0123456789aAbBcCdDeEfF][0123456789aAbBcCdDeEfF][0123456789aAbBcCdDeEfF])',
i)\n if hex then\n i = i + 6 -- bypass what we just read\n\n
\ -- We have a Unicode codepoint. It could be standalone, or if in the
proper range and\n -- followed by another in a specific range, it'll
be a two-code surrogate pair.\n local codepoint = tonumber(hex, 16)\n
\ if codepoint >= 0xD800 and codepoint <= 0xDBFF then\n --
it's a hi surrogate... see whether we have a following low\n local
lo_surrogate = text:match('^\\\\u([dD][cdefCDEF][0123456789aAbBcCdDeEfF][0123456789aAbBcCdDeEfF])',
i)\n if lo_surrogate then\n i = i + 6 -- bypass
the low surrogate we just read\n codepoint = 0x2400 + (codepoint
- 0xD800) * 0x400 + tonumber(lo_surrogate, 16)\n else\n --
not a proper low, so we'll just leave the first codepoint as is and spit it out.\n
\ end\n end\n VALUE = VALUE .. unicode_codepoint_as_utf8(codepoint)\n\n
\ else\n\n -- just pass through what's escaped\n VALUE
= VALUE .. text:match('^\\\\(.)', i)\n i = i + 2\n end\n end\n
\ end\n\n self:onDecodeError(\"unclosed string\", text, start, options.etc)\n
\ return nil, start -- in case the error method doesn't abort, return something
sensible\nend\n\nlocal function skip_whitespace(text, start)\n\n local _, match_end
= text:find(\"^[ \\n\\r\\t]+\", start) -- [http://www.ietf.org/rfc/rfc4627.txt]
Section 2\n if match_end then\n return match_end + 1\n else\n return
start\n end\nend\n\nlocal grok_one -- assigned later\n\nlocal function grok_object(self,
text, start, options)\n\n if text:sub(start,start) ~= '{' then\n self:onDecodeError(\"expected
'{'\", text, start, options.etc)\n return nil, start -- in case the error
method doesn't abort, return something sensible\n end\n\n local i = skip_whitespace(text,
start + 1) -- +1 to skip the '{'\n\n local VALUE = self.strictTypes and self:newObject
{ } or { }\n\n if text:sub(i,i) == '}' then\n return VALUE, i + 1\n end\n
\ local text_len = text:len()\n while i <= text_len do\n local key, new_i
= grok_string(self, text, i, options)\n\n i = skip_whitespace(text, new_i)\n\n
\ if text:sub(i, i) ~= ':' then\n self:onDecodeError(\"expected colon\",
text, i, options.etc)\n return nil, i -- in case the error method doesn't
abort, return something sensible\n end\n\n i = skip_whitespace(text,
i + 1)\n\n local new_val, new_i = grok_one(self, text, i, options)\n\n VALUE[key]
= new_val\n\n --\n -- Expect now either '}' to end things, or a ','
to allow us to continue.\n --\n i = skip_whitespace(text, new_i)\n\n
\ local c = text:sub(i,i)\n\n if c == '}' then\n return VALUE,
i + 1\n end\n\n if text:sub(i, i) ~= ',' then\n self:onDecodeError(\"expected
comma or '}'\", text, i, options.etc)\n return nil, i -- in case the error
method doesn't abort, return something sensible\n end\n\n i = skip_whitespace(text,
i + 1)\n end\n\n self:onDecodeError(\"unclosed '{'\", text, start, options.etc)\n
\ return nil, start -- in case the error method doesn't abort, return something
sensible\nend\n\nlocal function grok_array(self, text, start, options)\n if
text:sub(start,start) ~= '[' then\n self:onDecodeError(\"expected '['\",
text, start, options.etc)\n return nil, start -- in case the error method
doesn't abort, return something sensible\n end\n\n local i = skip_whitespace(text,
start + 1) -- +1 to skip the '['\n local VALUE = self.strictTypes and self:newArray
{ } or { }\n if text:sub(i,i) == ']' then\n return VALUE, i + 1\n end\n\n
\ local VALUE_INDEX = 1\n\n local text_len = text:len()\n while i <= text_len
do\n local val, new_i = grok_one(self, text, i, options)\n\n -- can't
table.insert(VALUE, val) here because it's a no-op if val is nil\n VALUE[VALUE_INDEX]
= val\n VALUE_INDEX = VALUE_INDEX + 1\n\n i = skip_whitespace(text,
new_i)\n\n --\n -- Expect now either ']' to end things, or a ',' to
allow us to continue.\n --\n local c = text:sub(i,i)\n if c ==
']' then\n return VALUE, i + 1\n end\n if text:sub(i, i) ~=
',' then\n self:onDecodeError(\"expected comma or ']'\", text, i, options.etc)\n
\ return nil, i -- in case the error method doesn't abort, return something
sensible\n end\n i = skip_whitespace(text, i + 1)\n end\n self:onDecodeError(\"unclosed
'['\", text, start, options.etc)\n return nil, i -- in case the error method
doesn't abort, return something sensible\nend\n\n\ngrok_one = function(self, text,
start, options)\n -- Skip any whitespace\n start = skip_whitespace(text, start)\n\n
\ if start > text:len() then\n self:onDecodeError(\"unexpected end of string\",
text, nil, options.etc)\n return nil, start -- in case the error method doesn't
abort, return something sensible\n end\n\n if text:find('^\"', start) then\n
\ return grok_string(self, text, start, options.etc)\n\n elseif text:find('^[-0123456789
]', start) then\n return grok_number(self, text, start, options)\n\n elseif
text:find('^%{', start) then\n return grok_object(self, text, start, options)\n\n
\ elseif text:find('^%[', start) then\n return grok_array(self, text, start,
options)\n\n elseif text:find('^true', start) then\n return true, start
+ 4\n\n elseif text:find('^false', start) then\n return false, start +
5\n\n elseif text:find('^null', start) then\n return options.null, start
+ 4\n\n else\n self:onDecodeError(\"can't parse JSON\", text, start, options.etc)\n
\ return nil, 1 -- in case the error method doesn't abort, return something
sensible\n end\nend\n\nfunction OBJDEF:decode(text, etc, options)\n --\n --
If the user didn't pass in a table of decode options, make an empty one.\n --\n
\ if type(options) ~= 'table' then\n options = {}\n end\n\n --\n --
If they passed in an 'etc' argument, stuff it into the options.\n -- (If not,
any 'etc' field in the options they passed in remains to be used)\n --\n if
etc ~= nil then\n options.etc = etc\n end\n\n\n if type(self) ~= 'table'
or self.__index ~= OBJDEF then\n local error_message = \"JSON:decode must
be called in method format\"\n OBJDEF:onDecodeError(error_message, nil, nil,
options.etc)\n return nil, error_message -- in case the error method doesn't
abort, return something sensible\n end\n\n if text == nil then\n local
error_message = \"nil passed to JSON:decode()\"\n self:onDecodeOfNilError(error_message,
nil, nil, options.etc)\n return nil, error_message -- in case the error method
doesn't abort, return something sensible\n\n elseif type(text) ~= 'string' then\n
\ local error_message = \"expected string argument to JSON:decode()\"\n self:onDecodeError(string.format(\"%s,
got %s\", error_message, type(text)), nil, nil, options.etc)\n return nil,
error_message -- in case the error method doesn't abort, return something sensible\n
\ end\n\n if text:match('^%s*$') then\n -- an empty string is nothing,
but not an error\n return nil\n end\n\n if text:match('^%s*<') then\n
\ -- Can't be JSON... we'll assume it's HTML\n local error_message =
\"HTML passed to JSON:decode()\"\n self:onDecodeOfHTMLError(error_message,
text, nil, options.etc)\n return nil, error_message -- in case the error
method doesn't abort, return something sensible\n end\n\n --\n -- Ensure
that it's not UTF-32 or UTF-16.\n -- Those are perfectly valid encodings for
JSON (as per RFC 4627 section 3),\n -- but this package can't handle them.\n
\ --\n if text:sub(1,1):byte() == 0 or (text:len() >= 2 and text:sub(2,2):byte()
== 0) then\n local error_message = \"JSON package groks only UTF-8, sorry\"\n
\ self:onDecodeError(error_message, text, nil, options.etc)\n return
nil, error_message -- in case the error method doesn't abort, return something
sensible\n end\n\n --\n -- apply global options\n --\n if options.decodeNumbersAsObjects
== nil then\n options.decodeNumbersAsObjects = self.decodeNumbersAsObjects\n
\ end\n if options.decodeIntegerObjectificationLength == nil then\n options.decodeIntegerObjectificationLength
= self.decodeIntegerObjectificationLength\n end\n if options.decodeDecimalObjectificationLength
== nil then\n options.decodeDecimalObjectificationLength = self.decodeDecimalObjectificationLength\n
\ end\n if options.decodeIntegerStringificationLength == nil then\n options.decodeIntegerStringificationLength
= self.decodeIntegerStringificationLength\n end\n if options.decodeDecimalStringificationLength
== nil then\n options.decodeDecimalStringificationLength = self.decodeDecimalStringificationLength\n
\ end\n\n\n --\n -- Finally, go parse it\n --\n local success, value,
next_i = pcall(grok_one, self, text, 1, options)\n\n if success then\n\n local
error_message = nil\n if next_i ~= #text + 1 then\n -- something's
left over after we parsed the first thing.... whitespace is allowed.\n next_i
= skip_whitespace(text, next_i)\n\n -- if we have something left over
now, it's trailing garbage\n if next_i ~= #text + 1 then\n value,
error_message = self:onTrailingGarbage(text, next_i, value, options.etc)\n end\n
\ end\n return value, error_message\n\n else\n\n -- If JSON:onDecodeError()
didn't abort out of the pcall, we'll have received\n -- the error message
here as \"value\", so pass it along as an assert.\n local error_message =
value\n if self.assert then\n self.assert(false, error_message)\n
\ else\n assert(false, error_message)\n end\n -- ...and
if we're still here (because the assert didn't throw an error),\n -- return
a nil and throw the error message on as a second arg\n return nil, error_message\n\n
\ end\nend\n\nlocal function backslash_replacement_function(c)\n if c == \"\\n\"
then\n return \"\\\\n\"\n elseif c == \"\\r\" then\n return \"\\\\r\"\n
\ elseif c == \"\\t\" then\n return \"\\\\t\"\n elseif c == \"\\b\" then\n
\ return \"\\\\b\"\n elseif c == \"\\f\" then\n return \"\\\\f\"\n
\ elseif c == '\"' then\n return '\\\\\"'\n elseif c == '\\\\' then\n
\ return '\\\\\\\\'\n else\n return string.format(\"\\\\u%04x\", c:byte())\n
\ end\nend\n\nlocal chars_to_be_escaped_in_JSON_string\n = '['\n .. '\"'
\ -- class sub-pattern to match a double quote\n .. '%\\\\' -- class sub-pattern
to match a backslash\n .. '%z' -- class sub-pattern to match a null\n ..
\ '\\001' .. '-' .. '\\031' -- class sub-pattern to match control characters\n
\ .. ']'\n\n\nlocal LINE_SEPARATOR_as_utf8 = unicode_codepoint_as_utf8(0x2028)\nlocal
PARAGRAPH_SEPARATOR_as_utf8 = unicode_codepoint_as_utf8(0x2029)\nlocal function
json_string_literal(value, options)\n local newval = value:gsub(chars_to_be_escaped_in_JSON_string,
backslash_replacement_function)\n if options.stringsAreUtf8 then\n --\n
\ -- This feels really ugly to just look into a string for the sequence of
bytes that we know to be a particular utf8 character,\n -- but utf8 was designed
purposefully to make this kind of thing possible. Still, feels dirty.\n --
I'd rather decode the byte stream into a character stream, but it's not technically
needed so\n -- not technically worth it.\n --\n newval = newval:gsub(LINE_SEPARATOR_as_utf8,
'\\\\u2028'):gsub(PARAGRAPH_SEPARATOR_as_utf8,'\\\\u2029')\n end\n return
'\"' .. newval .. '\"'\nend\n\nlocal function object_or_array(self, T, etc)\n
\ --\n -- We need to inspect all the keys... if there are any strings, we'll
convert to a JSON\n -- object. If there are only numbers, it's a JSON array.\n
\ --\n -- If we'll be converting to a JSON object, we'll want to sort the keys
so that the\n -- end result is deterministic.\n --\n local string_keys =
{ }\n local number_keys = { }\n local number_keys_must_be_strings = false\n
\ local maximum_number_key\n\n for key in pairs(T) do\n if type(key) ==
'string' then\n table.insert(string_keys, key)\n elseif type(key)
== 'number' then\n table.insert(number_keys, key)\n if key <=
0 or key >= math.huge then\n number_keys_must_be_strings = true\n elseif
not maximum_number_key or key > maximum_number_key then\n maximum_number_key
= key\n end\n elseif type(key) == 'boolean' then\n table.insert(string_keys,
tostring(key))\n else\n self:onEncodeError(\"can't encode table with
a key of type \" .. type(key), etc)\n end\n end\n\n if #string_keys ==
0 and not number_keys_must_be_strings then\n --\n -- An empty table,
or a numeric-only array\n --\n if #number_keys > 0 then\n return
nil, maximum_number_key -- an array\n elseif tostring(T) == \"JSON array\"
then\n return nil\n elseif tostring(T) == \"JSON object\" then\n
\ return { }\n else\n -- have to guess, so we'll pick array,
since empty arrays are likely more common than empty objects\n return
nil\n end\n end\n\n table.sort(string_keys)\n\n local map\n if #number_keys
> 0 then\n --\n -- If we're here then we have either mixed string/number
keys, or numbers inappropriate for a JSON array\n -- It's not ideal, but
we'll turn the numbers into strings so that we can at least create a JSON object.\n
\ --\n\n if self.noKeyConversion then\n self:onEncodeError(\"a
table with both numeric and string keys could be an object or array; aborting\",
etc)\n end\n\n --\n -- Have to make a shallow copy of the source
table so we can remap the numeric keys to be strings\n --\n map = {
}\n for key, val in pairs(T) do\n map[key] = val\n end\n\n table.sort(number_keys)\n\n
\ --\n -- Throw numeric keys in there as strings\n --\n for
_, number_key in ipairs(number_keys) do\n local string_key = tostring(number_key)\n
\ if map[string_key] == nil then\n table.insert(string_keys
, string_key)\n map[string_key] = T[number_key]\n else\n self:onEncodeError(\"conflict
converting table with mixed-type keys into a JSON object: key \" .. number_key
.. \" exists both as a string and a number.\", etc)\n end\n end\n
\ end\n\n return string_keys, nil, map\nend\n\n--\n-- Encode\n--\n-- 'options'
is nil, or a table with possible keys:\n--\n-- pretty -- If true, return
a pretty-printed version.\n--\n-- indent -- A string (usually of spaces)
used to indent each nested level.\n--\n-- align_keys -- If true, align
all the keys when formatting a table. The result is uglier than one might at first
imagine.\n-- Results are undefined if 'align_keys' is true
but 'pretty' is not.\n--\n-- array_newline -- If true, array elements are
formatted each to their own line. The default is to all fall inline.\n-- Results
are undefined if 'array_newline' is true but 'pretty' is not.\n--\n-- null
\ -- If this exists with a string value, table elements with this value
are output as JSON null.\n--\n-- stringsAreUtf8 -- If true, consider Lua strings
not as a sequence of bytes, but as a sequence of UTF-8 characters.\n-- (Currently,
the only practical effect of setting this option is that Unicode LINE and PARAGRAPH\n--
\ separators, if found in a string, are encoded with a JSON
escape instead of as raw UTF-8.\n-- The JSON is valid either
way, but encoding this way, apparently, allows the resulting JSON\n-- to
also be valid Java.)\n--\n--\nlocal function encode_value(self, value, parents,
etc, options, indent, for_key)\n\n --\n -- keys in a JSON object can never
be null, so we don't even consider options.null when converting a key value\n
\ --\n if value == nil or (not for_key and options and options.null and value
== options.null) then\n return 'null'\n\n elseif type(value) == 'string'
then\n return json_string_literal(value, options)\n\n elseif type(value)
== 'number' then\n if value ~= value then\n --\n -- NaN (Not
a Number).\n -- JSON has no NaN, so we have to fudge the best we can.
This should really be a package option.\n --\n return \"null\"\n
\ elseif value >= math.huge then\n --\n -- Positive infinity.
JSON has no INF, so we have to fudge the best we can. This should\n --
really be a package option. Note: at least with some implementations, positive
infinity\n -- is both \">= math.huge\" and \"<= -math.huge\", which makes
no sense but that's how it is.\n -- Negative infinity is properly \"<=
-math.huge\". So, we must be sure to check the \">=\"\n -- case first.\n
\ --\n return \"1e+9999\"\n elseif value <= -math.huge then\n
\ --\n -- Negative infinity.\n -- JSON has no INF, so we
have to fudge the best we can. This should really be a package option.\n --\n
\ return \"-1e+9999\"\n else\n return tostring(value)\n end\n\n
\ elseif type(value) == 'boolean' then\n return tostring(value)\n\n elseif
type(value) ~= 'table' then\n\n if self.unsupportedTypeEncoder then\n local
user_value, user_error = self:unsupportedTypeEncoder(value, parents, etc, options,
indent, for_key)\n -- If the user's handler returns a string, use that.
If it returns nil plus an error message, bail with that.\n -- If only
nil returned, fall through to the default error handler.\n if type(user_value)
== 'string' then\n return user_value\n elseif user_value ~=
nil then\n self:onEncodeError(\"unsupportedTypeEncoder method returned
a \" .. type(user_value), etc)\n elseif user_error then\n self:onEncodeError(tostring(user_error),
etc)\n end\n end\n\n self:onEncodeError(\"can't convert \" ..
type(value) .. \" to JSON\", etc)\n\n elseif getmetatable(value) == isNumber
then\n return tostring(value)\n else\n --\n -- A table to be
converted to either a JSON object or array.\n --\n local T = value\n\n
\ if type(options) ~= 'table' then\n options = {}\n end\n if
type(indent) ~= 'string' then\n indent = \"\"\n end\n\n if parents[T]
then\n self:onEncodeError(\"table \" .. tostring(T) .. \" is a child of
itself\", etc)\n else\n parents[T] = true\n end\n\n local
result_value\n\n local object_keys, maximum_number_key, map = object_or_array(self,
T, etc)\n if maximum_number_key then\n --\n -- An array...\n
\ --\n local key_indent\n if options.array_newline then\n
\ key_indent = indent .. tostring(options.indent or \"\")\n else\n
\ key_indent = indent\n end\n\n local ITEMS = { }\n
\ for i = 1, maximum_number_key do\n table.insert(ITEMS, encode_value(self,
T[i], parents, etc, options, key_indent))\n end\n\n if options.array_newline
then\n result_value = \"[\\n\" .. key_indent .. table.concat(ITEMS,
\",\\n\" .. key_indent) .. \"\\n\" .. indent .. \"]\"\n elseif options.pretty
then\n result_value = \"[ \" .. table.concat(ITEMS, \", \") .. \" ]\"\n
\ else\n result_value = \"[\" .. table.concat(ITEMS, \",\")
\ .. \"]\"\n end\n\n elseif object_keys then\n --\n --
An object\n --\n local TT = map or T\n\n if options.pretty
then\n\n local KEYS = { }\n local max_key_length = 0\n for
_, key in ipairs(object_keys) do\n local encoded = encode_value(self,
tostring(key), parents, etc, options, indent, true)\n if options.align_keys
then\n max_key_length = math.max(max_key_length, #encoded)\n
\ end\n table.insert(KEYS, encoded)\n end\n
\ local key_indent = indent .. tostring(options.indent or \"\")\n local
subtable_indent = key_indent .. string.rep(\" \", max_key_length) .. (options.align_keys
and \" \" or \"\")\n local FORMAT = \"%s%\" .. string.format(\"%d\",
max_key_length) .. \"s: %s\"\n\n local COMBINED_PARTS = { }\n for
i, key in ipairs(object_keys) do\n local encoded_val = encode_value(self,
TT[key], parents, etc, options, subtable_indent)\n table.insert(COMBINED_PARTS,
string.format(FORMAT, key_indent, KEYS[i], encoded_val))\n end\n result_value
= \"{\\n\" .. table.concat(COMBINED_PARTS, \",\\n\") .. \"\\n\" .. indent .. \"}\"\n\n
\ else\n\n local PARTS = { }\n for _, key in ipairs(object_keys)
do\n local encoded_val = encode_value(self, TT[key], parents,
etc, options, indent)\n local encoded_key = encode_value(self, tostring(key),
parents, etc, options, indent, true)\n table.insert(PARTS, string.format(\"%s:%s\",
encoded_key, encoded_val))\n end\n result_value = \"{\"
.. table.concat(PARTS, \",\") .. \"}\"\n\n end\n else\n --\n
\ -- An empty array/object... we'll treat it as an array, though it should
really be an option\n --\n result_value = \"[]\"\n end\n\n
\ parents[T] = false\n return result_value\n end\nend\n\nlocal function
top_level_encode(self, value, etc, options)\n local val = encode_value(self,
value, {}, etc, options)\n if val == nil then\n --PRIVATE(\"may need to
revert to the previous public version if I can't figure out what the guy wanted\")\n
\ return val\n else\n return val\n end\nend\n\nfunction OBJDEF:encode(value,
etc, options)\n if type(self) ~= 'table' or self.__index ~= OBJDEF then\n OBJDEF:onEncodeError(\"JSON:encode
must be called in method format\", etc)\n end\n\n --\n -- If the user didn't
pass in a table of decode options, make an empty one.\n --\n if type(options)
~= 'table' then\n options = {}\n end\n\n return top_level_encode(self,
value, etc, options)\nend\n\nfunction OBJDEF:encode_pretty(value, etc, options)\n
\ if type(self) ~= 'table' or self.__index ~= OBJDEF then\n OBJDEF:onEncodeError(\"JSON:encode_pretty
must be called in method format\", etc)\n end\n\n --\n -- If the user didn't
pass in a table of decode options, use the default pretty ones\n --\n if type(options)
~= 'table' then\n options = default_pretty_options\n end\n\n return top_level_encode(self,
value, etc, options)\nend\n\nfunction OBJDEF.__tostring()\n return \"JSON encode/decode
package\"\nend\n\nOBJDEF.__index = OBJDEF\n\nfunction OBJDEF:new(args)\n local
new = { }\n\n if args then\n for key, val in pairs(args) do\n new[key]
= val\n end\n end\n\n return setmetatable(new, OBJDEF)\nend\n\nreturn
OBJDEF:new()\n\n--\n-- Version history:\n--\n-- 20170927.26 Use option.null
in decoding as well. Thanks to Max Sindwani for the bump, and sorry to Oliver
Hitz\n-- whose first mention of it four years ago was completely
missed by me.\n--\n-- 20170823.25 Added support for JSON:unsupportedTypeEncoder().\n--
\ Thanks to Chronos Phaenon Eosphoros (https://github.com/cpeosphoros)
for the idea.\n--\n-- 20170819.24 Added support for boolean keys in tables.\n--\n--
\ 20170416.23 Added the \"array_newline\" formatting option suggested by yurenchen
(http://www.yurenchen.com/)\n--\n-- 20161128.22 Added:\n-- JSON:isString()\n--
\ JSON:isNumber()\n-- JSON:decodeIntegerObjectificationLength\n--
\ JSON:decodeDecimalObjectificationLength\n--\n-- 20161109.21
\ Oops, had a small boo-boo in the previous update.\n--\n-- 20161103.20 Used
to silently ignore trailing garbage when decoding. Now fails via JSON:onTrailingGarbage()\n--
\ http://seriot.ch/parsing_json.php\n--\n-- Built-in
error message about \"expected comma or ']'\" had mistakenly referred to '['\n--\n--
\ Updated the built-in error reporting to refer to bytes rather
than characters.\n--\n-- The decode() method no longer assumes
that error handlers abort.\n--\n-- Made the VERSION string a string
instead of a number\n--\n\n-- 20160916.19 Fixed the isNumber.__index assignment
(thanks to Jack Taylor)\n-- \n-- 20160730.18 Added JSON:forceString() and
JSON:forceNumber()\n--\n-- 20160728.17 Added concatenation to the metatable
for JSON:asNumber()\n--\n-- 20160709.16 Could crash if not passed an options
table (thanks jarno heikkinen <jarnoh@capturemonkey.com>).\n--\n-- Made
JSON:asNumber() a bit more resilient to being passed the results of itself.\n--\n--
\ 20160526.15 Added the ability to easily encode null values in JSON, via the
new \"null\" encoding option.\n-- (Thanks to Adam B for bringing
up the issue.)\n--\n-- Added some support for very large numbers
and precise floats via\n-- JSON.decodeNumbersAsObjects\n--
\ JSON.decodeIntegerStringificationLength\n-- JSON.decodeDecimalStringificationLength\n--\n--
\ Added the \"stringsAreUtf8\" encoding option. (Hat tip to http://lua-users.org/wiki/JsonModules
)\n--\n-- 20141223.14 The encode_pretty() routine produced fine results for
small datasets, but isn't really\n-- appropriate for anything
large, so with help from Alex Aulbach I've made the encode routines\n-- more
flexible, and changed the default encode_pretty() to be more generally useful.\n--\n--
\ Added a third 'options' argument to the encode() and encode_pretty()
routines, to control\n-- how the encoding takes place.\n--\n--
\ Updated docs to add assert() call to the loadfile() line, just
as good practice so that\n-- if there is a problem loading JSON.lua,
the appropriate error message will percolate up.\n--\n-- 20140920.13 Put back
(in a way that doesn't cause warnings about unused variables) the author string,\n--
\ so that the source of the package, and its version number, are
visible in compiled copies.\n--\n-- 20140911.12 Minor lua cleanup.\n-- Fixed
internal reference to 'JSON.noKeyConversion' to reference 'self' instead of 'JSON'.\n--
\ (Thanks to SmugMug's David Parry for these.)\n--\n-- 20140418.11
\ JSON nulls embedded within an array were being ignored, such that\n-- [\"1\",null,null,null,null,null,\"seven\"],\n--
\ would return\n-- {1,\"seven\"}\n-- It's
now fixed to properly return\n-- {1, nil, nil, nil, nil, nil,
\"seven\"}\n-- Thanks to \"haddock\" for catching the error.\n--\n--
\ 20140116.10 The user's JSON.assert() wasn't always being used. Thanks to
\"blue\" for the heads up.\n--\n-- 20131118.9 Update for Lua 5.3... it seems
that tostring(2/1) produces \"2.0\" instead of \"2\",\n-- and
this caused some problems.\n--\n-- 20131031.8 Unified the code for encode()
and encode_pretty(); they had been stupidly separate,\n-- and
had of course diverged (encode_pretty didn't get the fixes that encode got, so\n--
\ sometimes produced incorrect results; thanks to Mattie for the
heads up).\n--\n-- Handle encoding tables with non-positive numeric
keys (unlikely, but possible).\n--\n-- If a table has both numeric
and string keys, or its numeric keys are inappropriate\n-- (such
as being non-positive or infinite), the numeric keys are turned into\n-- string
keys appropriate for a JSON object. So, as before,\n-- JSON:encode({
\"one\", \"two\", \"three\" })\n-- produces the array\n-- [\"one\",\"two\",\"three\"]\n--
\ but now something with mixed key types like\n-- JSON:encode({
\"one\", \"two\", \"three\", SOMESTRING = \"some string\" }))\n-- instead
of throwing an error produces an object:\n-- {\"1\":\"one\",\"2\":\"two\",\"3\":\"three\",\"SOMESTRING\":\"some
string\"}\n--\n-- To maintain the prior throw-an-error semantics,
set\n-- JSON.noKeyConversion = true\n-- \n--
\ 20131004.7 Release under a Creative Commons CC-BY license, which I should
have done from day one, sorry.\n--\n-- 20130120.6 Comment update: added a
link to the specific page on my blog where this code can\n-- be
found, so that folks who come across the code outside of my blog can find updates\n--
\ more easily.\n--\n-- 20111207.5 Added support for the 'etc'
arguments, for better error reporting.\n--\n-- 20110731.4 More feedback from
David Kolf on how to make the tests for Nan/Infinity system independent.\n--\n--
\ 20110730.3 Incorporated feedback from David Kolf at http://lua-users.org/wiki/JsonModules:\n--\n--
\ * When encoding lua for JSON, Sparse numeric arrays are now
handled by\n-- spitting out full arrays, such that\n-- JSON:encode({\"one\",
\"two\", [10] = \"ten\"})\n-- returns\n-- [\"one\",\"two\",null,null,null,null,null,null,null,\"ten\"]\n--\n--
\ In 20100810.2 and earlier, only up to the first non-null
value would have been retained.\n--\n-- * When encoding lua
for JSON, numeric value NaN gets spit out as null, and infinity as \"1+e9999\".\n--
\ Version 20100810.2 and earlier created invalid JSON in both
cases.\n--\n-- * Unicode surrogate pairs are now detected when
decoding JSON.\n--\n-- 20100810.2 added some checking to ensure that an invalid
Unicode character couldn't leak in to the UTF-8 encoding\n--\n-- 20100731.1
\ initial public release\n--\n"
uuid.lua: |
---------------------------------------------------------------------------------------
-- Copyright 2012 Rackspace (original), 2013 Thijs Schreijer (modifications)
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- see http://www.ietf.org/rfc/rfc4122.txt
--
-- Note that this is not a true version 4 (random) UUID. Since `os.time()` precision is only 1 second, it would be hard
-- to guarantee spatial uniqueness when two hosts generate a UUID after being seeded during the same second. This
-- is solved by using the node field from a version 1 UUID, which represents the MAC address.
--
-- 28-apr-2013 modified by Thijs Schreijer from the original [Rackspace code](https://github.com/kans/zirgo/blob/807250b1af6725bad4776c931c89a784c1e34db2/util/uuid.lua) as a generic Lua module.
-- Regarding the above note about `os.time()`: the modifications use the `socket.gettime()` function from LuaSocket
-- if available, which reduces that problem (provided LuaSocket has been loaded before uuid).
--
-- **6-nov-2015 Please take note of this issue**: [https://github.com/Mashape/kong/issues/478](https://github.com/Mashape/kong/issues/478)
-- It demonstrates the problem of using time as a random seed, specifically when seeding from multiple processes.
-- So make sure to seed only once, application-wide, and not to have multiple processes do so
-- simultaneously (as nginx does, for example).
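--
-- A minimal sketch of that "seed once" pattern (assuming LuaSocket is available; the
-- module falls back to `os.time()` otherwise):
--
--   local uuid = require("uuid")
--   uuid.seed()               -- call exactly once, at application startup
--   local id = uuid()         -- later calls only generate ids, never re-seed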
local M = {}
local math = require('math')
local os = require('os')
local string = require('string')
local bitsize = 32 -- bitsize assumed for Lua VM. See randomseed function below.
local lua_version = tonumber(_VERSION:match("%d%.*%d*")) -- grab Lua version used
local MATRIX_AND = {{0,0},{0,1}}
local MATRIX_OR = {{0,1},{1,1}}
local HEXES = '0123456789abcdef'
local math_floor = math.floor
local math_random = math.random
local math_abs = math.abs
local string_sub = string.sub
local to_number = tonumber
local assert = assert
local type = type
-- performs the bitwise operation specified by truth matrix on two numbers.
local function BITWISE(x, y, matrix)
local z = 0
local pow = 1
while x > 0 or y > 0 do
z = z + (matrix[x%2+1][y%2+1] * pow)
pow = pow * 2
x = math_floor(x/2)
y = math_floor(y/2)
end
return z
end
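-- Worked example: AND/OR of 12 (1100b) and 10 (1010b) via the truth matrices above:
--   BITWISE(12, 10, MATRIX_AND)  --> 8  (1000b)
--   BITWISE(12, 10, MATRIX_OR)   --> 14 (1110b)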
local function INT2HEX(x)
local s,base = '',16
local d
while x > 0 do
d = x % base + 1
x = math_floor(x/base)
s = string_sub(HEXES, d, d)..s
end
while #s < 2 do s = "0" .. s end
return s
end
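-- Worked example: always yields two hex digits, as needed for a single byte:
--   INT2HEX(255)  --> "ff"
--   INT2HEX(7)    --> "07"
--   INT2HEX(0)    --> "00"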
----------------------------------------------------------------------------
-- Creates a new uuid. Either provide a unique hex string, or make sure the
-- random seed is properly set. The module table itself is a shortcut to this
-- function, so `my_uuid = uuid.new()` equals `my_uuid = uuid()`.
--
-- For proper use there are 3 options:
--
-- 1. first require `luasocket`, then call `uuid.seed()`, and request a uuid using no
-- parameter, e.g. `my_uuid = uuid()`
-- 2. use `uuid` without `luasocket`, set a random seed using `uuid.randomseed(some_good_seed)`,
-- and request a uuid using no parameter, e.g. `my_uuid = uuid()`
-- 3. use `uuid` without `luasocket`, and request a uuid using a unique hex string,
-- e.g. `my_uuid = uuid(my_networkcard_macaddress)`
--
-- @return a properly formatted uuid string
-- @param hwaddr (optional) string containing a unique hex value (e.g.: `00:0c:29:69:41:c6`), to be used to compensate for the lesser `math_random()` function. Use a mac address for solid results. If omitted, a fully randomized uuid will be generated, but then you must ensure that the random seed is set properly!
-- @usage
-- local uuid = require("uuid")
-- print("here's a new uuid: ",uuid())
function M.new(hwaddr)
-- bytes are treated as 8bit unsigned bytes.
local bytes = {
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255),
math_random(0, 255)
}
if hwaddr then
assert(type(hwaddr)=="string", "Expected hex string, got "..type(hwaddr))
-- Clean up the provided string: assume a MAC address, so walk from the back and keep hex characters until we have 12
local i,str = #hwaddr, hwaddr
hwaddr = ""
while i>0 and #hwaddr<12 do
local c = str:sub(i,i):lower()
if HEXES:find(c, 1, true) then
-- valid HEX character, so append it
hwaddr = c..hwaddr
end
i = i - 1
end
assert(#hwaddr == 12, "Provided string did not contain at least 12 hex characters, retrieved '"..hwaddr.."' from '"..str.."'")
-- no split() in lua. :(
bytes[11] = to_number(hwaddr:sub(1, 2), 16)
bytes[12] = to_number(hwaddr:sub(3, 4), 16)
bytes[13] = to_number(hwaddr:sub(5, 6), 16)
bytes[14] = to_number(hwaddr:sub(7, 8), 16)
bytes[15] = to_number(hwaddr:sub(9, 10), 16)
bytes[16] = to_number(hwaddr:sub(11, 12), 16)
end
-- set the version to 4 (random): high nibble of byte 7 becomes 0100
bytes[7] = BITWISE(bytes[7], 0x0f, MATRIX_AND)
bytes[7] = BITWISE(bytes[7], 0x40, MATRIX_OR)
-- set the variant to RFC 4122: high bits of byte 9 become 10
bytes[9] = BITWISE(bytes[9], 0x3f, MATRIX_AND)
bytes[9] = BITWISE(bytes[9], 0x80, MATRIX_OR)
return INT2HEX(bytes[1])..INT2HEX(bytes[2])..INT2HEX(bytes[3])..INT2HEX(bytes[4]).."-"..
INT2HEX(bytes[5])..INT2HEX(bytes[6]).."-"..
INT2HEX(bytes[7])..INT2HEX(bytes[8]).."-"..
INT2HEX(bytes[9])..INT2HEX(bytes[10]).."-"..
INT2HEX(bytes[11])..INT2HEX(bytes[12])..INT2HEX(bytes[13])..INT2HEX(bytes[14])..INT2HEX(bytes[15])..INT2HEX(bytes[16])
end
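-- Example (the MAC address is illustrative): passing a hardware address pins the node
-- field, so the last group of the uuid is the cleaned-up MAC while the version and
-- variant nibbles stay fixed:
--   uuid.new("00:0c:29:69:41:c6")  --> "xxxxxxxx-xxxx-4xxx-yxxx-000c296941c6"
--   (x = random hex digit, y = one of 8, 9, a, b)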
----------------------------------------------------------------------------
-- Improved randomseed function.
-- Lua 5.1 and 5.2 both truncate the seed given if it exceeds the integer
-- range. If this happens, the seed will be 0 or 1 and all randomness will
-- be gone (each application run will generate the same sequence of random
-- numbers in that case). This improved version drops the most significant
-- bits in those cases to get the seed within the proper range again.
-- @param seed the random seed to set (integer from 0 - 2^32, negative values will be made positive)
-- @return the (potentially modified) seed used
-- @usage
-- local socket = require("socket") -- gettime() has higher precision than os.time()
-- local uuid = require("uuid")
-- -- see also example at uuid.seed()
-- uuid.randomseed(socket.gettime()*10000)
-- print("here's a new uuid: ",uuid())
function M.randomseed(seed)
seed = math_floor(math_abs(seed))
if seed >= (2^bitsize) then
-- integer overflow, so reduce to prevent a bad seed
seed = seed - math_floor(seed / 2^bitsize) * (2^bitsize)
end
if lua_version < 5.2 then
-- 5.1 uses (incorrect) signed int
math.randomseed(seed - 2^(bitsize-1))
else
-- 5.2 uses (correct) unsigned int
math.randomseed(seed)
end
return seed
end
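-- Worked examples of the reduction above (values are illustrative):
--   M.randomseed(2^32 + 5)  --> effective seed is 5 (high bits dropped)
--   M.randomseed(-42)       --> effective seed is 42 (made positive first)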
----------------------------------------------------------------------------
-- Seeds the random generator.
-- It does so in 2 possible ways:
--
-- 1. use `os.time()`: this only offers resolution to one second (used when
-- LuaSocket hasn't been loaded yet)
-- 2. use LuaSocket's `gettime()` function, but only when LuaSocket
-- has been required already.
-- @usage
-- local socket = require("socket") -- gettime() has higher precision than os.time()
-- -- LuaSocket loaded, so below line does the same as the example from randomseed()
-- uuid.seed()
-- print("here's a new uuid: ",uuid())
function M.seed()
if package.loaded["socket"] and package.loaded["socket"].gettime then
return M.randomseed(package.loaded["socket"].gettime()*10000)
else
return M.randomseed(os.time())
end
end
return setmetatable( M, { __call = function(self, hwaddr) return self.new(hwaddr) end} )
base64.lua: |
-- Minimal pure-Lua base64 decoder (decode only).
local M = {}
function M.base64_decode(input)
local b = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
input = input:gsub('[^'..b..'=]', '')
return (input:gsub('.', function(x)
if (x == '=') then return '' end
local r, f = '', (b:find(x) - 1)
for i = 6, 1, -1 do r = r .. (f % 2 ^ i - f % 2 ^ (i - 1) > 0 and '1' or '0') end
return r;
end):gsub('%d%d%d?%d?%d?%d?%d?%d?', function(x)
if (#x ~= 8) then return '' end
local c = 0
for i = 1, 8 do c = c + (x:sub(i, i) == '1' and 2 ^ (8 - i) or 0) end
return string.char(c)
end))
end
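-- Example (standard base64 test vector; `b64` is whatever name this module is required as):
--   b64.base64_decode("SGVsbG8=")  --> "Hello"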
return M
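-- Note on consuming this ConfigMap from Istio (a sketch only, not verified config):
-- one common approach is to mount it into the istio-proxy container via the pod
-- annotations `sidecar.istio.io/userVolume` and `sidecar.istio.io/userVolumeMount`
-- (the mount path below is illustrative), then load the files from an EnvoyFilter
-- Lua filter's `inlineCode`:
--
--   package.path = package.path .. ";/var/lib/lua/?.lua"
--   local JSON = require("JSON")
--   local uuid = require("uuid")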