// Code generated for freebsd/amd64 by 'generator -mlong-double-64 --package-name libsqlite3 --prefix-enumerator=_ --prefix-external=x_ --prefix-field=F --prefix-static-internal=_ --prefix-static-none=_ --prefix-tagged-enum=_ --prefix-tagged-struct=T --prefix-tagged-union=T --prefix-typename=T --prefix-undefined=_ -ignore-unsupported-alignment -DHAVE_USLEEP -DLONGDOUBLE_TYPE=double -DNDEBUG -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_DBSTAT_VTAB -DSQLITE_ENABLE_FTS5 -DSQLITE_ENABLE_GEOPOLY -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_OFFSET_SQL_FUNC -DSQLITE_ENABLE_PREUPDATE_HOOK -DSQLITE_ENABLE_RBU -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_SNAPSHOT -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_HAVE_ZLIB=1 -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE=1 -DSQLITE_WITHOUT_ZONEMALLOC -Dpread64=pread -Dpwrite64=pwrite -extended-errors -o sqlite3.go sqlite3.c -I/tmp/libsqlite3/sqlite-amalgamation-3450300/ccgo -DSQLITE_MUTEX_NOOP -DSQLITE_OS_UNIX=1 -ltcl8.6 -eval-all-macros', DO NOT EDIT.

//go:build freebsd && amd64
// +build freebsd,amd64

package sqlite3

import (
	"reflect"
	"unsafe"

	"modernc.org/libc"
)

var (
	_ reflect.Type
	_ unsafe.Pointer
)
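
// The constants below are Go renderings of C preprocessor macro values from
// the SQLite amalgamation (sqlite3.c) and the FreeBSD/amd64 system headers,
// as emitted by the code generator invocation recorded in the header comment
// above. For example, SQLite's extended result codes carry their primary code
// in the low byte: SQLITE_IOERR_READ (266) == SQLITE_IOERR (10) | (1 << 8).
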
const ACCESSPERMS = 511
const ALLBITS = -1
const ALLPERMS = 4095
const AT_EACCESS = 256
const AT_EMPTY_PATH = 16384
const AT_FDCWD = -100
const AT_REMOVEDIR = 2048
const AT_RESOLVE_BENEATH = 8192
const AT_SYMLINK_FOLLOW = 1024
const AT_SYMLINK_NOFOLLOW = 512
const BIG_ENDIAN = 4321
const BITVEC_MXHASH = 0
const BITVEC_NBIT = 0
const BITVEC_NELEM = 0
const BITVEC_NINT = 0
const BITVEC_NPTR = 0
const BITVEC_SZ = 512
const BITVEC_SZELEM = 8
const BITVEC_TELEM = 0
const BITVEC_USIZE = 0
const BTALLOC_ANY = 0
const BTALLOC_EXACT = 1
const BTALLOC_LE = 2
const BTCF_AtLast = 8
const BTCF_Incrblob = 16
const BTCF_Multiple = 32
const BTCF_Pinned = 64
const BTCF_ValidNKey = 2
const BTCF_ValidOvfl = 4
const BTCF_WriteFlag = 1
const BTCURSOR_FIRST_UNINIT = 0
const BTCURSOR_MAX_DEPTH = 20
const BTREE_APPEND = 8
const BTREE_APPLICATION_ID = 8
const BTREE_AUTOVACUUM_FULL = 1
const BTREE_AUTOVACUUM_INCR = 2
const BTREE_AUTOVACUUM_NONE = 0
const BTREE_AUXDELETE = 4
const BTREE_BLOBKEY = 2
const BTREE_BULKLOAD = 1
const BTREE_DATA_VERSION = 15
const BTREE_DEFAULT_CACHE_SIZE = 3
const BTREE_FILE_FORMAT = 2
const BTREE_FORDELETE = 8
const BTREE_FREE_PAGE_COUNT = 0
const BTREE_HINT_RANGE = 0
const BTREE_INCR_VACUUM = 7
const BTREE_INTKEY = 1
const BTREE_LARGEST_ROOT_PAGE = 4
const BTREE_MEMORY = 2
const BTREE_OMIT_JOURNAL = 1
const BTREE_PREFORMAT = 128
const BTREE_SAVEPOSITION = 2
const BTREE_SCHEMA_VERSION = 1
const BTREE_SEEK_EQ = 2
const BTREE_SINGLE = 4
const BTREE_TEXT_ENCODING = 5
const BTREE_UNORDERED = 8
const BTREE_USER_VERSION = 6
const BTREE_WRCSR = 4
const BTS_EXCLUSIVE = 64
const BTS_FAST_SECURE = 12
const BTS_INITIALLY_EMPTY = 16
const BTS_NO_WAL = 32
const BTS_OVERWRITE = 8
const BTS_PAGESIZE_FIXED = 2
const BTS_PENDING = 128
const BTS_READ_ONLY = 1
const BTS_SECURE_DELETE = 4
const BUFSIZ = 1024
const BYTE_ORDER = 1234
const CACHE_STALE = 0
const CC_AND = 24
const CC_BANG = 15
const CC_BOM = 30
const CC_COMMA = 23
const CC_DIGIT = 3
const CC_DOLLAR = 4
const CC_DOT = 26
const CC_EQ = 14
const CC_GT = 13
const CC_ID = 27
const CC_ILLEGAL = 28
const CC_KYWD = 2
const CC_KYWD0 = 1
const CC_LP = 17
const CC_LT = 12
const CC_MINUS = 11
const CC_NUL = 29
const CC_PERCENT = 22
const CC_PIPE = 10
const CC_PLUS = 20
const CC_QUOTE = 8
const CC_QUOTE2 = 9
const CC_RP = 18
const CC_SEMI = 19
const CC_SLASH = 16
const CC_SPACE = 7
const CC_STAR = 21
const CC_TILDA = 25
const CC_VARALPHA = 5
const CC_VARNUM = 6
const CC_X = 0
const CKCNSTRNT_COLUMN = 1
const CKCNSTRNT_ROWID = 2
const CLK_TCK = 128
const CLOCKS_PER_SEC = 128
const CLOCK_BOOTTIME = 5
const CLOCK_MONOTONIC = 4
const CLOCK_MONOTONIC_COARSE = 12
const CLOCK_MONOTONIC_FAST = 12
const CLOCK_MONOTONIC_PRECISE = 11
const CLOCK_PROCESS_CPUTIME_ID = 15
const CLOCK_PROF = 2
const CLOCK_REALTIME = 0
const CLOCK_REALTIME_COARSE = 10
const CLOCK_REALTIME_FAST = 10
const CLOCK_REALTIME_PRECISE = 9
const CLOCK_SECOND = 13
const CLOCK_THREAD_CPUTIME_ID = 14
const CLOCK_UPTIME = 5
const CLOCK_UPTIME_FAST = 8
const CLOCK_UPTIME_PRECISE = 7
const CLOCK_VIRTUAL = 1
const CLOSE_RANGE_CLOEXEC = 4
const COLFLAG_BUSY = 256
const COLFLAG_GENERATED = 96
const COLFLAG_HASCOLL = 512
const COLFLAG_HASTYPE = 4
const COLFLAG_HIDDEN = 2
const COLFLAG_NOEXPAND = 1024
const COLFLAG_NOINSERT = 98
const COLFLAG_NOTAVAIL = 128
const COLFLAG_PRIMKEY = 1
const COLFLAG_SORTERREF = 16
const COLFLAG_STORED = 64
const COLFLAG_UNIQUE = 8
const COLFLAG_VIRTUAL = 32
const COLNAME_COLUMN = 4
const COLNAME_DATABASE = 2
const COLNAME_DECLTYPE = 1
const COLNAME_N = 5
const COLNAME_NAME = 0
const COLNAME_TABLE = 3
const COLTYPE_ANY = 1
const COLTYPE_BLOB = 2
const COLTYPE_CUSTOM = 0
const COLTYPE_INT = 3
const COLTYPE_INTEGER = 4
const COLTYPE_REAL = 5
const COLTYPE_TEXT = 6
const CPUCLOCK_WHICH_PID = 0
const CPUCLOCK_WHICH_TID = 1
const CURSOR_FAULT = 4
const CURSOR_INVALID = 1
const CURSOR_REQUIRESEEK = 3
const CURSOR_SKIPNEXT = 2
const CURSOR_VALID = 0
const CURTYPE_BTREE = 0
const CURTYPE_PSEUDO = 3
const CURTYPE_SORTER = 1
const CURTYPE_VTAB = 2
const DBFLAG_EncodingFixed = 64
const DBFLAG_InternalFunc = 32
const DBFLAG_PreferBuiltin = 2
const DBFLAG_SchemaChange = 1
const DBFLAG_SchemaKnownOk = 16
const DBFLAG_Vacuum = 4
const DBFLAG_VacuumInto = 8
const DBSTAT_PAGE_PADDING_BYTES = 256
const DB_ResetWanted = 8
const DB_SchemaLoaded = 1
const DB_UnresetViews = 2
const DEFFILEMODE = 438
const DIRECT_MODE = 0
const DOTLOCK_SUFFIX = ".lock"
const DST_AUST = 2
const DST_CAN = 6
const DST_EET = 5
const DST_MET = 4
const DST_NONE = 0
const DST_USA = 1
const DST_WET = 3
const E2BIG = 7
const EACCES = 13
const EADDRINUSE = 48
const EADDRNOTAVAIL = 49
const EAFNOSUPPORT = 47
const EAGAIN = 35
const EALREADY = 37
const EAUTH = 80
const EBADF = 9
const EBADMSG = 89
const EBADRPC = 72
const EBUSY = 16
const ECANCELED = 85
const ECAPMODE = 94
const ECHILD = 10
const ECONNABORTED = 53
const ECONNREFUSED = 61
const ECONNRESET = 54
const EDEADLK = 11
const EDESTADDRREQ = 39
const EDOM = 33
const EDOOFUS = 88
const EDQUOT = 69
const EEXIST = 17
const EFAULT = 14
const EFBIG = 27
const EFTYPE = 79
const EHOSTDOWN = 64
const EHOSTUNREACH = 65
const EIDRM = 82
const EILSEQ = 86
const EINPROGRESS = 36
const EINTEGRITY = 97
const EINTR = 4
const EINVAL = 22
const EIO = 5
const EISCONN = 56
const EISDIR = 21
const ELAST = 97
const ELOOP = 62
const EMFILE = 24
const EMLINK = 31
const EMSGSIZE = 40
const EMULTIHOP = 90
const ENAMETOOLONG = 63
const ENAME_NAME = 0
const ENAME_ROWID = 3
const ENAME_SPAN = 1
const ENAME_TAB = 2
const ENEEDAUTH = 81
const ENETDOWN = 50
const ENETRESET = 52
const ENETUNREACH = 51
const ENFILE = 23
const ENOATTR = 87
const ENOBUFS = 55
const ENODEV = 19
const ENOENT = 2
const ENOEXEC = 8
const ENOLCK = 77
const ENOLINK = 91
const ENOMEM = 12
const ENOMSG = 83
const ENOPROTOOPT = 42
const ENOSPC = 28
const ENOSYS = 78
const ENOTBLK = 15
const ENOTCAPABLE = 93
const ENOTCONN = 57
const ENOTDIR = 20
const ENOTEMPTY = 66
const ENOTRECOVERABLE = 95
const ENOTSOCK = 38
const ENOTSUP = 45
const ENOTTY = 25
const ENXIO = 6
const EOF = -1
const EOPNOTSUPP = 45
const EOVERFLOW = 84
const EOWNERDEAD = 96
const EPERM = 1
const EPFNOSUPPORT = 46
const EPIPE = 32
const EPROCLIM = 67
const EPROCUNAVAIL = 76
const EPROGMISMATCH = 75
const EPROGUNAVAIL = 74
const EPROTO = 92
const EPROTONOSUPPORT = 43
const EPROTOTYPE = 41
const EP_Agg = 16
const EP_CanBeNull = 2097152
const EP_Collate = 512
const EP_Commuted = 1024
const EP_ConstFunc = 1048576
const EP_DblQuoted = 128
const EP_Distinct = 4
const EP_FixedCol = 32
const EP_FromDDL = 1073741824
const EP_FullSize = 131072
const EP_HasFunc = 8
const EP_IfNullRow = 262144
const EP_Immutable = 2
const EP_InfixFunc = 256
const EP_InnerON = 2
const EP_IntValue = 2048
const EP_IsFalse = 536870912
const EP_IsTrue = 268435456
const EP_Leaf = 8388608
const EP_NoReduce = 1
const EP_OuterON = 1
const EP_Propagate = 4194824
const EP_Quoted = 67108864
const EP_Reduced = 16384
const EP_Skip = 8192
const EP_Static = 134217728
const EP_Subquery = 4194304
const EP_Subrtn = 33554432
const EP_TokenOnly = 65536
const EP_Unlikely = 524288
const EP_VarSelect = 64
const EP_Win = 32768
const EP_WinFunc = 16777216
const EP_xIsSelect = 4096
const ERANGE = 34
const EREMOTE = 71
const EROFS = 30
const ERPCMISMATCH = 73
const ESHUTDOWN = 58
const ESOCKTNOSUPPORT = 44
const ESPIPE = 29
const ESRCH = 3
const ESTALE = 70
const ETIMEDOUT = 60
const ETOOMANYREFS = 59
const ETXTBSY = 26
const EU4_EXPR = 2
const EU4_IDX = 1
const EU4_NONE = 0
const EUSERS = 68
const EWOULDBLOCK = 35
const EXCLUDED_TABLE_NUMBER = 2
const EXCLUSIVE_LOCK = 4
const EXDEV = 18
const EXIT_FAILURE = 1
const EXIT_SUCCESS = 0
const EXPRDUP_REDUCE = 1
const EXPR_FULLSIZE = 0
const FAPPEND = 8
const FASYNC = 64
const FDSYNC = 16777216
const FD_CLOEXEC = 1
const FD_NONE = -200
const FD_SETSIZE = 1024
const FFSYNC = 128
const FILENAME_MAX = 1024
const FLAG_SIGNED = 1
const FLAG_STRING = 4
const FNDELAY = 4
const FNONBLOCK = 4
const FOPEN_MAX = 20
const FP_FAST_FMAF = 1
const FP_ILOGB0 = -2147483647
const FP_ILOGBNAN = 2147483647
const FP_INFINITE = 1
const FP_NAN = 2
const FP_NORMAL = 4
const FP_SUBNORMAL = 8
const FP_ZERO = 16
const FRDAHEAD = 512
const FREAD = 1
const FTS5CSR_EOF = 1
const FTS5CSR_FREE_ZRANK = 16
const FTS5CSR_REQUIRE_CONTENT = 2
const FTS5CSR_REQUIRE_DOCSIZE = 4
const FTS5CSR_REQUIRE_INST = 8
const FTS5CSR_REQUIRE_POSLIST = 64
const FTS5CSR_REQUIRE_RESEEK = 32
const FTS5INDEX_QUERY_DESC = 2
const FTS5INDEX_QUERY_NOOUTPUT = 32
const FTS5INDEX_QUERY_NOTOKENDATA = 128
const FTS5INDEX_QUERY_PREFIX = 1
const FTS5INDEX_QUERY_SCAN = 8
const FTS5INDEX_QUERY_SCANONETERM = 256
const FTS5INDEX_QUERY_SKIPEMPTY = 16
const FTS5INDEX_QUERY_SKIPHASH = 64
const FTS5INDEX_QUERY_TEST_NOIDX = 4
const FTS5TOKEN = 0
const FTS5_AND = 2
const FTS5_AVERAGES_ROWID = 1
const FTS5_BI_MATCH = 1
const FTS5_BI_ORDER_DESC = 128
const FTS5_BI_ORDER_RANK = 32
const FTS5_BI_ORDER_ROWID = 64
const FTS5_BI_RANK = 2
const FTS5_BI_ROWID_EQ = 4
const FTS5_BI_ROWID_GE = 16
const FTS5_BI_ROWID_LE = 8
const FTS5_CARET = 12
const FTS5_COLON = 5
const FTS5_COMMA = 13
const FTS5_CONTENT_EXTERNAL = 2
const FTS5_CONTENT_NONE = 1
const FTS5_CONTENT_NORMAL = 0
const FTS5_CORRUPT = 267
const FTS5_CURRENT_VERSION = 4
const FTS5_CURRENT_VERSION_SECUREDELETE = 5
const FTS5_DATA_DLI_B = 1
const FTS5_DATA_HEIGHT_B = 5
const FTS5_DATA_ID_B = 16
const FTS5_DATA_PADDING = 20
const FTS5_DATA_PAGE_B = 31
const FTS5_DATA_ZERO_PADDING = 8
const FTS5_DEFAULT_AUTOMERGE = 4
const FTS5_DEFAULT_CRISISMERGE = 16
const FTS5_DEFAULT_DELETE_AUTOMERGE = 10
const FTS5_DEFAULT_HASHSIZE = 1048576
const FTS5_DEFAULT_NEARDIST = 10
const FTS5_DEFAULT_PAGE_SIZE = 4050
const FTS5_DEFAULT_RANK = "bm25"
const FTS5_DEFAULT_USERMERGE = 4
const FTS5_DETAIL_COLUMNS = 2
const FTS5_DETAIL_FULL = 0
const FTS5_DETAIL_NONE = 1
const FTS5_EOF = 0
const FTS5_LCP = 7
const FTS5_LP = 10
const FTS5_MAIN_PREFIX = 48
const FTS5_MAX_LEVEL = 64
const FTS5_MAX_PAGE_SIZE = 65536
const FTS5_MAX_PREFIX_INDEXES = 31
const FTS5_MAX_SEGMENT = 2000
const FTS5_MAX_TOKEN_SIZE = 32768
const FTS5_MERGE_NLIST = 16
const FTS5_MINUS = 6
const FTS5_MIN_DLIDX_SIZE = 4
const FTS5_NOINLINE = "SQLITE_NOINLINE"
const FTS5_NOT = 3
const FTS5_OPT_WORK_UNIT = 1000
const FTS5_OR = 1
const FTS5_PATTERN_GLOB = 66
const FTS5_PATTERN_LIKE = 65
const FTS5_PATTERN_NONE = 0
const FTS5_PLAN_MATCH = 1
const FTS5_PLAN_ROWID = 6
const FTS5_PLAN_SCAN = 5
const FTS5_PLAN_SORTED_MATCH = 4
const FTS5_PLAN_SOURCE = 2
const FTS5_PLAN_SPECIAL = 3
const FTS5_PLUS = 14
const FTS5_PORTER_MAX_TOKEN = 64
const FTS5_RANK_NAME = "rank"
const FTS5_RCP = 8
const FTS5_REMOVE_DIACRITICS_COMPLEX = 2
const FTS5_REMOVE_DIACRITICS_NONE = 0
const FTS5_REMOVE_DIACRITICS_SIMPLE = 1
const FTS5_ROWID_NAME = "rowid"
const FTS5_RP = 11
const FTS5_SEGITER_ONETERM = 1
const FTS5_SEGITER_REVERSE = 2
const FTS5_STAR = 15
const FTS5_STMT_DELETE_CONTENT = 5
const FTS5_STMT_DELETE_DOCSIZE = 7
const FTS5_STMT_INSERT_CONTENT = 3
const FTS5_STMT_LOOKUP = 2
const FTS5_STMT_LOOKUP_DOCSIZE = 8
const FTS5_STMT_REPLACE_CONFIG = 9
const FTS5_STMT_REPLACE_CONTENT = 4
const FTS5_STMT_REPLACE_DOCSIZE = 6
const FTS5_STMT_SCAN = 10
const FTS5_STMT_SCAN_ASC = 0
const FTS5_STMT_SCAN_DESC = 1
const FTS5_STRING = 9
const FTS5_STRUCTURE_ROWID = 10
const FTS5_STRUCTURE_V2 = "\xff\x00\x00\x01"
const FTS5_TERM = 4
const FTS5_TOKENIZE_AUX = 8
const FTS5_TOKENIZE_DOCUMENT = 4
const FTS5_TOKENIZE_PREFIX = 2
const FTS5_TOKENIZE_QUERY = 1
const FTS5_TOKEN_COLOCATED = 1
const FTS5_VOCAB_COL = 0
const FTS5_VOCAB_COL_SCHEMA = "term, col, doc, cnt"
const FTS5_VOCAB_INSTANCE = 2
const FTS5_VOCAB_INST_SCHEMA = "term, doc, col, offset"
const FTS5_VOCAB_ROW = 1
const FTS5_VOCAB_ROW_SCHEMA = "term, doc, cnt"
const FTS5_VOCAB_TERM_EQ = 1
const FTS5_VOCAB_TERM_GE = 2
const FTS5_VOCAB_TERM_LE = 4
const FTS5_WORK_UNIT = 64
const FULLY_WITHIN = 2
const FUNC_PERFECT_MATCH = 6
const FWRITE = 2
const F_ADD_SEALS = 19
const F_CANCEL = 5
const F_DUP2FD = 10
const F_DUP2FD_CLOEXEC = 18
const F_DUPFD = 0
const F_DUPFD_CLOEXEC = 17
const F_GETFD = 1
const F_GETFL = 3
const F_GETLK = 11
const F_GETOWN = 5
const F_GET_SEALS = 20
const F_ISUNIONSTACK = 21
const F_KINFO = 22
const F_LOCK = 1
const F_OGETLK = 7
const F_OK = 0
const F_OSETLK = 8
const F_OSETLKW = 9
const F_RDAHEAD = 16
const F_RDLCK = 1
const F_READAHEAD = 15
const F_SEAL_GROW = 4
const F_SEAL_SEAL = 1
const F_SEAL_SHRINK = 2
const F_SEAL_WRITE = 8
const F_SETFD = 2
const F_SETFL = 4
const F_SETLK = 12
const F_SETLKW = 13
const F_SETLK_REMOTE = 14
const F_SETOWN = 6
const F_TEST = 3
const F_TLOCK = 2
const F_ULOCK = 0
const F_UNLCK = 2
const F_UNLCKSYS = 4
const F_WRLCK = 3
const GCC_VERSION = 4002001
const GEOPOLY_PI = 3.141592653589793
const H4DISC = 7
const HASHSIZE = 97
const HASHTABLE_HASH_1 = 383
const HASHTABLE_NPAGE = 4096
const HASHTABLE_NPAGE_ONE = 4096
const HASHTABLE_NSLOT = 8192
const HAVE_FCHOWN = 1
const HAVE_FULLFSYNC = 0
const HAVE_GETHOSTUUID = 0
const HAVE_LSTAT = 1
const HAVE_MREMAP = 0
const HAVE_READLINK = 1
const HAVE_USLEEP = 1
const HUGE = "MAXFLOAT"
const HUGE_VAL = 0
const HUGE_VALF = 0
const HUGE_VALL = 0
const INCRINIT_NORMAL = 0
const INCRINIT_ROOT = 2
const INCRINIT_TASK = 1
const INFINITY = 0
const INHERIT_COPY = 1
const INHERIT_NONE = 2
const INHERIT_SHARE = 0
const INHERIT_ZERO = 3
const INITFLAG_AlterAdd = 3
const INITFLAG_AlterDrop = 2
const INITFLAG_AlterMask = 3
const INITFLAG_AlterRename = 1
const INLINEFUNC_affinity = 4
const INLINEFUNC_coalesce = 0
const INLINEFUNC_expr_compare = 3
const INLINEFUNC_expr_implies_expr = 2
const INLINEFUNC_iif = 5
const INLINEFUNC_implies_nonnull_row = 1
const INLINEFUNC_sqlite_offset = 6
const INLINEFUNC_unlikely = 99
const INTERFACE = 1
const IN_INDEX_EPH = 2
const IN_INDEX_INDEX_ASC = 3
const IN_INDEX_INDEX_DESC = 4
const IN_INDEX_LOOP = 4
const IN_INDEX_MEMBERSHIP = 2
const IN_INDEX_NOOP = 5
const IN_INDEX_NOOP_OK = 1
const IN_INDEX_ROWID = 1
const IOCPARM_MASK = 8191
const IOCPARM_MAX = 8192
const IOCPARM_SHIFT = 13
const IOC_DIRMASK = 3758096384
const IOC_IN = 2147483648
const IOC_INOUT = 3221225472
const IOC_OUT = 1073741824
const IOC_VOID = 536870912
const ITIMER_PROF = 2
const ITIMER_REAL = 0
const ITIMER_VIRTUAL = 1
const IsStat4 = 1
const JEACH_ATOM = 3
const JEACH_FULLKEY = 6
const JEACH_ID = 4
const JEACH_JSON = 8
const JEACH_KEY = 0
const JEACH_PARENT = 5
const JEACH_PATH = 7
const JEACH_ROOT = 9
const JEACH_TYPE = 2
const JEACH_VALUE = 1
const JEDIT_DEL = 1
const JEDIT_INS = 3
const JEDIT_REPL = 2
const JEDIT_SET = 4
const JSONB_ARRAY = 11
const JSONB_FALSE = 2
const JSONB_FLOAT = 5
const JSONB_FLOAT5 = 6
const JSONB_INT = 3
const JSONB_INT5 = 4
const JSONB_NULL = 0
const JSONB_OBJECT = 12
const JSONB_TEXT = 7
const JSONB_TEXT5 = 9
const JSONB_TEXTJ = 8
const JSONB_TEXTRAW = 10
const JSONB_TRUE = 1
const JSON_ABPATH = 3
const JSON_BLOB = 8
const JSON_CACHE_ID = -429938
const JSON_CACHE_SIZE = 4
const JSON_EDITABLE = 1
const JSON_INVALID_CHAR = 629145
const JSON_ISSET = 4
const JSON_JSON = 1
const JSON_KEEPERROR = 2
const JSON_LOOKUP_ERROR = 4294967295
const JSON_LOOKUP_NOTFOUND = 4294967294
const JSON_LOOKUP_PATHERROR = 4294967293
const JSON_MAX_DEPTH = 1000
const JSON_MERGE_BADPATCH = 2
const JSON_MERGE_BADTARGET = 1
const JSON_MERGE_OK = 0
const JSON_MERGE_OOM = 3
const JSON_SQL = 2
const JSON_SUBTYPE = 74
const JSTRING_ERR = 4
const JSTRING_MALFORMED = 2
const JSTRING_OOM = 1
const JT_CROSS = 2
const JT_ERROR = 128
const JT_INNER = 1
const JT_LEFT = 8
const JT_LTORJ = 64
const JT_NATURAL = 4
const JT_OUTER = 32
const JT_RIGHT = 16
const KEYINFO_ORDER_BIGNULL = 2
const KEYINFO_ORDER_DESC = 1
const LEGACY_SCHEMA_TABLE = "sqlite_master"
const LEGACY_TEMP_SCHEMA_TABLE = "sqlite_temp_master"
const LITTLE_ENDIAN = 1234
const LOCATE_NOERR = 2
const LOCATE_VIEW = 1
const LOCK_EX = 2
const LOCK_NB = 4
const LOCK_SH = 1
const LOCK_UN = 8
const LONGDOUBLE_TYPE = 0
const LOOKASIDE_SMALL = 128
const L_INCR = 1
const L_SET = 0
const L_XTND = 2
const L_ctermid = 1024
const L_cuserid = 17
const L_tmpnam = 1024
const M10d_Any = 1
const M10d_No = 2
const M10d_Yes = 0
const MADV_AUTOSYNC = 7
const MADV_CORE = 9
const MADV_DONTNEED = 4
const MADV_FREE = 5
const MADV_NOCORE = 8
const MADV_NORMAL = 0
const MADV_NOSYNC = 6
const MADV_PROTECT = 10
const MADV_RANDOM = 1
const MADV_SEQUENTIAL = 2
const MADV_WILLNEED = 3
const MAP_32BIT = 524288
const MAP_ALIGNED_SUPER = 16777216
const MAP_ALIGNMENT_MASK = 4278190080
const MAP_ALIGNMENT_SHIFT = 24
const MAP_ANON = 4096
const MAP_ANONYMOUS = 4096
const MAP_COPY = 2
const MAP_EXCL = 16384
const MAP_FAILED = -1
const MAP_FILE = 0
const MAP_FIXED = 16
const MAP_GUARD = 8192
const MAP_HASSEMAPHORE = 512
const MAP_NOCORE = 131072
const MAP_NOSYNC = 2048
const MAP_PREFAULT_READ = 262144
const MAP_PRIVATE = 2
const MAP_RESERVED0020 = 32
const MAP_RESERVED0040 = 64
const MAP_RESERVED0080 = 128
const MAP_RESERVED0100 = 256
const MAP_SHARED = 1
const MAP_STACK = 1024
const MATH_ERREXCEPT = 2
const MATH_ERRNO = 1
const MAX_PATHNAME = 512
const MAX_SECTOR_SIZE = 65536
const MCL_CURRENT = 1
const MCL_FUTURE = 2
const MEMJOURNAL_DFLT_FILECHUNKSIZE = 1024
const MEMTYPE_HEAP = 1
const MEMTYPE_LOOKASIDE = 2
const MEMTYPE_PCACHE = 4
const MEM_AffMask = 63
const MEM_Agg = 32768
const MEM_Blob = 16
const MEM_Cleared = 256
const MEM_Dyn = 4096
const MEM_Ephem = 16384
const MEM_FromBind = 64
const MEM_Int = 4
const MEM_IntReal = 32
const MEM_Null = 1
const MEM_Real = 8
const MEM_Static = 8192
const MEM_Str = 2
const MEM_Subtype = 2048
const MEM_Term = 512
const MEM_TypeMask = 3519
const MEM_Undefined = 0
const MEM_Zero = 1024
const MFD_ALLOW_SEALING = 2
const MFD_CLOEXEC = 1
const MFD_HUGETLB = 4
const MFD_HUGE_16GB = 2281701376
const MFD_HUGE_16MB = 1610612736
const MFD_HUGE_1GB = 2013265920
const MFD_HUGE_1MB = 1342177280
const MFD_HUGE_256MB = 1879048192
const MFD_HUGE_2GB = 2080374784
const MFD_HUGE_2MB = 1409286144
const MFD_HUGE_32MB = 1677721600
const MFD_HUGE_512KB = 1275068416
const MFD_HUGE_512MB = 1946157056
const MFD_HUGE_64KB = 1073741824
const MFD_HUGE_8MB = 1543503872
const MFD_HUGE_MASK = 4227858432
const MFD_HUGE_SHIFT = 26
const MINCORE_INCORE = 1
const MINCORE_MODIFIED = 4
const MINCORE_MODIFIED_OTHER = 16
const MINCORE_REFERENCED = 2
const MINCORE_REFERENCED_OTHER = 8
const MINCORE_SUPER = 96
const MSVC_VERSION = 0
const MS_ASYNC = 1
const MS_INVALIDATE = 2
const MS_SYNC = 0
const M_1_PI = 0
const M_2_PI = 0
const M_2_SQRTPI = 0
const M_E = 0
const M_LN10 = 0
const M_LN2 = 0
const M_LOG10E = 0
const M_LOG2E = 0
const M_PI = 3.141592653589793
const M_PI_2 = 0
const M_PI_4 = 0
const M_SQRT1_2 = 0
const M_SQRT2 = 0
const NAN = 0
const NB = 3
const NC_AllowAgg = 1
const NC_AllowWin = 16384
const NC_Complex = 8192
const NC_FromDDL = 262144
const NC_GenCol = 8
const NC_HasAgg = 16
const NC_HasWin = 32768
const NC_IdxExpr = 32
const NC_InAggFunc = 131072
const NC_IsCheck = 4
const NC_IsDDL = 65536
const NC_MinMaxAgg = 4096
const NC_NoSelect = 524288
const NC_OrderAgg = 134217728
const NC_PartIdx = 2
const NC_SelfRef = 46
const NC_Subquery = 64
const NC_UAggInfo = 256
const NC_UBaseReg = 1024
const NC_UEList = 128
const NC_UUpsert = 512
const NC_Where = 1048576
const NDEBUG = 1
const NETGRAPHDISC = 6
const NFDBITS = 0
const NN = 1
const NOT_WITHIN = 0
const NO_LOCK = 0
const N_OR_COST = 3
const N_SORT_BUCKET = 32
const N_STATEMENT = 8
const OE_Abort = 2
const OE_Cascade = 10
const OE_Default = 11
const OE_Fail = 3
const OE_Ignore = 4
const OE_None = 0
const OE_Replace = 5
const OE_Restrict = 7
const OE_Rollback = 1
const OE_SetDflt = 9
const OE_SetNull = 8
const OE_Update = 6
const OMIT_TEMPDB = 0
const ONEPASS_MULTI = 2
const ONEPASS_OFF = 0
const ONEPASS_SINGLE = 1
const OPFLAG_APPEND = 8
const OPFLAG_AUXDELETE = 4
const OPFLAG_BULKCSR = 1
const OPFLAG_BYTELENARG = 192
const OPFLAG_EPHEM = 1
const OPFLAG_FORDELETE = 8
const OPFLAG_ISNOOP = 64
const OPFLAG_ISUPDATE = 4
const OPFLAG_LASTROWID = 32
const OPFLAG_LENGTHARG = 64
const OPFLAG_NCHANGE = 1
const OPFLAG_NOCHNG = 1
const OPFLAG_NOCHNG_MAGIC = 109
const OPFLAG_P2ISREG = 16
const OPFLAG_PERMUTE = 1
const OPFLAG_PREFORMAT = 128
const OPFLAG_SAVEPOSITION = 2
const OPFLAG_SEEKEQ = 2
const OPFLAG_TYPEOFARG = 128
const OPFLAG_USESEEKRESULT = 16
const OPFLG_IN1 = 2
const OPFLG_IN2 = 4
const OPFLG_IN3 = 8
const OPFLG_JUMP = 1
const OPFLG_NCYCLE = 64
const OPFLG_OUT2 = 16
const OPFLG_OUT3 = 32
const OP_Abortable = 189
const OP_Add = 106
const OP_AddImm = 86
const OP_Affinity = 96
const OP_AggFinal = 165
const OP_AggInverse = 161
const OP_AggStep = 162
const OP_AggStep1 = 163
const OP_AggValue = 164
const OP_And = 44
const OP_AutoCommit = 1
const OP_BeginSubrtn = 74
const OP_BitAnd = 102
const OP_BitNot = 114
const OP_BitOr = 103
const OP_Blob = 77
const OP_Cast = 88
const OP_Checkpoint = 3
const OP_Clear = 145
const OP_Close = 122
const OP_ClrSubtype = 180
const OP_CollSeq = 85
const OP_Column = 94
const OP_ColumnsUsed = 123
const OP_Compare = 90
const OP_Concat = 111
const OP_Copy = 80
const OP_Count = 98
const OP_CreateBtree = 147
const OP_CursorHint = 185
const OP_CursorLock = 167
const OP_CursorUnlock = 168
const OP_DecrJumpZero = 61
const OP_DeferredSeek = 141
const OP_Delete = 130
const OP_Destroy = 144
const OP_Divide = 109
const OP_DropIndex = 152
const OP_DropTable = 151
const OP_DropTrigger = 154
const OP_ElseEq = 58
const OP_EndCoroutine = 68
const OP_Eq = 53
const OP_Expire = 166
const OP_Explain = 188
const OP_Filter = 64
const OP_FilterAdd = 183
const OP_FinishSeek = 143
const OP_FkCheck = 83
const OP_FkCounter = 158
const OP_FkIfZero = 49
const OP_Found = 29
const OP_Function = 66
const OP_Ge = 57
const OP_GetSubtype = 181
const OP_Gosub = 10
const OP_Goto = 9
const OP_Gt = 54
const OP_Halt = 70
const OP_HaltIfNull = 69
const OP_IdxDelete = 140
const OP_IdxGE = 45
const OP_IdxGT = 41
const OP_IdxInsert = 138
const OP_IdxLE = 40
const OP_IdxLT = 42
const OP_IdxRowid = 142
const OP_If = 16
const OP_IfNoHope = 26
const OP_IfNot = 17
const OP_IfNotOpen = 25
const OP_IfNotZero = 60
const OP_IfNullRow = 20
const OP_IfPos = 59
const OP_IfSmaller = 33
const OP_IncrVacuum = 62
const OP_Init = 8
const OP_InitCoroutine = 11
const OP_Insert = 128
const OP_Int64 = 72
const OP_IntCopy = 82
const OP_Integer = 71
const OP_IntegrityCk = 155
const OP_IsNull = 50
const OP_IsTrue = 91
const OP_IsType = 18
const OP_JournalMode = 4
const OP_Jump = 14
const OP_Last = 32
const OP_Le = 55
const OP_LoadAnalysis = 150
const OP_Lt = 56
const OP_MakeRecord = 97
const OP_MaxPgcnt = 179
const OP_MemMax = 159
const OP_Move = 79
const OP_Multiply = 108
const OP_MustBeInt = 13
const OP_Ne = 52
const OP_NewRowid = 127
const OP_Next = 39
const OP_NoConflict = 27
const OP_Noop = 187
const OP_Not = 19
const OP_NotExists = 31
const OP_NotFound = 28
const OP_NotNull = 51
const OP_Null = 75
const OP_NullRow = 136
const OP_Offset = 93
const OP_OffsetLimit = 160
const OP_Once = 15
const OP_OpenAutoindex = 116
const OP_OpenDup = 115
const OP_OpenEphemeral = 118
const OP_OpenPseudo = 121
const OP_OpenRead = 112
const OP_OpenWrite = 113
const OP_Or = 43
const OP_Pagecount = 178
const OP_Param = 157
const OP_ParseSchema = 149
const OP_Permutation = 89
const OP_Prev = 38
const OP_Program = 48
const OP_PureFunc = 65
const OP_ReadCookie = 99
const OP_Real = 153
const OP_RealAffinity = 87
const OP_ReleaseReg = 186
const OP_Remainder = 110
const OP_ReopenIdx = 101
const OP_ResetCount = 131
const OP_ResetSorter = 146
const OP_ResultRow = 84
const OP_Return = 67
const OP_Rewind = 36
const OP_RowCell = 129
const OP_RowData = 134
const OP_RowSetAdd = 156
const OP_RowSetRead = 46
const OP_RowSetTest = 47
const OP_Rowid = 135
const OP_SCopy = 81
const OP_Savepoint = 0
const OP_SeekEnd = 137
const OP_SeekGE = 23
const OP_SeekGT = 24
const OP_SeekHit = 125
const OP_SeekLE = 22
const OP_SeekLT = 21
const OP_SeekRowid = 30
const OP_SeekScan = 124
const OP_Sequence = 126
const OP_SequenceTest = 120
const OP_SetCookie = 100
const OP_SetSubtype = 182
const OP_ShiftLeft = 104
const OP_ShiftRight = 105
const OP_SoftNull = 76
const OP_Sort = 35
const OP_SorterCompare = 132
const OP_SorterData = 133
const OP_SorterInsert = 139
const OP_SorterNext = 37
const OP_SorterOpen = 119
const OP_SorterSort = 34
const OP_SqlExec = 148
const OP_String = 73
const OP_String8 = 117
const OP_Subtract = 107
const OP_TableLock = 169
const OP_Trace = 184
const OP_Transaction = 2
const OP_TypeCheck = 95
const OP_VBegin = 170
const OP_VCheck = 174
const OP_VColumn = 176
const OP_VCreate = 171
const OP_VDestroy = 172
const OP_VFilter = 6
const OP_VInitIn = 175
const OP_VNext = 63
const OP_VOpen = 173
const OP_VRename = 177
const OP_VUpdate = 7
const OP_Vacuum = 5
const OP_Variable = 78
const OP_Yield = 12
const OP_ZeroOrNull = 92
const OS_VXWORKS = 0
const O_ACCMODE = 3
const O_APPEND = 8
const O_ASYNC = 64
const O_BINARY = 0
const O_CLOEXEC = 1048576
const O_CREAT = 512
const O_DIRECT = 65536
const O_DIRECTORY = 131072
const O_DSYNC = 16777216
const O_EMPTY_PATH = 33554432
const O_EXCL = 2048
const O_EXEC = 262144
const O_EXLOCK = 32
const O_FSYNC = 128
const O_LARGEFILE = 0
const O_NDELAY = 4
const O_NOCTTY = 32768
const O_NOFOLLOW = 256
const O_NONBLOCK = 4
const O_PATH = 4194304
const O_RDONLY = 0
const O_RDWR = 2
const O_RESOLVE_BENEATH = 8388608
const O_SEARCH = 262144
const O_SHLOCK = 16
const O_SYNC = 128
const O_TRUNC = 1024
const O_TTY_INIT = 524288
const O_VERIFY = 2097152
const O_WRONLY = 1
const P4_COLLSEQ = -2
const P4_DYNAMIC = -6
const P4_EXPR = -9
const P4_FREE_IF_LE = -6
const P4_FUNCCTX = -15
const P4_FUNCDEF = -7
const P4_INT32 = -3
const P4_INT64 = -13
const P4_INTARRAY = -14
const P4_KEYINFO = -8
const P4_MEM = -10
const P4_NOTUSED = 0
const P4_REAL = -12
const P4_STATIC = -1
const P4_SUBPROGRAM = -4
const P4_TABLE = -5
const P4_TABLEREF = -16
const P4_TRANSIENT = 0
const P4_VTAB = -11
const P5_ConstraintCheck = 3
const P5_ConstraintFK = 4
const P5_ConstraintNotNull = 1
const P5_ConstraintUnique = 2
const PAGER_CACHESPILL = 32
const PAGER_CKPT_FULLFSYNC = 16
const PAGER_ERROR = 6
const PAGER_FLAGS_MASK = 56
const PAGER_FULLFSYNC = 8
const PAGER_GET_NOCONTENT = 1
const PAGER_GET_READONLY = 2
const PAGER_JOURNALMODE_DELETE = 0
const PAGER_JOURNALMODE_MEMORY = 4
const PAGER_JOURNALMODE_OFF = 2
const PAGER_JOURNALMODE_PERSIST = 1
const PAGER_JOURNALMODE_QUERY = -1
const PAGER_JOURNALMODE_TRUNCATE = 3
const PAGER_JOURNALMODE_WAL = 5
const PAGER_LOCKINGMODE_EXCLUSIVE = 1
const PAGER_LOCKINGMODE_NORMAL = 0
const PAGER_LOCKINGMODE_QUERY = -1
const PAGER_MEMORY = 2
const PAGER_OMIT_JOURNAL = 1
const PAGER_OPEN = 0
const PAGER_READER = 1
const PAGER_STAT_HIT = 0
const PAGER_STAT_MISS = 1
const PAGER_STAT_SPILL = 3
const PAGER_STAT_WRITE = 2
const PAGER_SYNCHRONOUS_EXTRA = 4
const PAGER_SYNCHRONOUS_FULL = 3
const PAGER_SYNCHRONOUS_MASK = 7
const PAGER_SYNCHRONOUS_NORMAL = 2
const PAGER_SYNCHRONOUS_OFF = 1
const PAGER_WRITER_CACHEMOD = 3
const PAGER_WRITER_DBMOD = 4
const PAGER_WRITER_FINISHED = 5
const PAGER_WRITER_LOCKED = 2
const PARSE_MODE_DECLARE_VTAB = 1
const PARSE_MODE_NORMAL = 0
const PARSE_MODE_RENAME = 2
const PARSE_MODE_UNMAP = 3
const PARTLY_WITHIN = 1
const PCACHE1_MIGHT_USE_GROUP_MUTEX = 1
const PCACHE_DIRTYLIST_ADD = 2
const PCACHE_DIRTYLIST_FRONT = 3
const PCACHE_DIRTYLIST_REMOVE = 1
const PDP_ENDIAN = 3412
const PENDING_BYTE = 0
const PENDING_LOCK = 3
const PGHDR_CLEAN = 1
const PGHDR_DIRTY = 2
const PGHDR_DONT_WRITE = 16
const PGHDR_MMAP = 32
const PGHDR_NEED_SYNC = 8
const PGHDR_WAL_APPEND = 64
const PGHDR_WRITEABLE = 4
const POSIX_FADV_DONTNEED = 4
const POSIX_FADV_NOREUSE = 5
const POSIX_FADV_NORMAL = 0
const POSIX_FADV_RANDOM = 1
const POSIX_FADV_SEQUENTIAL = 2
const POSIX_FADV_WILLNEED = 3
const POSIX_MADV_DONTNEED = 4
const POSIX_MADV_NORMAL = 0
const POSIX_MADV_RANDOM = 1
const POSIX_MADV_SEQUENTIAL = 2
const POSIX_MADV_WILLNEED = 3
const PPPDISC = 5
const PREFERRED_SCHEMA_TABLE = "sqlite_schema"
const PREFERRED_TEMP_SCHEMA_TABLE = "sqlite_temp_schema"
const PROT_EXEC = 4
const PROT_NONE = 0
const PROT_READ = 1
const PROT_WRITE = 2
const PTF_INTKEY = 1
const PTF_LEAF = 8
const PTF_LEAFDATA = 4
const PTF_ZERODATA = 2
const PTRMAP_BTREE = 5
const PTRMAP_FREEPAGE = 2
const PTRMAP_OVERFLOW1 = 3
const PTRMAP_OVERFLOW2 = 4
const PTRMAP_ROOTPAGE = 1
const P_tmpdir = "/tmp/"
const PragFlg_NeedSchema = 1
const PragFlg_NoColumns = 2
const PragFlg_NoColumns1 = 4
const PragFlg_ReadOnly = 8
const PragFlg_Result0 = 16
const PragFlg_Result1 = 32
const PragFlg_SchemaOpt = 64
const PragFlg_SchemaReq = 128
const PragTyp_ACTIVATE_EXTENSIONS = 0
const PragTyp_ANALYSIS_LIMIT = 1
const PragTyp_AUTO_VACUUM = 3
const PragTyp_BUSY_TIMEOUT = 5
const PragTyp_CACHE_SIZE = 6
const PragTyp_CACHE_SPILL = 7
const PragTyp_CASE_SENSITIVE_LIKE = 8
const PragTyp_COLLATION_LIST = 9
const PragTyp_COMPILE_OPTIONS = 10
const PragTyp_DATABASE_LIST = 12
const PragTyp_DATA_STORE_DIRECTORY = 11
const PragTyp_DEFAULT_CACHE_SIZE = 13
const PragTyp_ENCODING = 14
const PragTyp_FLAG = 4
const PragTyp_FOREIGN_KEY_CHECK = 15
const PragTyp_FOREIGN_KEY_LIST = 16
const PragTyp_FUNCTION_LIST = 17
const PragTyp_HARD_HEAP_LIMIT = 18
const PragTyp_HEADER_VALUE = 2
const PragTyp_INCREMENTAL_VACUUM = 19
const PragTyp_INDEX_INFO = 20
const PragTyp_INDEX_LIST = 21
const PragTyp_INTEGRITY_CHECK = 22
const PragTyp_JOURNAL_MODE = 23
const PragTyp_JOURNAL_SIZE_LIMIT = 24
const PragTyp_LOCKING_MODE = 26
const PragTyp_LOCK_PROXY_FILE = 25
const PragTyp_LOCK_STATUS = 44
const PragTyp_MMAP_SIZE = 28
const PragTyp_MODULE_LIST = 29
const PragTyp_OPTIMIZE = 30
const PragTyp_PAGE_COUNT = 27
const PragTyp_PAGE_SIZE = 31
const PragTyp_PRAGMA_LIST = 32
const PragTyp_SECURE_DELETE = 33
const PragTyp_SHRINK_MEMORY = 34
const PragTyp_SOFT_HEAP_LIMIT = 35
const PragTyp_STATS = 45
const PragTyp_SYNCHRONOUS = 36
const PragTyp_TABLE_INFO = 37
const PragTyp_TABLE_LIST = 38
const PragTyp_TEMP_STORE = 39
const PragTyp_TEMP_STORE_DIRECTORY = 40
const PragTyp_THREADS = 41
const PragTyp_WAL_AUTOCHECKPOINT = 42
const PragTyp_WAL_CHECKPOINT = 43
const RAND_MAX = 2147483647
const RBU_CREATE_STATE = "CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)"
const RBU_DELETE = 2
const RBU_ENABLE_DELTA_CKSUM = 0
const RBU_EXCLUSIVE_CHECKPOINT = "rbu_exclusive_checkpoint"
const RBU_IDX_DELETE = 4
const RBU_IDX_INSERT = 5
const RBU_INSERT = 1
const RBU_PK_EXTERNAL = 3
const RBU_PK_IPK = 2
const RBU_PK_NONE = 1
const RBU_PK_NOTABLE = 0
const RBU_PK_VTAB = 5
const RBU_PK_WITHOUT_ROWID = 4
const RBU_REPLACE = 3
const RBU_STAGE_CAPTURE = 3
const RBU_STAGE_CKPT = 4
const RBU_STAGE_DONE = 5
const RBU_STAGE_MOVE = 2
const RBU_STAGE_OAL = 1
const RBU_STATE_CKPT = 6
const RBU_STATE_COOKIE = 7
const RBU_STATE_DATATBL = 10
const RBU_STATE_IDX = 3
const RBU_STATE_OALSZ = 8
const RBU_STATE_PHASEONESTEP = 9
const RBU_STATE_PROGRESS = 5
const RBU_STATE_ROW = 4
const RBU_STATE_STAGE = 1
const RBU_STATE_TBL = 2
const RBU_UPDATE = 6
const RBU_ZIPVFS_CTRL_FILE_POINTER = 230439
const READMARK_NOT_USED = 4294967295
const READ_LOCK = 1
const RESERVED_BYTE = 1
const RESERVED_LOCK = 2
const RFCENVG = 2048
const RFCFDG = 4096
const RFCNAMEG = 1024
const RFENVG = 2
const RFFDG = 4
const RFFLAGS = 2416930932
const RFHIGHPID = 262144
const RFKERNELONLY = 268828672
const RFLINUXTHPN = 65536
const RFMEM = 32
const RFNAMEG = 1
const RFNOTEG = 8
const RFNOWAIT = 64
const RFPPWAIT = 2147483648
const RFPROC = 16
const RFPROCDESC = 268435456
const RFSIGSHARE = 16384
const RFSPAWN = 2147483648
const RFSTOPPED = 131072
const RFTHREAD = 8192
const RFTSIGMASK = 255
const RFTSIGSHIFT = 20
const RFTSIGZMB = 524288
const RNDAWAY = 0
const RNDTOWARDS = 0
const ROWSET_ALLOCATION_SIZE = 1024
const ROWSET_ENTRY_PER_CHUNK = 1016
const ROWSET_NEXT = 2
const ROWSET_SORTED = 1
const RTLD_DEEPBIND = 16384
const RTLD_DEFAULT = -2
const RTLD_DI_LINKMAP = 2
const RTLD_DI_MAX = 6
const RTLD_DI_ORIGIN = 6
const RTLD_DI_SERINFO = 4
const RTLD_DI_SERINFOSIZE = 5
const RTLD_GLOBAL = 256
const RTLD_LAZY = 1
const RTLD_LOCAL = 0
const RTLD_MODEMASK = 3
const RTLD_NEXT = -1
const RTLD_NODELETE = 4096
const RTLD_NOLOAD = 8192
const RTLD_NOW = 2
const RTLD_SELF = -3
const RTLD_TRACE = 512
const RTREE_CACHE_SZ = 5
const RTREE_CHECK_MAX_ERROR = 100
const RTREE_COORD_INT32 = 1
const RTREE_COORD_REAL32 = 0
const RTREE_DEFAULT_ROWEST = 1048576
const RTREE_EQ = 65
const RTREE_FALSE = 64
const RTREE_GE = 68
const RTREE_GT = 69
const RTREE_LE = 66
const RTREE_LT = 67
const RTREE_MATCH = 70
const RTREE_MAXCELLS = 51
const RTREE_MAX_AUX_COLUMN = 100
const RTREE_MAX_DEPTH = 40
const RTREE_MAX_DIMENSIONS = 5
const RTREE_MIN_ROWEST = 100
const RTREE_QUERY = 71
const RTREE_TRUE = 63
const RTREE_ZERO = 0
const R_OK = 4
const SAVEPOINT_BEGIN = 0
const SAVEPOINT_RELEASE = 1
const SAVEPOINT_ROLLBACK = 2
const SBT_MAX = 9223372036854775807
const SCHEMA_ROOT = 1
const SEEK_CUR = 1
const SEEK_DATA = 3
const SEEK_END = 2
const SEEK_HOLE = 4
const SEEK_SET = 0
const SESSIONS_ROWID = "_rowid_"
const SESSIONS_STRM_CHUNK_SIZE = 1024
const SESSION_MAX_BUFFER_SZ = 2147483391
const SESSION_UPDATE_CACHE_SZ = 12
const SF_APPEND = 262144
const SF_ARCHIVED = 65536
const SF_Aggregate = 8
const SF_All = 2
const SF_ComplexResult = 262144
const SF_Compound = 256
const SF_Converted = 65536
const SF_CopyCte = 67108864
const SF_Distinct = 1
const SF_Expanded = 64
const SF_FixedLimit = 16384
const SF_HasAgg = 16
const SF_HasTypeInfo = 128
const SF_IMMUTABLE = 131072
const SF_IncludeHidden = 131072
const SF_MaybeConvert = 32768
const SF_MinMaxAgg = 4096
const SF_MultiPart = 33554432
const SF_MultiValue = 1024
const SF_NOUNLINK = 1048576
const SF_NestedFrom = 2048
const SF_NoopOrderBy = 4194304
const SF_OrderByReqd = 134217728
const SF_PushDown = 16777216
const SF_Recursive = 8192
const SF_Resolved = 4
const SF_SETTABLE = 4294901760
const SF_SNAPSHOT = 2097152
const SF_UFSrcCheck = 8388608
const SF_UpdateFrom = 268435456
const SF_UsesEphemeral = 32
const SF_Values = 512
const SF_View = 2097152
const SF_WhereBegin = 524288
const SF_WinRewrite = 1048576
const SHARED_FIRST = 2
const SHARED_LOCK = 1
const SHARED_SIZE = 510
const SHM_ALLOW_SEALING = 1
const SHM_GROW_ON_WRITE = 2
const SHM_LARGEPAGE = 4
const SHM_LARGEPAGE_ALLOC_DEFAULT = 0
const SHM_LARGEPAGE_ALLOC_HARD = 2
const SHM_LARGEPAGE_ALLOC_NOWAIT = 1
const SHM_RENAME_EXCHANGE = 2
const SHM_RENAME_NOREPLACE = 1
const SLIPDISC = 4
const SLOT_2_0 = 2080895
const SLOT_4_2_0 = 4028612735
const SORTER_MAX_MERGE_COUNT = 16
const SORTER_TYPE_INTEGER = 1
const SORTER_TYPE_TEXT = 2
const SORTFLAG_UseSorter = 1
const SPACECTL_DEALLOC = 1
const SPACECTL_F_SUPPORTED = 0
const SPILLFLAG_NOSYNC = 4
const SPILLFLAG_OFF = 1
const SPILLFLAG_ROLLBACK = 2
const SQLITE3_TEXT = 3
const SQLITE_ABORT = 4
const SQLITE_ABORT_ROLLBACK = 516
const SQLITE_ACCESS_EXISTS = 0
const SQLITE_ACCESS_READ = 2
const SQLITE_ACCESS_READWRITE = 1
const SQLITE_AFF_BLOB = 65
const SQLITE_AFF_FLEXNUM = 70
const SQLITE_AFF_INTEGER = 68
const SQLITE_AFF_MASK = 71
const SQLITE_AFF_NONE = 64
const SQLITE_AFF_NUMERIC = 67
const SQLITE_AFF_REAL = 69
const SQLITE_AFF_TEXT = 66
const SQLITE_ALLOW_COVERING_INDEX_SCAN = 1
const SQLITE_ALTER_TABLE = 26
const SQLITE_AMALGAMATION = 1
const SQLITE_ANALYZE = 28
const SQLITE_ANY = 5
const SQLITE_ASCII = 1
const SQLITE_ATOMIC_INTRINSICS = 0
const SQLITE_ATTACH = 24
const SQLITE_AUTH = 23
const SQLITE_AUTH_USER = 279
const SQLITE_AllOpts = 4294967295
const SQLITE_AutoIndex = 32768
const SQLITE_BIGENDIAN = 0
const SQLITE_BIG_DBL = 1e+99
const SQLITE_BLDF1_INDEXED = 1
const SQLITE_BLDF1_UNIQUE = 2
const SQLITE_BLDF2_2NDPASS = 4
const SQLITE_BLOB = 4
const SQLITE_BUSY = 5
const SQLITE_BUSY_RECOVERY = 261
const SQLITE_BUSY_SNAPSHOT = 517
const SQLITE_BUSY_TIMEOUT = 773
const SQLITE_BYTEORDER = 1234
const SQLITE_BalancedMerge = 2097152
const SQLITE_BloomFilter = 524288
const SQLITE_BloomPulldown = 1048576
const SQLITE_CANTOPEN = 14
const SQLITE_CANTOPEN_BKPT = 0
const SQLITE_CANTOPEN_CONVPATH = 1038
const SQLITE_CANTOPEN_DIRTYWAL = 1294
const SQLITE_CANTOPEN_FULLPATH = 782
const SQLITE_CANTOPEN_ISDIR = 526
const SQLITE_CANTOPEN_NOTEMPDIR = 270
const SQLITE_CANTOPEN_SYMLINK = 1550
const SQLITE_CHANGESETAPPLY_FKNOACTION = 8
const SQLITE_CHANGESETAPPLY_IGNORENOOP = 4
const SQLITE_CHANGESETAPPLY_INVERT = 2
const SQLITE_CHANGESETAPPLY_NOSAVEPOINT = 1
const SQLITE_CHANGESETSTART_INVERT = 2
const SQLITE_CHANGESET_ABORT = 2
const SQLITE_CHANGESET_CONFLICT = 3
const SQLITE_CHANGESET_CONSTRAINT = 4
const SQLITE_CHANGESET_DATA = 1
const SQLITE_CHANGESET_FOREIGN_KEY = 5
const SQLITE_CHANGESET_NOTFOUND = 2
const SQLITE_CHANGESET_OMIT = 0
const SQLITE_CHANGESET_REPLACE = 1
const SQLITE_CHECKPOINT_FULL = 1
const SQLITE_CHECKPOINT_PASSIVE = 0
const SQLITE_CHECKPOINT_RESTART = 2
const SQLITE_CHECKPOINT_TRUNCATE = 3
const SQLITE_CONFIG_COVERING_INDEX_SCAN = 20
const SQLITE_CONFIG_GETMALLOC = 5
const SQLITE_CONFIG_GETMUTEX = 11
const SQLITE_CONFIG_GETPCACHE = 15
const SQLITE_CONFIG_GETPCACHE2 = 19
const SQLITE_CONFIG_HEAP = 8
const SQLITE_CONFIG_LOG = 16
const SQLITE_CONFIG_LOOKASIDE = 13
const SQLITE_CONFIG_MALLOC = 4
const SQLITE_CONFIG_MEMDB_MAXSIZE = 29
const SQLITE_CONFIG_MEMSTATUS = 9
const SQLITE_CONFIG_MMAP_SIZE = 22
const SQLITE_CONFIG_MULTITHREAD = 2
const SQLITE_CONFIG_MUTEX = 10
const SQLITE_CONFIG_PAGECACHE = 7
const SQLITE_CONFIG_PCACHE = 14
const SQLITE_CONFIG_PCACHE2 = 18
const SQLITE_CONFIG_PCACHE_HDRSZ = 24
const SQLITE_CONFIG_PMASZ = 25
const SQLITE_CONFIG_ROWID_IN_VIEW = 30
const SQLITE_CONFIG_SCRATCH = 6
const SQLITE_CONFIG_SERIALIZED = 3
const SQLITE_CONFIG_SINGLETHREAD = 1
const SQLITE_CONFIG_SMALL_MALLOC = 27
const SQLITE_CONFIG_SORTERREF_SIZE = 28
const SQLITE_CONFIG_SQLLOG = 21
const SQLITE_CONFIG_STMTJRNL_SPILL = 26
const SQLITE_CONFIG_URI = 17
const SQLITE_CONFIG_WIN32_HEAPSIZE = 23
const SQLITE_CONSTRAINT = 19
const SQLITE_CONSTRAINT_CHECK = 275
const SQLITE_CONSTRAINT_COMMITHOOK = 531
const SQLITE_CONSTRAINT_DATATYPE = 3091
const SQLITE_CONSTRAINT_FOREIGNKEY = 787
const SQLITE_CONSTRAINT_FUNCTION = 1043
const SQLITE_CONSTRAINT_NOTNULL = 1299
const SQLITE_CONSTRAINT_PINNED = 2835
const SQLITE_CONSTRAINT_PRIMARYKEY = 1555
const SQLITE_CONSTRAINT_ROWID = 2579
const SQLITE_CONSTRAINT_TRIGGER = 1811
const SQLITE_CONSTRAINT_UNIQUE = 2067
const SQLITE_CONSTRAINT_VTAB = 2323
const SQLITE_COPY = 0
const SQLITE_CORE = 1
const SQLITE_CORRUPT = 11
const SQLITE_CORRUPT_BKPT = 0
const SQLITE_CORRUPT_INDEX = 779
const SQLITE_CORRUPT_SEQUENCE = 523
const SQLITE_CORRUPT_VTAB = 267
const SQLITE_CREATE_INDEX = 1
const SQLITE_CREATE_TABLE = 2
const SQLITE_CREATE_TEMP_INDEX = 3
const SQLITE_CREATE_TEMP_TABLE = 4
const SQLITE_CREATE_TEMP_TRIGGER = 5
const SQLITE_CREATE_TEMP_VIEW = 6
const SQLITE_CREATE_TRIGGER = 7
const SQLITE_CREATE_VIEW = 8
const SQLITE_CREATE_VTABLE = 29
const SQLITE_CacheSpill = 32
const SQLITE_CellSizeCk = 2097152
const SQLITE_CkptFullFSync = 16
const SQLITE_Coroutines = 33554432
const SQLITE_CountOfView = 512
const SQLITE_CoverIdxScan = 32
const SQLITE_CursorHints = 1024
const SQLITE_DBCONFIG_DEFENSIVE = 1010
const SQLITE_DBCONFIG_DQS_DDL = 1014
const SQLITE_DBCONFIG_DQS_DML = 1013
const SQLITE_DBCONFIG_ENABLE_FKEY = 1002
const SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER = 1004
const SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION = 1005
const SQLITE_DBCONFIG_ENABLE_QPSG = 1007
const SQLITE_DBCONFIG_ENABLE_TRIGGER = 1003
const SQLITE_DBCONFIG_ENABLE_VIEW = 1015
const SQLITE_DBCONFIG_LEGACY_ALTER_TABLE = 1012
const SQLITE_DBCONFIG_LEGACY_FILE_FORMAT = 1016
const SQLITE_DBCONFIG_LOOKASIDE = 1001
const SQLITE_DBCONFIG_MAINDBNAME = 1000
const SQLITE_DBCONFIG_MAX = 1019
const SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE = 1006
const SQLITE_DBCONFIG_RESET_DATABASE = 1009
const SQLITE_DBCONFIG_REVERSE_SCANORDER = 1019
const SQLITE_DBCONFIG_STMT_SCANSTATUS = 1018
const SQLITE_DBCONFIG_TRIGGER_EQP = 1008
const SQLITE_DBCONFIG_TRUSTED_SCHEMA = 1017
const SQLITE_DBCONFIG_WRITABLE_SCHEMA = 1011
const SQLITE_DBSTATUS_CACHE_HIT = 7
const SQLITE_DBSTATUS_CACHE_MISS = 8
const SQLITE_DBSTATUS_CACHE_SPILL = 12
const SQLITE_DBSTATUS_CACHE_USED = 1
const SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
const SQLITE_DBSTATUS_CACHE_WRITE = 9
const SQLITE_DBSTATUS_DEFERRED_FKS = 10
const SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
const SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
const SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
const SQLITE_DBSTATUS_LOOKASIDE_USED = 0
const SQLITE_DBSTATUS_MAX = 12
const SQLITE_DBSTATUS_SCHEMA_USED = 2
const SQLITE_DBSTATUS_STMT_USED = 3
const SQLITE_DEFAULT_AUTOVACUUM = 0
const SQLITE_DEFAULT_CACHE_SIZE = -2000
const SQLITE_DEFAULT_FILE_FORMAT = 4
const SQLITE_DEFAULT_FILE_PERMISSIONS = 420
const SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT = -1
const SQLITE_DEFAULT_LOOKASIDE = 40
const SQLITE_DEFAULT_MEMSTATUS = 0
const SQLITE_DEFAULT_MMAP_SIZE = 0
const SQLITE_DEFAULT_PAGE_SIZE = 4096
const SQLITE_DEFAULT_PCACHE_INITSZ = 20
const SQLITE_DEFAULT_PROXYDIR_PERMISSIONS = 493
const SQLITE_DEFAULT_RECURSIVE_TRIGGERS = 0
const SQLITE_DEFAULT_SECTOR_SIZE = 4096
const SQLITE_DEFAULT_SORTERREF_SIZE = 2147483647
const SQLITE_DEFAULT_SYNCHRONOUS = 2
const SQLITE_DEFAULT_WAL_AUTOCHECKPOINT = 1000
const SQLITE_DEFAULT_WAL_SYNCHRONOUS = 2
const SQLITE_DEFAULT_WORKER_THREADS = 0
const SQLITE_DELETE = 9
const SQLITE_DENY = 1
const SQLITE_DESERIALIZE_FREEONCLOSE = 1
const SQLITE_DESERIALIZE_READONLY = 4
const SQLITE_DESERIALIZE_RESIZEABLE = 2
const SQLITE_DETACH = 25
const SQLITE_DETERMINISTIC = 2048
const SQLITE_DIRECTONLY = 524288
const SQLITE_DIRECT_OVERFLOW_READ = 1
const SQLITE_DONE = 101
const SQLITE_DQS = 3
const SQLITE_DROP_INDEX = 10
const SQLITE_DROP_TABLE = 11
const SQLITE_DROP_TEMP_INDEX = 12
const SQLITE_DROP_TEMP_TABLE = 13
const SQLITE_DROP_TEMP_TRIGGER = 14
const SQLITE_DROP_TEMP_VIEW = 15
const SQLITE_DROP_TRIGGER = 16
const SQLITE_DROP_VIEW = 17
const SQLITE_DROP_VTABLE = 30
const SQLITE_Defensive = 268435456
const SQLITE_DeferFKs = 524288
const SQLITE_DistinctOpt = 16
const SQLITE_DqsDDL = 536870912
const SQLITE_DqsDML = 1073741824
const SQLITE_ECEL_DUP = 1
const SQLITE_ECEL_FACTOR = 2
const SQLITE_ECEL_OMITREF = 8
const SQLITE_ECEL_REF = 4
const SQLITE_EMPTY = 16
const SQLITE_ENABLE_COLUMN_METADATA = 1
const SQLITE_ENABLE_DBSTAT_VTAB = 1
const SQLITE_ENABLE_FTS5 = 1
const SQLITE_ENABLE_GEOPOLY = 1
const SQLITE_ENABLE_JSON1 = 1
const SQLITE_ENABLE_LOCKING_STYLE = 0
const SQLITE_ENABLE_MATH_FUNCTIONS = 1
const SQLITE_ENABLE_MEMORY_MANAGEMENT = 1
const SQLITE_ENABLE_OFFSET_SQL_FUNC = 1
const SQLITE_ENABLE_PREUPDATE_HOOK = 1
const SQLITE_ENABLE_RBU = 1
const SQLITE_ENABLE_RTREE = 1
const SQLITE_ENABLE_SESSION = 1
const SQLITE_ENABLE_SNAPSHOT = 1
const SQLITE_ENABLE_STAT4 = 1
const SQLITE_ENABLE_UNLOCK_NOTIFY = 1
const SQLITE_ERROR = 1
const SQLITE_ERROR_MISSING_COLLSEQ = 257
const SQLITE_ERROR_RETRY = 513
const SQLITE_ERROR_SNAPSHOT = 769
const SQLITE_EXTERN = 0
const SQLITE_EnableQPSG = 8388608
const SQLITE_EnableTrigger = 262144
const SQLITE_EnableView = 2147483648
const SQLITE_FAIL = 3
const SQLITE_FAULTINJECTOR_COUNT = 1
const SQLITE_FAULTINJECTOR_MALLOC = 0
const SQLITE_FCNTL_BEGIN_ATOMIC_WRITE = 31
const SQLITE_FCNTL_BUSYHANDLER = 15
const SQLITE_FCNTL_CHUNK_SIZE = 6
const SQLITE_FCNTL_CKPT_DONE = 37
const SQLITE_FCNTL_CKPT_START = 39
const SQLITE_FCNTL_CKSM_FILE = 41
const SQLITE_FCNTL_COMMIT_ATOMIC_WRITE = 32
const SQLITE_FCNTL_COMMIT_PHASETWO = 22
const SQLITE_FCNTL_DATA_VERSION = 35
const SQLITE_FCNTL_DB_UNCHANGED = 3389603744
const SQLITE_FCNTL_EXTERNAL_READER = 40
const SQLITE_FCNTL_FILE_POINTER = 7
const SQLITE_FCNTL_GET_LOCKPROXYFILE = 2
const SQLITE_FCNTL_HAS_MOVED = 20
const SQLITE_FCNTL_JOURNAL_POINTER = 28
const SQLITE_FCNTL_LAST_ERRNO = 4
const SQLITE_FCNTL_LOCKSTATE = 1
const SQLITE_FCNTL_LOCK_TIMEOUT = 34
const SQLITE_FCNTL_MMAP_SIZE = 18
const SQLITE_FCNTL_OVERWRITE = 11
const SQLITE_FCNTL_PDB = 30
const SQLITE_FCNTL_PERSIST_WAL = 10
const SQLITE_FCNTL_POWERSAFE_OVERWRITE = 13
const SQLITE_FCNTL_PRAGMA = 14
const SQLITE_FCNTL_RBU = 26
const SQLITE_FCNTL_RBUCNT = 5149216
const SQLITE_FCNTL_RESERVE_BYTES = 38
const SQLITE_FCNTL_RESET_CACHE = 42
const SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE = 33
const SQLITE_FCNTL_SET_LOCKPROXYFILE = 3
const SQLITE_FCNTL_SIZE_HINT = 5
const SQLITE_FCNTL_SIZE_LIMIT = 36
const SQLITE_FCNTL_SYNC = 21
const SQLITE_FCNTL_SYNC_OMITTED = 8
const SQLITE_FCNTL_TEMPFILENAME = 16
const SQLITE_FCNTL_TRACE = 19
const SQLITE_FCNTL_VFSNAME = 12
const SQLITE_FCNTL_VFS_POINTER = 27
const SQLITE_FCNTL_WAL_BLOCK = 24
const SQLITE_FCNTL_WIN32_AV_RETRY = 9
const SQLITE_FCNTL_WIN32_GET_HANDLE = 29
const SQLITE_FCNTL_WIN32_SET_HANDLE = 23
const SQLITE_FCNTL_ZIPVFS = 25
const SQLITE_FILE_HEADER = "SQLite format 3"
const SQLITE_FLOAT = 2
const SQLITE_FORMAT = 24
const SQLITE_FP_PRECISION_LIMIT = 100000000
const SQLITE_FRAME_MAGIC = 2275391262
const SQLITE_FSFLAGS_IS_MSDOS = 1
const SQLITE_FTS5_MAX_EXPR_DEPTH = 256
const SQLITE_FULL = 13
const SQLITE_FUNCTION = 31
const SQLITE_FUNC_ANYORDER = 134217728
const SQLITE_FUNC_BUILTIN = 8388608
const SQLITE_FUNC_BYTELEN = 192
const SQLITE_FUNC_CASE = 8
const SQLITE_FUNC_CONSTANT = 2048
const SQLITE_FUNC_COUNT = 256
const SQLITE_FUNC_DIRECT = 524288
const SQLITE_FUNC_ENCMASK = 3
const SQLITE_FUNC_EPHEM = 16
const SQLITE_FUNC_HASH_SZ = 23
const SQLITE_FUNC_INLINE = 4194304
const SQLITE_FUNC_INTERNAL = 262144
const SQLITE_FUNC_LENGTH = 64
const SQLITE_FUNC_LIKE = 4
const SQLITE_FUNC_MINMAX = 4096
const SQLITE_FUNC_NEEDCOLL = 32
const SQLITE_FUNC_RUNONLY = 32768
const SQLITE_FUNC_SLOCHNG = 8192
const SQLITE_FUNC_TEST = 16384
const SQLITE_FUNC_TYPEOF = 128
const SQLITE_FUNC_UNLIKELY = 1024
const SQLITE_FUNC_UNSAFE = 2097152
const SQLITE_FUNC_WINDOW = 65536
const SQLITE_FactorOutConst = 8
const SQLITE_FlttnUnionAll = 8388608
const SQLITE_ForeignKeys = 16384
const SQLITE_Fts3Tokenizer = 4194304
const SQLITE_FullColNames = 4
const SQLITE_FullFSync = 8
const SQLITE_GET_LOCKPROXYFILE = 2
const SQLITE_GroupByOrder = 4
const SQLITE_HAVE_C99_MATH_FUNCS = 1
const SQLITE_HAVE_ZLIB = 1
const SQLITE_IDXTYPE_APPDEF = 0
const SQLITE_IDXTYPE_IPK = 3
const SQLITE_IDXTYPE_PRIMARYKEY = 2
const SQLITE_IDXTYPE_UNIQUE = 1
const SQLITE_IGNORE = 2
const SQLITE_INDEX_CONSTRAINT_EQ = 2
const SQLITE_INDEX_CONSTRAINT_FUNCTION = 150
const SQLITE_INDEX_CONSTRAINT_GE = 32
const SQLITE_INDEX_CONSTRAINT_GLOB = 66
const SQLITE_INDEX_CONSTRAINT_GT = 4
const SQLITE_INDEX_CONSTRAINT_IS = 72
const SQLITE_INDEX_CONSTRAINT_ISNOT = 69
const SQLITE_INDEX_CONSTRAINT_ISNOTNULL = 70
const SQLITE_INDEX_CONSTRAINT_ISNULL = 71
const SQLITE_INDEX_CONSTRAINT_LE = 8
const SQLITE_INDEX_CONSTRAINT_LIKE = 65
const SQLITE_INDEX_CONSTRAINT_LIMIT = 73
const SQLITE_INDEX_CONSTRAINT_LT = 16
const SQLITE_INDEX_CONSTRAINT_MATCH = 64
const SQLITE_INDEX_CONSTRAINT_NE = 68
const SQLITE_INDEX_CONSTRAINT_OFFSET = 74
const SQLITE_INDEX_CONSTRAINT_REGEXP = 67
const SQLITE_INDEX_SCAN_UNIQUE = 1
const SQLITE_INNOCUOUS = 2097152
const SQLITE_INSERT = 18
const SQLITE_INTEGER = 1
const SQLITE_INTEGRITY_CHECK_ERROR_MAX = 100
const SQLITE_INTERNAL = 2
const SQLITE_INTERRUPT = 9
const SQLITE_IOCAP_ATOMIC = 1
const SQLITE_IOCAP_ATOMIC16K = 64
const SQLITE_IOCAP_ATOMIC1K = 4
const SQLITE_IOCAP_ATOMIC2K = 8
const SQLITE_IOCAP_ATOMIC32K = 128
const SQLITE_IOCAP_ATOMIC4K = 16
const SQLITE_IOCAP_ATOMIC512 = 2
const SQLITE_IOCAP_ATOMIC64K = 256
const SQLITE_IOCAP_ATOMIC8K = 32
const SQLITE_IOCAP_BATCH_ATOMIC = 16384
const SQLITE_IOCAP_IMMUTABLE = 8192
const SQLITE_IOCAP_POWERSAFE_OVERWRITE = 4096
const SQLITE_IOCAP_SAFE_APPEND = 512
const SQLITE_IOCAP_SEQUENTIAL = 1024
const SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN = 2048
const SQLITE_IOERR = 10
const SQLITE_IOERR_ACCESS = 3338
const SQLITE_IOERR_AUTH = 7178
const SQLITE_IOERR_BEGIN_ATOMIC = 7434
const SQLITE_IOERR_BLOCKED = 2826
const SQLITE_IOERR_CHECKRESERVEDLOCK = 3594
const SQLITE_IOERR_CLOSE = 4106
const SQLITE_IOERR_COMMIT_ATOMIC = 7690
const SQLITE_IOERR_CONVPATH = 6666
const SQLITE_IOERR_CORRUPTFS = 8458
const SQLITE_IOERR_DATA = 8202
const SQLITE_IOERR_DELETE = 2570
const SQLITE_IOERR_DELETE_NOENT = 5898
const SQLITE_IOERR_DIR_CLOSE = 4362
const SQLITE_IOERR_DIR_FSYNC = 1290
const SQLITE_IOERR_FSTAT = 1802
const SQLITE_IOERR_FSYNC = 1034
const SQLITE_IOERR_GETTEMPPATH = 6410
const SQLITE_IOERR_IN_PAGE = 8714
const SQLITE_IOERR_LOCK = 3850
const SQLITE_IOERR_MMAP = 6154
const SQLITE_IOERR_NOMEM = 3082
const SQLITE_IOERR_NOMEM_BKPT = 3082
const SQLITE_IOERR_RDLOCK = 2314
const SQLITE_IOERR_READ = 266
const SQLITE_IOERR_ROLLBACK_ATOMIC = 7946
const SQLITE_IOERR_SEEK = 5642
const SQLITE_IOERR_SHMLOCK = 5130
const SQLITE_IOERR_SHMMAP = 5386
const SQLITE_IOERR_SHMOPEN = 4618
const SQLITE_IOERR_SHMSIZE = 4874
const SQLITE_IOERR_SHORT_READ = 522
const SQLITE_IOERR_TRUNCATE = 1546
const SQLITE_IOERR_UNLOCK = 2058
const SQLITE_IOERR_VNODE = 6922
const SQLITE_IOERR_WRITE = 778
const SQLITE_IgnoreChecks = 512
const SQLITE_IndexedExpr = 16777216
const SQLITE_JUMPIFNULL = 16
const SQLITE_LAST_ERRNO = 4
const SQLITE_LIKE_DOESNT_MATCH_BLOBS = 1
const SQLITE_LIMIT_ATTACHED = 7
const SQLITE_LIMIT_COLUMN = 2
const SQLITE_LIMIT_COMPOUND_SELECT = 4
const SQLITE_LIMIT_EXPR_DEPTH = 3
const SQLITE_LIMIT_FUNCTION_ARG = 6
const SQLITE_LIMIT_LENGTH = 0
const SQLITE_LIMIT_LIKE_PATTERN_LENGTH = 8
const SQLITE_LIMIT_SQL_LENGTH = 1
const SQLITE_LIMIT_TRIGGER_DEPTH = 10
const SQLITE_LIMIT_VARIABLE_NUMBER = 9
const SQLITE_LIMIT_VDBE_OP = 5
const SQLITE_LIMIT_WORKER_THREADS = 11
const SQLITE_LITTLEENDIAN = 1
const SQLITE_LOCKED = 6
const SQLITE_LOCKED_SHAREDCACHE = 262
const SQLITE_LOCKED_VTAB = 518
const SQLITE_LOCK_EXCLUSIVE = 4
const SQLITE_LOCK_NONE = 0
const SQLITE_LOCK_PENDING = 3
const SQLITE_LOCK_RESERVED = 2
const SQLITE_LOCK_SHARED = 1
const SQLITE_LegacyAlter = 67108864
const SQLITE_LegacyFileFmt = 2
const SQLITE_LoadExtFunc = 131072
const SQLITE_LoadExtension = 65536
const SQLITE_MALLOC_SOFT_LIMIT = 1024
const SQLITE_MATCH = 0
const SQLITE_MAX_ALLOCATION_SIZE = 2147483391
const SQLITE_MAX_ATTACHED = 10
const SQLITE_MAX_COLUMN = 2000
const SQLITE_MAX_COMPOUND_SELECT = 500
const SQLITE_MAX_DB = 12
const SQLITE_MAX_DEFAULT_PAGE_SIZE = 8192
const SQLITE_MAX_EXPR_DEPTH = 1000
const SQLITE_MAX_FILE_FORMAT = 4
const SQLITE_MAX_FUNCTION_ARG = 127
const SQLITE_MAX_LENGTH = 1000000000
const SQLITE_MAX_LIKE_PATTERN_LENGTH = 50000
const SQLITE_MAX_MEMORY = 0
const SQLITE_MAX_MMAP_SIZE = 2147418112
const SQLITE_MAX_PAGE_COUNT = 4294967294
const SQLITE_MAX_PAGE_SIZE = 65536
const SQLITE_MAX_PATHLEN = 1024
const SQLITE_MAX_PMASZ = 536870912
const SQLITE_MAX_PREPARE_RETRY = 25
const SQLITE_MAX_SCHEMA_RETRY = 50
const SQLITE_MAX_SQL_LENGTH = 1000000000
const SQLITE_MAX_SRCLIST = 200
const SQLITE_MAX_SYMLINK = 200
const SQLITE_MAX_SYMLINKS = 100
const SQLITE_MAX_TRIGGER_DEPTH = 1000
const SQLITE_MAX_VARIABLE_NUMBER = 32766
const SQLITE_MAX_VDBE_OP = 250000000
const SQLITE_MAX_WORKER_THREADS = 8
const SQLITE_MEMDB_DEFAULT_MAXSIZE = 1073741824
const SQLITE_MINIMUM_FILE_DESCRIPTOR = 3
const SQLITE_MISMATCH = 20
const SQLITE_MISUSE = 21
const SQLITE_MISUSE_BKPT = 0
const SQLITE_MUTEX_FAST = 0
const SQLITE_MUTEX_NOOP = 1
const SQLITE_MUTEX_RECURSIVE = 1
const SQLITE_MUTEX_STATIC_APP1 = 8
const SQLITE_MUTEX_STATIC_APP2 = 9
const SQLITE_MUTEX_STATIC_APP3 = 10
const SQLITE_MUTEX_STATIC_LRU = 6
const SQLITE_MUTEX_STATIC_LRU2 = 7
const SQLITE_MUTEX_STATIC_MAIN = 2
const SQLITE_MUTEX_STATIC_MASTER = 2
const SQLITE_MUTEX_STATIC_MEM = 3
const SQLITE_MUTEX_STATIC_MEM2 = 4
const SQLITE_MUTEX_STATIC_OPEN = 4
const SQLITE_MUTEX_STATIC_PMEM = 7
const SQLITE_MUTEX_STATIC_PRNG = 5
const SQLITE_MUTEX_STATIC_TEMPDIR = 11
const SQLITE_MUTEX_STATIC_VFS1 = 11
const SQLITE_MUTEX_STATIC_VFS2 = 12
const SQLITE_MUTEX_STATIC_VFS3 = 13
const SQLITE_MX_JUMP_OPCODE = 64
const SQLITE_MinMaxOpt = 65536
const SQLITE_NOLFS = 22
const SQLITE_NOMATCH = 1
const SQLITE_NOMEM = 7
const SQLITE_NOMEM_BKPT = 7
const SQLITE_NOTADB = 26
const SQLITE_NOTFOUND = 12
const SQLITE_NOTICE = 27
const SQLITE_NOTICE_RBU = 795
const SQLITE_NOTICE_RECOVER_ROLLBACK = 539
const SQLITE_NOTICE_RECOVER_WAL = 283
const SQLITE_NOTNULL = 144
const SQLITE_NOWILDCARDMATCH = 2
const SQLITE_NTUNE = 6
const SQLITE_NULL = 5
const SQLITE_NULLEQ = 128
const SQLITE_N_BTREE_META = 16
const SQLITE_N_KEYWORD = 147
const SQLITE_N_LIMIT = 12
const SQLITE_N_STDTYPE = 6
const SQLITE_NoCkptOnClose = 2048
const SQLITE_NoSchemaError = 134217728
const SQLITE_NullCallback = 256
const SQLITE_NullUnusedCols = 67108864
const SQLITE_OK = 0
const SQLITE_OK_LOAD_PERMANENTLY = 256
const SQLITE_OK_SYMLINK = 512
const SQLITE_OPEN_AUTOPROXY = 32
const SQLITE_OPEN_CREATE = 4
const SQLITE_OPEN_DELETEONCLOSE = 8
const SQLITE_OPEN_EXCLUSIVE = 16
const SQLITE_OPEN_EXRESCODE = 33554432
const SQLITE_OPEN_FULLMUTEX = 65536
const SQLITE_OPEN_MAIN_DB = 256
const SQLITE_OPEN_MAIN_JOURNAL = 2048
const SQLITE_OPEN_MASTER_JOURNAL = 16384
const SQLITE_OPEN_MEMORY = 128
const SQLITE_OPEN_NOFOLLOW = 16777216
const SQLITE_OPEN_NOMUTEX = 32768
const SQLITE_OPEN_PRIVATECACHE = 262144
const SQLITE_OPEN_READONLY = 1
const SQLITE_OPEN_READWRITE = 2
const SQLITE_OPEN_SHAREDCACHE = 131072
const SQLITE_OPEN_SUBJOURNAL = 8192
const SQLITE_OPEN_SUPER_JOURNAL = 16384
const SQLITE_OPEN_TEMP_DB = 512
const SQLITE_OPEN_TEMP_JOURNAL = 4096
const SQLITE_OPEN_TRANSIENT_DB = 1024
const SQLITE_OPEN_URI = 64
const SQLITE_OPEN_WAL = 524288
const SQLITE_OS_UNIX = 1
const SQLITE_OmitNoopJoin = 256
const SQLITE_OmitOrderBy = 262144
const SQLITE_OnePass = 134217728
const SQLITE_OrderByIdxJoin = 64
const SQLITE_PERM = 3
const SQLITE_POWERSAFE_OVERWRITE = 1
const SQLITE_PRAGMA = 19
const SQLITE_PREPARE_MASK = 15
const SQLITE_PREPARE_NORMALIZE = 2
const SQLITE_PREPARE_NO_VTAB = 4
const SQLITE_PREPARE_PERSISTENT = 1
const SQLITE_PREPARE_SAVESQL = 128
const SQLITE_PRINTF_INTERNAL = 1
const SQLITE_PRINTF_MALLOCED = 4
const SQLITE_PRINTF_SQLFUNC = 2
const SQLITE_PRINT_BUF_SIZE = 70
const SQLITE_PRIVATE = 0
const SQLITE_PROTOCOL = 15
const SQLITE_PTRSIZE = 8
const SQLITE_PropagateConst = 32768
const SQLITE_PushDown = 4096
const SQLITE_QUERY_PLANNER_LIMIT = 20000
const SQLITE_QUERY_PLANNER_LIMIT_INCR = 1000
const SQLITE_QueryFlattener = 1
const SQLITE_QueryOnly = 1048576
const SQLITE_RANGE = 25
const SQLITE_RBU_STATE_CHECKPOINT = 3
const SQLITE_RBU_STATE_DONE = 4
const SQLITE_RBU_STATE_ERROR = 5
const SQLITE_RBU_STATE_MOVE = 2
const SQLITE_RBU_STATE_OAL = 1
const SQLITE_RBU_UPDATE_CACHESIZE = 16
const SQLITE_READ = 20
const SQLITE_READONLY = 8
const SQLITE_READONLY_CANTINIT = 1288
const SQLITE_READONLY_CANTLOCK = 520
const SQLITE_READONLY_DBMOVED = 1032
const SQLITE_READONLY_DIRECTORY = 1544
const SQLITE_READONLY_RECOVERY = 264
const SQLITE_READONLY_ROLLBACK = 776
const SQLITE_RECURSIVE = 33
const SQLITE_REINDEX = 27
const SQLITE_REPLACE = 5
const SQLITE_RESULT_SUBTYPE = 16777216
const SQLITE_ROLLBACK = 1
const SQLITE_ROW = 100
const SQLITE_RecTriggers = 8192
const SQLITE_ReleaseReg = 4194304
const SQLITE_ResetDatabase = 33554432
const SQLITE_ReverseOrder = 4096
const SQLITE_SAVEPOINT = 32
const SQLITE_SCANSTAT_COMPLEX = 1
const SQLITE_SCANSTAT_EST = 2
const SQLITE_SCANSTAT_EXPLAIN = 4
const SQLITE_SCANSTAT_NAME = 3
const SQLITE_SCANSTAT_NCYCLE = 7
const SQLITE_SCANSTAT_NLOOP = 0
const SQLITE_SCANSTAT_NVISIT = 1
const SQLITE_SCANSTAT_PARENTID = 6
const SQLITE_SCANSTAT_SELECTID = 5
const SQLITE_SCHEMA = 17
const SQLITE_SELECT = 21
const SQLITE_SERIALIZE_NOCOPY = 1
const SQLITE_SESSION_CONFIG_STRMSIZE = 1
const SQLITE_SESSION_OBJCONFIG_ROWID = 2
const SQLITE_SESSION_OBJCONFIG_SIZE = 1
const SQLITE_SET_LOCKPROXYFILE = 3
const SQLITE_SHM_EXCLUSIVE = 8
const SQLITE_SHM_LOCK = 2
const SQLITE_SHM_NLOCK = 8
const SQLITE_SHM_SHARED = 4
const SQLITE_SHM_UNLOCK = 1
const SQLITE_SORTER_PMASZ = 250
const SQLITE_SOUNDEX = 1
const SQLITE_SOURCE_ID = "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355"
const SQLITE_SO_ASC = 0
const SQLITE_SO_DESC = 1
const SQLITE_SO_UNDEFINED = -1
const SQLITE_STAT4_SAMPLES = 24
const SQLITE_STATE_BUSY = 109
const SQLITE_STATE_CLOSED = 206
const SQLITE_STATE_ERROR = 213
const SQLITE_STATE_OPEN = 118
const SQLITE_STATE_SICK = 186
const SQLITE_STATE_ZOMBIE = 167
const SQLITE_STATUS_MALLOC_COUNT = 9
const SQLITE_STATUS_MALLOC_SIZE = 5
const SQLITE_STATUS_MEMORY_USED = 0
const SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
const SQLITE_STATUS_PAGECACHE_SIZE = 7
const SQLITE_STATUS_PAGECACHE_USED = 1
const SQLITE_STATUS_PARSER_STACK = 6
const SQLITE_STATUS_SCRATCH_OVERFLOW = 4
const SQLITE_STATUS_SCRATCH_SIZE = 8
const SQLITE_STATUS_SCRATCH_USED = 3
const SQLITE_STDCALL = 0
const SQLITE_STMTJRNL_SPILL = 65536
const SQLITE_STMTSTATUS_AUTOINDEX = 3
const SQLITE_STMTSTATUS_FILTER_HIT = 8
const SQLITE_STMTSTATUS_FILTER_MISS = 7
const SQLITE_STMTSTATUS_FULLSCAN_STEP = 1
const SQLITE_STMTSTATUS_MEMUSED = 99
const SQLITE_STMTSTATUS_REPREPARE = 5
const SQLITE_STMTSTATUS_RUN = 6
const SQLITE_STMTSTATUS_SORT = 2
const SQLITE_STMTSTATUS_VM_STEP = 4
const SQLITE_SUBTYPE = 1048576
const SQLITE_SYNC_DATAONLY = 16
const SQLITE_SYNC_FULL = 3
const SQLITE_SYNC_NORMAL = 2
const SQLITE_SYSTEM_MALLOC = 1
const SQLITE_SeekScan = 131072
const SQLITE_ShortColNames = 64
const SQLITE_SimplifyJoin = 8192
const SQLITE_SkipScan = 16384
const SQLITE_Stat4 = 2048
const SQLITE_StmtScanStatus = 1024
const SQLITE_TEMP_FILE_PREFIX = "etilqs_"
const SQLITE_TEMP_STORE = 1
const SQLITE_TESTCTRL_ALWAYS = 13
const SQLITE_TESTCTRL_ASSERT = 12
const SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS = 10
const SQLITE_TESTCTRL_BITVEC_TEST = 8
const SQLITE_TESTCTRL_BYTEORDER = 22
const SQLITE_TESTCTRL_EXPLAIN_STMT = 19
const SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS = 29
const SQLITE_TESTCTRL_FAULT_INSTALL = 9
const SQLITE_TESTCTRL_FIRST = 5
const SQLITE_TESTCTRL_FK_NO_ACTION = 7
const SQLITE_TESTCTRL_IMPOSTER = 25
const SQLITE_TESTCTRL_INTERNAL_FUNCTIONS = 17
const SQLITE_TESTCTRL_ISINIT = 23
const SQLITE_TESTCTRL_ISKEYWORD = 16
const SQLITE_TESTCTRL_JSON_SELFCHECK = 14
const SQLITE_TESTCTRL_LAST = 34
const SQLITE_TESTCTRL_LOCALTIME_FAULT = 18
const SQLITE_TESTCTRL_LOGEST = 33
const SQLITE_TESTCTRL_NEVER_CORRUPT = 20
const SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD = 19
const SQLITE_TESTCTRL_OPTIMIZATIONS = 15
const SQLITE_TESTCTRL_PARSER_COVERAGE = 26
const SQLITE_TESTCTRL_PENDING_BYTE = 11
const SQLITE_TESTCTRL_PRNG_RESET = 7
const SQLITE_TESTCTRL_PRNG_RESTORE = 6
const SQLITE_TESTCTRL_PRNG_SAVE = 5
const SQLITE_TESTCTRL_PRNG_SEED = 28
const SQLITE_TESTCTRL_RESERVE = 14
const SQLITE_TESTCTRL_RESULT_INTREAL = 27
const SQLITE_TESTCTRL_SCRATCHMALLOC = 17
const SQLITE_TESTCTRL_SEEK_COUNT = 30
const SQLITE_TESTCTRL_SORTER_MMAP = 24
const SQLITE_TESTCTRL_TRACEFLAGS = 31
const SQLITE_TESTCTRL_TUNE = 32
const SQLITE_TESTCTRL_USELONGDOUBLE = 34
const SQLITE_TESTCTRL_VDBE_COVERAGE = 21
const SQLITE_TEXT = 3
const SQLITE_THREADSAFE = 1
const SQLITE_TOKEN_KEYWORD = 2
const SQLITE_TOKEN_QUOTED = 1
const SQLITE_TOOBIG = 18
const SQLITE_TRACE_CLOSE = 8
const SQLITE_TRACE_LEGACY = 64
const SQLITE_TRACE_NONLEGACY_MASK = 15
const SQLITE_TRACE_PROFILE = 2
const SQLITE_TRACE_ROW = 4
const SQLITE_TRACE_STMT = 1
const SQLITE_TRACE_XPROFILE = 128
const SQLITE_TRANSACTION = 22
const SQLITE_TXN_NONE = 0
const SQLITE_TXN_READ = 1
const SQLITE_TXN_WRITE = 2
const SQLITE_Transitive = 128
const SQLITE_TriggerEQP = 16777216
const SQLITE_TrustedSchema = 128
const SQLITE_UPDATE = 23
const SQLITE_USE_URI = 0
const SQLITE_UTF16 = 4
const SQLITE_UTF16BE = 3
const SQLITE_UTF16LE = 2
const SQLITE_UTF16NATIVE = 2
const SQLITE_UTF16_ALIGNED = 8
const SQLITE_UTF8 = 1
const SQLITE_VERSION = "3.45.3"
const SQLITE_VERSION_NUMBER = 3045003
const SQLITE_VTABRISK_High = 2
const SQLITE_VTABRISK_Low = 0
const SQLITE_VTABRISK_Normal = 1
const SQLITE_VTAB_CONSTRAINT_SUPPORT = 1
const SQLITE_VTAB_DIRECTONLY = 3
const SQLITE_VTAB_INNOCUOUS = 2
const SQLITE_VTAB_USES_ALL_SCHEMAS = 4
const SQLITE_WARNING = 28
const SQLITE_WARNING_AUTOINDEX = 284
const SQLITE_WIN32_DATA_DIRECTORY_TYPE = 1
const SQLITE_WIN32_TEMP_DIRECTORY_TYPE = 2
const SQLITE_WITHOUT_ZONEMALLOC = 1
const SQLITE_WindowFunc = 2
const SQLITE_WriteSchema = 1
const SRT_Coroutine = 13
const SRT_Discard = 4
const SRT_DistFifo = 5
const SRT_DistQueue = 6
const SRT_EphemTab = 12
const SRT_Except = 2
const SRT_Exists = 3
const SRT_Fifo = 8
const SRT_Mem = 10
const SRT_Output = 9
const SRT_Queue = 7
const SRT_Set = 11
const SRT_Table = 14
const SRT_Union = 1
const SRT_Upfrom = 15
const STAT_GET_NDLT = 4
const STAT_GET_NEQ = 2
const STAT_GET_NLT = 3
const STAT_GET_ROWID = 1
const STAT_GET_STAT1 = 0
const STDERR_FILENO = 2
const STDIN_FILENO = 0
const STDOUT_FILENO = 1
const SWAPOFF_FORCE = 1
const S_BLKSIZE = 512
const S_IEXEC = 64
const S_IFBLK = 24576
const S_IFCHR = 8192
const S_IFDIR = 16384
const S_IFIFO = 4096
const S_IFLNK = 40960
const S_IFMT = 61440
const S_IFREG = 32768
const S_IFSOCK = 49152
const S_IFWHT = 57344
const S_IREAD = 256
const S_IRGRP = 32
const S_IROTH = 4
const S_IRUSR = 256
const S_IRWXG = 56
const S_IRWXO = 7
const S_IRWXU = 448
const S_ISGID = 1024
const S_ISTXT = 512
const S_ISUID = 2048
const S_ISVTX = 512
const S_IWGRP = 16
const S_IWOTH = 2
const S_IWRITE = 128
const S_IWUSR = 128
const S_IXGRP = 8
const S_IXOTH = 1
const S_IXUSR = 64
const TABTYP_NORM = 0
const TABTYP_VIEW = 2
const TABTYP_VTAB = 1
const TERM_ANDINFO = 32
const TERM_CODED = 4
const TERM_COPIED = 8
const TERM_DYNAMIC = 1
const TERM_HEURTRUTH = 8192
const TERM_HIGHTRUTH = 16384
const TERM_IS = 2048
const TERM_LIKE = 1024
const TERM_LIKECOND = 512
const TERM_LIKEOPT = 256
const TERM_OK = 64
const TERM_ORINFO = 16
const TERM_SLICE = 32768
const TERM_VARSELECT = 4096
const TERM_VIRTUAL = 2
const TERM_VNULL = 128
const TF_Autoincrement = 8
const TF_Ephemeral = 16384
const TF_Eponymous = 32768
const TF_HasGenerated = 96
const TF_HasHidden = 2
const TF_HasNotNull = 2048
const TF_HasPrimaryKey = 4
const TF_HasStat1 = 16
const TF_HasStat4 = 8192
const TF_HasStored = 64
const TF_HasVirtual = 32
const TF_NoVisibleRowid = 512
const TF_OOOHidden = 1024
const TF_Readonly = 1
const TF_Shadow = 4096
const TF_StatsUsed = 256
const TF_Strict = 65536
const TF_WithoutRowid = 128
const TIMER_ABSTIME = 1
const TIMER_RELTIME = 0
const TIME_MONOTONIC = 2
const TIME_UTC = 1
const TIOCM_CAR = 64
const TIOCM_CD = 64
const TIOCM_CTS = 32
const TIOCM_DCD = 64
const TIOCM_DSR = 256
const TIOCM_DTR = 2
const TIOCM_LE = 1
const TIOCM_RI = 128
const TIOCM_RNG = 128
const TIOCM_RTS = 4
const TIOCM_SR = 16
const TIOCM_ST = 8
const TIOCPKT_DATA = 0
const TIOCPKT_DOSTOP = 32
const TIOCPKT_FLUSHREAD = 1
const TIOCPKT_FLUSHWRITE = 2
const TIOCPKT_IOCTL = 64
const TIOCPKT_NOSTOP = 16
const TIOCPKT_START = 8
const TIOCPKT_STOP = 4
const TK_ABORT = 27
const TK_ACTION = 28
const TK_ADD = 163
const TK_AFTER = 29
const TK_AGG_COLUMN = 169
const TK_AGG_FUNCTION = 168
const TK_ALL = 135
const TK_ALTER = 162
const TK_ALWAYS = 96
const TK_ANALYZE = 30
const TK_AND = 44
const TK_ANY = 101
const TK_AS = 24
const TK_ASC = 31
const TK_ASTERISK = 180
const TK_ATTACH = 32
const TK_AUTOINCR = 126
const TK_BEFORE = 33
const TK_BEGIN = 5
const TK_BETWEEN = 48
const TK_BITAND = 102
const TK_BITNOT = 114
const TK_BITOR = 103
const TK_BLOB = 154
const TK_BY = 34
const TK_CASCADE = 35
const TK_CASE = 157
const TK_CAST = 36
const TK_CHECK = 124
const TK_COLLATE = 113
const TK_COLUMN = 167
const TK_COLUMNKW = 60
const TK_COMMA = 25
const TK_COMMIT = 10
const TK_CONCAT = 111
const TK_CONFLICT = 37
const TK_CONSTRAINT = 119
const TK_CREATE = 17
const TK_CTIME_KW = 100
const TK_CURRENT = 85
const TK_DATABASE = 38
const TK_DEFAULT = 120
const TK_DEFERRABLE = 131
const TK_DEFERRED = 7
const TK_DELETE = 128
const TK_DESC = 39
const TK_DETACH = 40
const TK_DISTINCT = 140
const TK_DO = 61
const TK_DOT = 141
const TK_DROP = 133
const TK_EACH = 41
const TK_ELSE = 160
const TK_END = 11
const TK_EQ = 53
const TK_ERROR = 182
const TK_ESCAPE = 58
const TK_EXCEPT = 136
const TK_EXCLUDE = 91
const TK_EXCLUSIVE = 9
const TK_EXISTS = 20
const TK_EXPLAIN = 2
const TK_FAIL = 42
const TK_FILTER = 166
const TK_FIRST = 83
const TK_FLOAT = 153
const TK_FOLLOWING = 86
const TK_FOR = 62
const TK_FOREIGN = 132
const TK_FROM = 142
const TK_FUNCTION = 172
const TK_GE = 57
const TK_GENERATED = 95
const TK_GROUP = 146
const TK_GROUPS = 92
const TK_GT = 54
const TK_HAVING = 147
const TK_ID = 59
const TK_IF = 18
const TK_IF_NULL_ROW = 179
const TK_IGNORE = 63
const TK_ILLEGAL = 184
const TK_IMMEDIATE = 8
const TK_IN = 49
const TK_INDEX = 161
const TK_INDEXED = 116
const TK_INITIALLY = 64
const TK_INSERT = 127
const TK_INSTEAD = 65
const TK_INTEGER = 155
const TK_INTERSECT = 137
const TK_INTO = 151
const TK_IS = 45
const TK_ISNOT = 171
const TK_ISNULL = 50
const TK_JOIN = 143
const TK_JOIN_KW = 118
const TK_KEY = 67
const TK_LAST = 84
const TK_LE = 55
const TK_LIKE_KW = 47
const TK_LIMIT = 148
const TK_LP = 22
const TK_LSHIFT = 104
const TK_LT = 56
const TK_MATCH = 46
const TK_MATERIALIZED = 97
const TK_MINUS = 107
const TK_NE = 52
const TK_NO = 66
const TK_NOT = 19
const TK_NOTHING = 152
const TK_NOTNULL = 51
const TK_NULL = 121
const TK_NULLS = 82
const TK_OF = 68
const TK_OFFSET = 69
const TK_ON = 115
const TK_OR = 43
const TK_ORDER = 145
const TK_OTHERS = 93
const TK_OVER = 165
const TK_PARTITION = 87
const TK_PLAN = 4
const TK_PLUS = 106
const TK_PRAGMA = 70
const TK_PRECEDING = 88
const TK_PRIMARY = 122
const TK_PTR = 112
const TK_QUERY = 3
const TK_RAISE = 71
const TK_RANGE = 89
const TK_RECURSIVE = 72
const TK_REFERENCES = 125
const TK_REGISTER = 176
const TK_REINDEX = 98
const TK_RELEASE = 14
const TK_REM = 110
const TK_RENAME = 99
const TK_REPLACE = 73
const TK_RESTRICT = 74
const TK_RETURNING = 150
const TK_ROLLBACK = 12
const TK_ROW = 75
const TK_ROWS = 76
const TK_RP = 23
const TK_RSHIFT = 105
const TK_SAVEPOINT = 13
const TK_SELECT = 138
const TK_SELECT_COLUMN = 178
const TK_SEMI = 1
const TK_SET = 130
const TK_SLASH = 109
const TK_SPACE = 183
const TK_SPAN = 181
const TK_STAR = 108
const TK_STRING = 117
const TK_TABLE = 16
const TK_TEMP = 21
const TK_THEN = 159
const TK_TIES = 94
const TK_TO = 15
const TK_TRANSACTION = 6
const TK_TRIGGER = 77
const TK_TRUEFALSE = 170
const TK_TRUTH = 175
const TK_UMINUS = 173
const TK_UNBOUNDED = 90
const TK_UNION = 134
const TK_UNIQUE = 123
const TK_UPDATE = 129
const TK_UPLUS = 174
const TK_USING = 144
const TK_VACUUM = 78
const TK_VALUES = 139
const TK_VARIABLE = 156
const TK_VECTOR = 177
const TK_VIEW = 79
const TK_VIRTUAL = 80
const TK_WHEN = 158
const TK_WHERE = 149
const TK_WINDOW = 164
const TK_WITH = 81
const TK_WITHOUT = 26
const TMP_MAX = 308915776
const TOKEN = 0
const TRANS_NONE = 0
const TRANS_READ = 1
const TRANS_WRITE = 2
const TREETRACE_ENABLED = 0
const TRIGGER_AFTER = 2
const TRIGGER_BEFORE = 1
const TTYDISC = 0
const UF_APPEND = 4
const UF_ARCHIVE = 2048
const UF_HIDDEN = 32768
const UF_IMMUTABLE = 2
const UF_NODUMP = 1
const UF_NOUNLINK = 16
const UF_OFFLINE = 512
const UF_OPAQUE = 8
const UF_READONLY = 4096
const UF_REPARSE = 1024
const UF_SETTABLE = 65535
const UF_SPARSE = 256
const UF_SYSTEM = 128
const UNIXFILE_DELETE = 32
const UNIXFILE_DIRSYNC = 8
const UNIXFILE_EXCL = 1
const UNIXFILE_NOLOCK = 128
const UNIXFILE_PERSIST_WAL = 4
const UNIXFILE_PSOW = 16
const UNIXFILE_RDONLY = 2
const UNIXFILE_URI = 64
const UNIX_SHM_BASE = 120
const UNIX_SHM_DMS = 128
const UNKNOWN_LOCK = 5
const UTIME_NOW = -1
const UTIME_OMIT = -2
const UpperToLower = 0
const VDBE_DISPLAY_P4 = 1
const VDBE_HALT_STATE = 3
const VDBE_INIT_STATE = 0
const VDBE_READY_STATE = 1
const VDBE_RUN_STATE = 2
const ViewCanHaveRowid = 0
const WALINDEX_HDR_SIZE = 0
const WALINDEX_MAX_VERSION = 3007000
const WALINDEX_PGSZ = 0
const WAL_ALL_BUT_WRITE = 1
const WAL_CKPT_LOCK = 1
const WAL_EXCLUSIVE_MODE = 1
const WAL_FRAME_HDRSIZE = 24
const WAL_HDRSIZE = 32
const WAL_HEAPMEMORY_MODE = 2
const WAL_LOCK_CKPT = 1
const WAL_LOCK_READ0 = 3
const WAL_LOCK_WRITE = 0
const WAL_MAGIC = 931071618
const WAL_MAX_VERSION = 3007000
const WAL_NORMAL_MODE = 0
const WAL_NREADER = 5
const WAL_RDONLY = 1
const WAL_RDWR = 0
const WAL_RECOVER_LOCK = 2
const WAL_RETRY = -1
const WAL_RETRY_BLOCKED_MASK = 0
const WAL_RETRY_PROTOCOL_LIMIT = 100
const WAL_SAVEPOINT_NDATA = 4
const WAL_SHM_RDONLY = 2
const WAL_WRITE_LOCK = 0
const WHERE_AGG_DISTINCT = 1024
const WHERE_AUTO_INDEX = 16384
const WHERE_BIGNULL_SORT = 524288
const WHERE_BLOOMFILTER = 4194304
const WHERE_BOTH_LIMIT = 48
const WHERE_BTM_LIMIT = 32
const WHERE_COLUMN_EQ = 1
const WHERE_COLUMN_IN = 4
const WHERE_COLUMN_NULL = 8
const WHERE_COLUMN_RANGE = 2
const WHERE_CONSTRAINT = 15
const WHERE_DISTINCTBY = 128
const WHERE_DISTINCT_NOOP = 0
const WHERE_DISTINCT_ORDERED = 2
const WHERE_DISTINCT_UNIQUE = 1
const WHERE_DISTINCT_UNORDERED = 3
const WHERE_DUPLICATES_OK = 16
const WHERE_EXPRIDX = 67108864
const WHERE_GROUPBY = 64
const WHERE_IDX_ONLY = 64
const WHERE_INDEXED = 512
const WHERE_IN_ABLE = 2048
const WHERE_IN_EARLYOUT = 262144
const WHERE_IN_SEEKSCAN = 1048576
const WHERE_IPK = 256
const WHERE_MULTI_OR = 8192
const WHERE_OMIT_OFFSET = 16777216
const WHERE_ONEPASS_DESIRED = 4
const WHERE_ONEPASS_MULTIROW = 8
const WHERE_ONEROW = 4096
const WHERE_ORDERBY_LIMIT = 2048
const WHERE_ORDERBY_MAX = 2
const WHERE_ORDERBY_MIN = 1
const WHERE_ORDERBY_NORMAL = 0
const WHERE_OR_SUBCLAUSE = 32
const WHERE_PARTIALIDX = 131072
const WHERE_RIGHT_JOIN = 4096
const WHERE_SELFCULL = 8388608
const WHERE_SKIPSCAN = 32768
const WHERE_SORTBYGROUP = 512
const WHERE_TOP_LIMIT = 16
const WHERE_TRANSCONS = 2097152
const WHERE_UNQ_WANTED = 65536
const WHERE_USE_LIMIT = 16384
const WHERE_VIRTUALTABLE = 1024
const WHERE_WANT_DISTINCT = 256
const WINDOW_AGGINVERSE = 2
const WINDOW_AGGSTEP = 3
const WINDOW_ENDING_INT = 1
const WINDOW_ENDING_NUM = 4
const WINDOW_NTH_VALUE_INT = 2
const WINDOW_RETURN_ROW = 1
const WINDOW_STARTING_INT = 0
const WINDOW_STARTING_NUM = 3
const WO_ALL = 16383
const WO_AND = 1024
const WO_AUX = 64
const WO_EQ = 2
const WO_EQUIV = 2048
const WO_GE = 32
const WO_GT = 4
const WO_IN = 1
const WO_IS = 128
const WO_ISNULL = 256
const WO_LE = 8
const WO_LT = 16
const WO_NOOP = 4096
const WO_OR = 512
const WO_ROWVAL = 8192
const WO_SINGLE = 511
const WRC_Abort = 2
const WRC_Continue = 0
const WRC_Prune = 1
const WRITE_LOCK = 2
const W_OK = 2
const XN_EXPR = -2
const XN_ROWID = -1
const X_OK = 1
const YYFALLBACK = 1
const YYMALLOCARGTYPE = 0
const YYNOCODE = 319
const YYNOERRORRECOVERY = 1
const YYNRULE = 405
const YYNRULE_WITH_ACTION = 340
const YYNSTATE = 579
const YYNTOKEN = 185
const YYPARSEFREENEVERNULL = 1
const YYSTACKDEPTH = 100
const YYWILDCARD = 101
const YY_ACCEPT_ACTION = 1244
const YY_ACTTAB_COUNT = 2100
const YY_ERROR_ACTION = 1243
const YY_MAX_REDUCE = 1650
const YY_MAX_SHIFT = 578
const YY_MAX_SHIFTREDUCE = 1242
const YY_MIN_REDUCE = 1246
const YY_MIN_SHIFTREDUCE = 838
const YY_NO_ACTION = 1245
const YY_REDUCE_COUNT = 410
const YY_REDUCE_MAX = 1753
const YY_REDUCE_MIN = -271
const YY_SHIFT_COUNT = 578
const YY_SHIFT_MAX = 2088
const YY_SHIFT_MIN = 0
const _BYTE_ORDER = 1234
const _CS_PATH = 1
const _CS_POSIX_V6_ILP32_OFF32_CFLAGS = 2
const _CS_POSIX_V6_ILP32_OFF32_LDFLAGS = 3
const _CS_POSIX_V6_ILP32_OFF32_LIBS = 4
const _CS_POSIX_V6_ILP32_OFFBIG_CFLAGS = 5
const _CS_POSIX_V6_ILP32_OFFBIG_LDFLAGS = 6
const _CS_POSIX_V6_ILP32_OFFBIG_LIBS = 7
const _CS_POSIX_V6_LP64_OFF64_CFLAGS = 8
const _CS_POSIX_V6_LP64_OFF64_LDFLAGS = 9
const _CS_POSIX_V6_LP64_OFF64_LIBS = 10
const _CS_POSIX_V6_LPBIG_OFFBIG_CFLAGS = 11
const _CS_POSIX_V6_LPBIG_OFFBIG_LDFLAGS = 12
const _CS_POSIX_V6_LPBIG_OFFBIG_LIBS = 13
const _CS_POSIX_V6_WIDTH_RESTRICTED_ENVS = 14
const _FILE_OFFSET_BITS = 64
const _IOFBF = 0
const _IOLBF = 1
const _IONBF = 2
const _LARGEFILE_SOURCE = 1
const _LARGE_FILE = 1
const _LP64 = 1
const _MADV_DONTNEED = 4
const _MADV_NORMAL = 0
const _MADV_RANDOM = 1
const _MADV_SEQUENTIAL = 2
const _MADV_WILLNEED = 3
const _NFDBITS = 0
const _PC_ACL_EXTENDED = 59
const _PC_ACL_NFS4 = 64
const _PC_ACL_PATH_MAX = 60
const _PC_ALLOC_SIZE_MIN = 10
const _PC_ASYNC_IO = 53
const _PC_CAP_PRESENT = 61
const _PC_CHOWN_RESTRICTED = 7
const _PC_DEALLOC_PRESENT = 65
const _PC_FILESIZEBITS = 12
const _PC_INF_PRESENT = 62
const _PC_LINK_MAX = 1
const _PC_MAC_PRESENT = 63
const _PC_MAX_CANON = 2
const _PC_MAX_INPUT = 3
const _PC_MIN_HOLE_SIZE = 21
const _PC_NAME_MAX = 4
const _PC_NO_TRUNC = 8
const _PC_PATH_MAX = 5
const _PC_PIPE_BUF = 6
const _PC_PRIO_IO = 54
const _PC_REC_INCR_XFER_SIZE = 14
const _PC_REC_MAX_XFER_SIZE = 15
const _PC_REC_MIN_XFER_SIZE = 16
const _PC_REC_XFER_ALIGN = 17
const _PC_SYMLINK_MAX = 18
const _PC_SYNC_IO = 55
const _PC_VDISABLE = 9
const _PDP_ENDIAN = 3412
const _POSIX2_CHAR_TERM = 1
const _POSIX2_C_BIND = 200112
const _POSIX2_C_DEV = -1
const _POSIX2_FORT_DEV = -1
const _POSIX2_FORT_RUN = 200112
const _POSIX2_LOCALEDEF = -1
const _POSIX2_PBS = -1
const _POSIX2_PBS_ACCOUNTING = -1
const _POSIX2_PBS_CHECKPOINT = -1
const _POSIX2_PBS_LOCATE = -1
const _POSIX2_PBS_MESSAGE = -1
const _POSIX2_PBS_TRACK = -1
const _POSIX2_SW_DEV = -1
const _POSIX2_UPE = 200112
const _POSIX2_VERSION = 199212
const _POSIX_ADVISORY_INFO = 200112
const _POSIX_ASYNCHRONOUS_IO = 200112
const _POSIX_BARRIERS = 200112
const _POSIX_CHOWN_RESTRICTED = 1
const _POSIX_CLOCK_SELECTION = -1
const _POSIX_CPUTIME = 200112
const _POSIX_FSYNC = 200112
const _POSIX_IPV6 = 0
const _POSIX_JOB_CONTROL = 1
const _POSIX_MAPPED_FILES = 200112
const _POSIX_MEMLOCK = -1
const _POSIX_MEMLOCK_RANGE = 200112
const _POSIX_MEMORY_PROTECTION = 200112
const _POSIX_MESSAGE_PASSING = 200112
const _POSIX_MONOTONIC_CLOCK = 200112
const _POSIX_NO_TRUNC = 1
const _POSIX_PRIORITIZED_IO = -1
const _POSIX_PRIORITY_SCHEDULING = 0
const _POSIX_RAW_SOCKETS = 200112
const _POSIX_READER_WRITER_LOCKS = 200112
const _POSIX_REALTIME_SIGNALS = 200112
const _POSIX_REGEXP = 1
const _POSIX_SEMAPHORES = 200112
const _POSIX_SHARED_MEMORY_OBJECTS = 200112
const _POSIX_SHELL = 1
const _POSIX_SPAWN = 200112
const _POSIX_SPIN_LOCKS = 200112
const _POSIX_SPORADIC_SERVER = -1
const _POSIX_SYNCHRONIZED_IO = -1
const _POSIX_THREADS = 200112
const _POSIX_THREAD_ATTR_STACKADDR = 200112
const _POSIX_THREAD_ATTR_STACKSIZE = 200112
const _POSIX_THREAD_CPUTIME = 200112
const _POSIX_THREAD_PRIORITY_SCHEDULING = 200112
const _POSIX_THREAD_PRIO_INHERIT = 200112
const _POSIX_THREAD_PRIO_PROTECT = 200112
const _POSIX_THREAD_PROCESS_SHARED = 200112
const _POSIX_THREAD_SAFE_FUNCTIONS = -1
const _POSIX_THREAD_SPORADIC_SERVER = -1
const _POSIX_TIMEOUTS = 200112
const _POSIX_TIMERS = 200112
const _POSIX_TRACE = -1
const _POSIX_TRACE_EVENT_FILTER = -1
const _POSIX_TRACE_INHERIT = -1
const _POSIX_TRACE_LOG = -1
const _POSIX_TYPED_MEMORY_OBJECTS = -1
const _POSIX_VDISABLE = 255
const _POSIX_VERSION = 200112
const _PROT_ALL = 7
const _PROT_MAX_SHIFT = 16
const _QUAD_HIGHWORD = 1
const _QUAD_LOWWORD = 0
const _SC_2_CHAR_TERM = 20
const _SC_2_C_BIND = 18
const _SC_2_C_DEV = 19
const _SC_2_FORT_DEV = 21
const _SC_2_FORT_RUN = 22
const _SC_2_LOCALEDEF = 23
const _SC_2_PBS = 59
const _SC_2_PBS_ACCOUNTING = 60
const _SC_2_PBS_CHECKPOINT = 61
const _SC_2_PBS_LOCATE = 62
const _SC_2_PBS_MESSAGE = 63
const _SC_2_PBS_TRACK = 64
const _SC_2_SW_DEV = 24
const _SC_2_UPE = 25
const _SC_2_VERSION = 17
const _SC_ADVISORY_INFO = 65
const _SC_AIO_LISTIO_MAX = 42
const _SC_AIO_MAX = 43
const _SC_AIO_PRIO_DELTA_MAX = 44
const _SC_ARG_MAX = 1
const _SC_ASYNCHRONOUS_IO = 28
const _SC_ATEXIT_MAX = 107
const _SC_BARRIERS = 66
const _SC_BC_BASE_MAX = 9
const _SC_BC_DIM_MAX = 10
const _SC_BC_SCALE_MAX = 11
const _SC_BC_STRING_MAX = 12
const _SC_CHILD_MAX = 2
const _SC_CLK_TCK = 3
const _SC_CLOCK_SELECTION = 67
const _SC_COLL_WEIGHTS_MAX = 13
const _SC_CPUSET_SIZE = 122
const _SC_CPUTIME = 68
const _SC_DELAYTIMER_MAX = 45
const _SC_EXPR_NEST_MAX = 14
const _SC_FILE_LOCKING = 69
const _SC_FSYNC = 38
const _SC_GETGR_R_SIZE_MAX = 70
const _SC_GETPW_R_SIZE_MAX = 71
const _SC_HOST_NAME_MAX = 72
const _SC_IOV_MAX = 56
const _SC_IPV6 = 118
const _SC_JOB_CONTROL = 6
const _SC_LINE_MAX = 15
const _SC_LOGIN_NAME_MAX = 73
const _SC_MAPPED_FILES = 29
const _SC_MEMLOCK = 30
const _SC_MEMLOCK_RANGE = 31
const _SC_MEMORY_PROTECTION = 32
const _SC_MESSAGE_PASSING = 33
const _SC_MONOTONIC_CLOCK = 74
const _SC_MQ_OPEN_MAX = 46
const _SC_MQ_PRIO_MAX = 75
const _SC_NGROUPS_MAX = 4
const _SC_NPROCESSORS_CONF = 57
const _SC_NPROCESSORS_ONLN = 58
const _SC_OPEN_MAX = 5
const _SC_PAGESIZE = 47
const _SC_PAGE_SIZE = 47
const _SC_PHYS_PAGES = 121
const _SC_PRIORITIZED_IO = 34
const _SC_PRIORITY_SCHEDULING = 35
const _SC_RAW_SOCKETS = 119
const _SC_READER_WRITER_LOCKS = 76
const _SC_REALTIME_SIGNALS = 36
const _SC_REGEXP = 77
const _SC_RE_DUP_MAX = 16
const _SC_RTSIG_MAX = 48
const _SC_SAVED_IDS = 7
const _SC_SEMAPHORES = 37
const _SC_SEM_NSEMS_MAX = 49
const _SC_SEM_VALUE_MAX = 50
const _SC_SHARED_MEMORY_OBJECTS = 39
const _SC_SHELL = 78
const _SC_SIGQUEUE_MAX = 51
const _SC_SPAWN = 79
const _SC_SPIN_LOCKS = 80
const _SC_SPORADIC_SERVER = 81
const _SC_STREAM_MAX = 26
const _SC_SYMLOOP_MAX = 120
const _SC_SYNCHRONIZED_IO = 40
const _SC_THREADS = 96
const _SC_THREAD_ATTR_STACKADDR = 82
const _SC_THREAD_ATTR_STACKSIZE = 83
const _SC_THREAD_CPUTIME = 84
const _SC_THREAD_DESTRUCTOR_ITERATIONS = 85
const _SC_THREAD_KEYS_MAX = 86
const _SC_THREAD_PRIORITY_SCHEDULING = 89
const _SC_THREAD_PRIO_INHERIT = 87
const _SC_THREAD_PRIO_PROTECT = 88
const _SC_THREAD_PROCESS_SHARED = 90
const _SC_THREAD_SAFE_FUNCTIONS = 91
const _SC_THREAD_SPORADIC_SERVER = 92
const _SC_THREAD_STACK_MIN = 93
const _SC_THREAD_THREADS_MAX = 94
const _SC_TIMEOUTS = 95
const _SC_TIMERS = 41
const _SC_TIMER_MAX = 52
const _SC_TRACE = 97
const _SC_TRACE_EVENT_FILTER = 98
const _SC_TRACE_INHERIT = 99
const _SC_TRACE_LOG = 100
const _SC_TTY_NAME_MAX = 101
const _SC_TYPED_MEMORY_OBJECTS = 102
const _SC_TZNAME_MAX = 27
const _SC_V6_ILP32_OFF32 = 103
const _SC_V6_ILP32_OFFBIG = 104
const _SC_V6_LP64_OFF64 = 105
const _SC_V6_LPBIG_OFFBIG = 106
const _SC_VERSION = 8
const _SC_XOPEN_CRYPT = 108
const _SC_XOPEN_ENH_I18N = 109
const _SC_XOPEN_LEGACY = 110
const _SC_XOPEN_REALTIME = 111
const _SC_XOPEN_REALTIME_THREADS = 112
const _SC_XOPEN_SHM = 113
const _SC_XOPEN_STREAMS = 114
const _SC_XOPEN_UNIX = 115
const _SC_XOPEN_VERSION = 116
const _SC_XOPEN_XCU_VERSION = 117
const _SIG_MAXSIG = 128
const _SIG_WORDS = 4
const _V6_ILP32_OFF32 = -1
const _V6_ILP32_OFFBIG = 0
const _V6_LP64_OFF64 = 0
const _V6_LPBIG_OFFBIG = -1
const _XOPEN_CRYPT = -1
const _XOPEN_ENH_I18N = -1
const _XOPEN_LEGACY = -1
const _XOPEN_REALTIME = -1
const _XOPEN_REALTIME_THREADS = -1
const _XOPEN_SHM = 1
const _XOPEN_SOURCE = 600
const _XOPEN_STREAMS = -1
const _XOPEN_UNIX = -1
const __ATOMIC_ACQUIRE = 2
const __ATOMIC_ACQ_REL = 4
const __ATOMIC_CONSUME = 1
const __ATOMIC_RELAXED = 0
const __ATOMIC_RELEASE = 3
const __ATOMIC_SEQ_CST = 5
const __BIGGEST_ALIGNMENT__ = 16
const __BITINT_MAXWIDTH__ = 8388608
const __BOOL_WIDTH__ = 8
const __BSD_VISIBLE = 1
const __BYTE_ORDER__ = 1234
const __CCGO__ = 1
const __CC_SUPPORTS_DYNAMIC_ARRAY_INIT = 1
const __CC_SUPPORTS_INLINE = 1
const __CC_SUPPORTS_VARADIC_XXX = 1
const __CC_SUPPORTS_WARNING = 1
const __CC_SUPPORTS___FUNC__ = 1
const __CC_SUPPORTS___INLINE = 1
const __CC_SUPPORTS___INLINE__ = 1
const __CHAR_BIT = 8
const __CHAR_BIT__ = 8
const __CLANG_ATOMIC_BOOL_LOCK_FREE = 2
const __CLANG_ATOMIC_CHAR16_T_LOCK_FREE = 2
const __CLANG_ATOMIC_CHAR32_T_LOCK_FREE = 2
const __CLANG_ATOMIC_CHAR_LOCK_FREE = 2
const __CLANG_ATOMIC_INT_LOCK_FREE = 2
const __CLANG_ATOMIC_LLONG_LOCK_FREE = 2
const __CLANG_ATOMIC_LONG_LOCK_FREE = 2
const __CLANG_ATOMIC_POINTER_LOCK_FREE = 2
const __CLANG_ATOMIC_SHORT_LOCK_FREE = 2
const __CLANG_ATOMIC_WCHAR_T_LOCK_FREE = 2
const __CONSTANT_CFSTRINGS__ = 1
const __DBL_DECIMAL_DIG__ = 17
const __DBL_DENORM_MIN__ = 0
const __DBL_DIG__ = 15
const __DBL_EPSILON__ = 0
const __DBL_HAS_DENORM__ = 1
const __DBL_HAS_INFINITY__ = 1
const __DBL_HAS_QUIET_NAN__ = 1
const __DBL_MANT_DIG__ = 53
const __DBL_MAX_10_EXP__ = 308
const __DBL_MAX_EXP__ = 1024
const __DBL_MAX__ = 0
const __DBL_MIN_10_EXP__ = -307
const __DBL_MIN_EXP__ = -1021
const __DBL_MIN__ = 0
const __DECIMAL_DIG__ = 17
const __ELF__ = 1
const __EXT1_VISIBLE = 1
const __FINITE_MATH_ONLY__ = 0
const __FLT16_DECIMAL_DIG__ = 5
const __FLT16_DENORM_MIN__ = 0
const __FLT16_DIG__ = 3
const __FLT16_EPSILON__ = 0
const __FLT16_HAS_DENORM__ = 1
const __FLT16_HAS_INFINITY__ = 1
const __FLT16_HAS_QUIET_NAN__ = 1
const __FLT16_MANT_DIG__ = 11
const __FLT16_MAX_10_EXP__ = 4
const __FLT16_MAX_EXP__ = 16
const __FLT16_MAX__ = 0
const __FLT16_MIN_10_EXP__ = -4
const __FLT16_MIN_EXP__ = -13
const __FLT16_MIN__ = 0
const __FLT_DECIMAL_DIG__ = 9
const __FLT_DENORM_MIN__ = 0
const __FLT_DIG__ = 6
const __FLT_EPSILON__ = 0
const __FLT_HAS_DENORM__ = 1
const __FLT_HAS_INFINITY__ = 1
const __FLT_HAS_QUIET_NAN__ = 1
const __FLT_MANT_DIG__ = 24
const __FLT_MAX_10_EXP__ = 38
const __FLT_MAX_EXP__ = 128
const __FLT_MAX__ = 0
const __FLT_MIN_10_EXP__ = -37
const __FLT_MIN_EXP__ = -125
const __FLT_MIN__ = 0
const __FLT_RADIX__ = 2
const __FUNCTION__ = 0
const __FXSR__ = 1
const __FreeBSD__ = 14
const __FreeBSD_cc_version = 1400006
const __GCC_ASM_FLAG_OUTPUTS__ = 1
const __GCC_ATOMIC_BOOL_LOCK_FREE = 2
const __GCC_ATOMIC_CHAR16_T_LOCK_FREE = 2
const __GCC_ATOMIC_CHAR32_T_LOCK_FREE = 2
const __GCC_ATOMIC_CHAR_LOCK_FREE = 2
const __GCC_ATOMIC_INT_LOCK_FREE = 2
const __GCC_ATOMIC_LLONG_LOCK_FREE = 2
const __GCC_ATOMIC_LONG_LOCK_FREE = 2
const __GCC_ATOMIC_POINTER_LOCK_FREE = 2
const __GCC_ATOMIC_SHORT_LOCK_FREE = 2
const __GCC_ATOMIC_TEST_AND_SET_TRUEVAL = 1
const __GCC_ATOMIC_WCHAR_T_LOCK_FREE = 2
const __GCC_HAVE_DWARF2_CFI_ASM = 1
const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1 = 1
const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2 = 1
const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 = 1
const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 = 1
const __GNUCLIKE_ASM = 3
const __GNUCLIKE_BUILTIN_CONSTANT_P = 1
const __GNUCLIKE_BUILTIN_MEMCPY = 1
const __GNUCLIKE_BUILTIN_NEXT_ARG = 1
const __GNUCLIKE_BUILTIN_STDARG = 1
const __GNUCLIKE_BUILTIN_VAALIST = 1
const __GNUCLIKE_BUILTIN_VARARGS = 1
const __GNUCLIKE_CTOR_SECTION_HANDLING = 1
const __GNUCLIKE___SECTION = 1
const __GNUCLIKE___TYPEOF = 1
const __GNUC_MINOR__ = 2
const __GNUC_PATCHLEVEL__ = 1
const __GNUC_STDC_INLINE__ = 1
const __GNUC_VA_LIST_COMPATIBILITY = 1
const __GNUC__ = 4
const __GXX_ABI_VERSION = 1002
const __INT16_FMTd__ = "hd"
const __INT16_FMTi__ = "hi"
const __INT16_MAX__ = 32767
const __INT16_TYPE__ = 0
const __INT32_FMTd__ = "d"
const __INT32_FMTi__ = "i"
const __INT32_MAX__ = 2147483647
const __INT32_TYPE__ = 0
const __INT64_C_SUFFIX__ = 0
const __INT64_FMTd__ = "ld"
const __INT64_FMTi__ = "li"
const __INT64_MAX__ = 9223372036854775807
const __INT8_FMTd__ = "hhd"
const __INT8_FMTi__ = "hhi"
const __INT8_MAX__ = 127
const __INTMAX_C_SUFFIX__ = 0
const __INTMAX_FMTd__ = "ld"
const __INTMAX_FMTi__ = "li"
const __INTMAX_MAX__ = 9223372036854775807
const __INTMAX_WIDTH__ = 64
const __INTPTR_FMTd__ = "ld"
const __INTPTR_FMTi__ = "li"
const __INTPTR_MAX__ = 9223372036854775807
const __INTPTR_WIDTH__ = 64
const __INT_FAST16_FMTd__ = "hd"
const __INT_FAST16_FMTi__ = "hi"
const __INT_FAST16_MAX__ = 32767
const __INT_FAST16_TYPE__ = 0
const __INT_FAST16_WIDTH__ = 16
const __INT_FAST32_FMTd__ = "d"
const __INT_FAST32_FMTi__ = "i"
const __INT_FAST32_MAX__ = 2147483647
const __INT_FAST32_TYPE__ = 0
const __INT_FAST32_WIDTH__ = 32
const __INT_FAST64_FMTd__ = "ld"
const __INT_FAST64_FMTi__ = "li"
const __INT_FAST64_MAX__ = 9223372036854775807
const __INT_FAST64_WIDTH__ = 64
const __INT_FAST8_FMTd__ = "hhd"
const __INT_FAST8_FMTi__ = "hhi"
const __INT_FAST8_MAX__ = 127
const __INT_FAST8_WIDTH__ = 8
const __INT_LEAST16_FMTd__ = "hd"
const __INT_LEAST16_FMTi__ = "hi"
const __INT_LEAST16_MAX__ = 32767
const __INT_LEAST16_TYPE__ = 0
const __INT_LEAST16_WIDTH__ = 16
const __INT_LEAST32_FMTd__ = "d"
const __INT_LEAST32_FMTi__ = "i"
const __INT_LEAST32_MAX__ = 2147483647
const __INT_LEAST32_TYPE__ = 0
const __INT_LEAST32_WIDTH__ = 32
const __INT_LEAST64_FMTd__ = "ld"
const __INT_LEAST64_FMTi__ = "li"
const __INT_LEAST64_MAX__ = 9223372036854775807
const __INT_LEAST64_WIDTH__ = 64
const __INT_LEAST8_FMTd__ = "hhd"
const __INT_LEAST8_FMTi__ = "hhi"
const __INT_LEAST8_MAX__ = 127
const __INT_LEAST8_WIDTH__ = 8
const __INT_MAX = 2147483647
const __INT_MAX__ = 2147483647
const __INT_MIN = -2147483648
const __INT_WIDTH__ = 32
const __ISO_C_VISIBLE = 2011
const __KPRINTF_ATTRIBUTE__ = 1
const __LDBL_DECIMAL_DIG__ = 17
const __LDBL_DENORM_MIN__ = 0
const __LDBL_DIG__ = 15
const __LDBL_EPSILON__ = 0
const __LDBL_HAS_DENORM__ = 1
const __LDBL_HAS_INFINITY__ = 1
const __LDBL_HAS_QUIET_NAN__ = 1
const __LDBL_MANT_DIG__ = 53
const __LDBL_MAX_10_EXP__ = 308
const __LDBL_MAX_EXP__ = 1024
const __LDBL_MAX__ = 0
const __LDBL_MIN_10_EXP__ = -307
const __LDBL_MIN_EXP__ = -1021
const __LDBL_MIN__ = 0
const __LITTLE_ENDIAN__ = 1
const __LLONG_MAX = 9223372036854775807
const __LLONG_MIN = -9223372036854775808
const __LLONG_WIDTH__ = 64
const __LONG_BIT = 64
const __LONG_LONG_MAX__ = 9223372036854775807
const __LONG_MAX = 9223372036854775807
const __LONG_MAX__ = 9223372036854775807
const __LONG_MIN = -9223372036854775808
const __LONG_WIDTH__ = 64
const __LP64__ = 1
const __MINSIGSTKSZ = 2048
const __MMX__ = 1
const __NO_INLINE__ = 1
const __NO_MATH_ERRNO__ = 1
const __NO_MATH_INLINES = 1
const __OBJC_BOOL_IS_BOOL = 0
const __OFF_MAX = 9223372036854775807
const __OFF_MIN = -9223372036854775808
const __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES = 3
const __OPENCL_MEMORY_SCOPE_DEVICE = 2
const __OPENCL_MEMORY_SCOPE_SUB_GROUP = 4
const __OPENCL_MEMORY_SCOPE_WORK_GROUP = 1
const __OPENCL_MEMORY_SCOPE_WORK_ITEM = 0
const __ORDER_BIG_ENDIAN__ = 4321
const __ORDER_LITTLE_ENDIAN__ = 1234
const __ORDER_PDP_ENDIAN__ = 3412
const __POINTER_WIDTH__ = 64
const __POSIX_VISIBLE = 200809
const __PRAGMA_REDEFINE_EXTNAME = 1
const __PRETTY_FUNCTION__ = 0
const __PTRDIFF_FMTd__ = "ld"
const __PTRDIFF_FMTi__ = "li"
const __PTRDIFF_MAX__ = 9223372036854775807
const __PTRDIFF_WIDTH__ = 64
const __QUAD_MAX = 9223372036854775807
const __QUAD_MIN = -9223372036854775808
const __S2OAP = 1
const __SALC = 16384
const __SAPP = 256
const __SCHAR_MAX = 127
const __SCHAR_MAX__ = 127
const __SCHAR_MIN = -128
const __SEG_FS = 1
const __SEG_GS = 1
const __SEOF = 32
const __SERR = 64
const __SHRT_MAX = 32767
const __SHRT_MAX__ = 32767
const __SHRT_MIN = -32768
const __SHRT_WIDTH__ = 16
const __SIGN = 32768
const __SIG_ATOMIC_MAX__ = 2147483647
const __SIG_ATOMIC_WIDTH__ = 32
const __SIZEOF_DOUBLE__ = 8
const __SIZEOF_FLOAT__ = 4
const __SIZEOF_INT128__ = 16
const __SIZEOF_INT__ = 4
const __SIZEOF_LONG_DOUBLE__ = 8
const __SIZEOF_LONG_LONG__ = 8
const __SIZEOF_LONG__ = 8
const __SIZEOF_POINTER__ = 8
const __SIZEOF_PTRDIFF_T__ = 8
const __SIZEOF_SHORT__ = 2
const __SIZEOF_SIZE_T__ = 8
const __SIZEOF_WCHAR_T__ = 4
const __SIZEOF_WINT_T__ = 4
const __SIZE_FMTX__ = "lX"
const __SIZE_FMTo__ = "lo"
const __SIZE_FMTu__ = "lu"
const __SIZE_FMTx__ = "lx"
const __SIZE_MAX__ = 18446744073709551615
const __SIZE_T_MAX = 18446744073709551615
const __SIZE_WIDTH__ = 64
const __SLBF = 1
const __SMBF = 128
const __SMOD = 8192
const __SNBF = 2
const __SNPT = 2048
const __SOFF = 4096
const __SOPT = 1024
const __SQLITESESSION_H_ = 1
const __SRD = 4
const __SRW = 16
const __SSE2_MATH__ = 1
const __SSE2__ = 1
const __SSE_MATH__ = 1
const __SSE__ = 1
const __SSIZE_MAX = 9223372036854775807
const __SSTR = 512
const __STDC_HOSTED__ = 1
const __STDC_MB_MIGHT_NEQ_WC__ = 1
const __STDC_UTF_16__ = 1
const __STDC_UTF_32__ = 1
const __STDC_VERSION__ = 201710
const __STDC__ = 1
const __SWR = 8
const __UCHAR_MAX = 255
const __UINT16_FMTX__ = "hX"
const __UINT16_FMTo__ = "ho"
const __UINT16_FMTu__ = "hu"
const __UINT16_FMTx__ = "hx"
const __UINT16_MAX__ = 65535
const __UINT32_C_SUFFIX__ = 0
const __UINT32_FMTX__ = "X"
const __UINT32_FMTo__ = "o"
const __UINT32_FMTu__ = "u"
const __UINT32_FMTx__ = "x"
const __UINT32_MAX__ = 4294967295
const __UINT64_C_SUFFIX__ = 0
const __UINT64_FMTX__ = "lX"
const __UINT64_FMTo__ = "lo"
const __UINT64_FMTu__ = "lu"
const __UINT64_FMTx__ = "lx"
const __UINT64_MAX__ = 18446744073709551615
const __UINT8_FMTX__ = "hhX"
const __UINT8_FMTo__ = "hho"
const __UINT8_FMTu__ = "hhu"
const __UINT8_FMTx__ = "hhx"
const __UINT8_MAX__ = 255
const __UINTMAX_C_SUFFIX__ = 0
const __UINTMAX_FMTX__ = "lX"
const __UINTMAX_FMTo__ = "lo"
const __UINTMAX_FMTu__ = "lu"
const __UINTMAX_FMTx__ = "lx"
const __UINTMAX_MAX__ = 18446744073709551615
const __UINTMAX_WIDTH__ = 64
const __UINTPTR_FMTX__ = "lX"
const __UINTPTR_FMTo__ = "lo"
const __UINTPTR_FMTu__ = "lu"
const __UINTPTR_FMTx__ = "lx"
const __UINTPTR_MAX__ = 18446744073709551615
const __UINTPTR_WIDTH__ = 64
const __UINT_FAST16_FMTX__ = "hX"
const __UINT_FAST16_FMTo__ = "ho"
const __UINT_FAST16_FMTu__ = "hu"
const __UINT_FAST16_FMTx__ = "hx"
const __UINT_FAST16_MAX__ = 65535
const __UINT_FAST32_FMTX__ = "X"
const __UINT_FAST32_FMTo__ = "o"
const __UINT_FAST32_FMTu__ = "u"
const __UINT_FAST32_FMTx__ = "x"
const __UINT_FAST32_MAX__ = 4294967295
const __UINT_FAST64_FMTX__ = "lX"
const __UINT_FAST64_FMTo__ = "lo"
const __UINT_FAST64_FMTu__ = "lu"
const __UINT_FAST64_FMTx__ = "lx"
const __UINT_FAST64_MAX__ = 18446744073709551615
const __UINT_FAST8_FMTX__ = "hhX"
const __UINT_FAST8_FMTo__ = "hho"
const __UINT_FAST8_FMTu__ = "hhu"
const __UINT_FAST8_FMTx__ = "hhx"
const __UINT_FAST8_MAX__ = 255
const __UINT_LEAST16_FMTX__ = "hX"
const __UINT_LEAST16_FMTo__ = "ho"
const __UINT_LEAST16_FMTu__ = "hu"
const __UINT_LEAST16_FMTx__ = "hx"
const __UINT_LEAST16_MAX__ = 65535
const __UINT_LEAST32_FMTX__ = "X"
const __UINT_LEAST32_FMTo__ = "o"
const __UINT_LEAST32_FMTu__ = "u"
const __UINT_LEAST32_FMTx__ = "x"
const __UINT_LEAST32_MAX__ = 4294967295
const __UINT_LEAST64_FMTX__ = "lX"
const __UINT_LEAST64_FMTo__ = "lo"
const __UINT_LEAST64_FMTu__ = "lu"
const __UINT_LEAST64_FMTx__ = "lx"
const __UINT_LEAST64_MAX__ = 18446744073709551615
const __UINT_LEAST8_FMTX__ = "hhX"
const __UINT_LEAST8_FMTo__ = "hho"
const __UINT_LEAST8_FMTu__ = "hhu"
const __UINT_LEAST8_FMTx__ = "hhx"
const __UINT_LEAST8_MAX__ = 255
const __UINT_MAX = 4294967295
const __ULLONG_MAX = 18446744073709551615
const __ULONG_MAX = 18446744073709551615
const __UQUAD_MAX = 18446744073709551615
const __USHRT_MAX = 65535
const __VERSION__ = "FreeBSD Clang 16.0.6 (https://github.com/llvm/llvm-project.git llvmorg-16.0.6-0-g7cbf1a259152)"
const __WCHAR_MAX = 2147483647
const __WCHAR_MAX__ = 2147483647
const __WCHAR_MIN = -2147483648
const __WCHAR_TYPE__ = 0
const __WCHAR_WIDTH__ = 32
const __WINT_MAX__ = 2147483647
const __WINT_TYPE__ = 0
const __WINT_WIDTH__ = 32
const __WORD_BIT = 32
const __XSI_VISIBLE = 700
const __amd64 = 1
const __amd64__ = 1
const __clang__ = 1
const __clang_literal_encoding__ = "UTF-8"
const __clang_major__ = 16
const __clang_minor__ = 0
const __clang_patchlevel__ = 6
const __clang_version__ = "16.0.6 (https://github.com/llvm/llvm-project.git llvmorg-16.0.6-0-g7cbf1a259152)"
const __clang_wide_literal_encoding__ = "UTF-32"
const __code_model_small__ = 1
const __const = 0
const __has_extension = 0
const __isnan = 0
const __isnanf = 0
const __k8 = 1
const __k8__ = 1
const __llvm__ = 1
const __lockable = 0
const __no_lock_analysis = 0
const __restrict = 0
const __restrict_arr = 0
const __signed = 0
const __tune_k8__ = 1
const __unix = 1
const __unix__ = 1
const __volatile = 0
const __x86_64 = 1
const __x86_64__ = 1
const bBatch = 0
const cume_distFinalizeFunc = 0
const errno = 0
const etBUFSIZE = 70
const etCHARX = 8
const etDECIMAL = 16
const etDYNSTRING = 6
const etEXP = 2
const etFLOAT = 1
const etGENERIC = 3
const etINVALID = 17
const etORDINAL = 15
const etPERCENT = 7
const etPOINTER = 13
const etRADIX = 0
const etSIZE = 4
const etSQLESCAPE = 9
const etSQLESCAPE2 = 10
const etSQLESCAPE3 = 14
const etSRCITEM = 12
const etSTRING = 5
const etTOKEN = 11
const fdatasync = 0
const fds_bits = 0
const first_valueInvFunc = 0
const first_valueValueFunc = 0
const fts5GetVarint = 0
const fts5YYMALLOCARGTYPE = 0
const fts5YYNFTS5TOKEN = 16
const fts5YYNOCODE = 27
const fts5YYNOERRORRECOVERY = 1
const fts5YYNRULE = 28
const fts5YYNRULE_WITH_ACTION = 28
const fts5YYNSTATE = 35
const fts5YYPARSEFREENOTNULL = 1
const fts5YYSTACKDEPTH = 100
const fts5YY_ACCEPT_ACTION = 81
const fts5YY_ACTTAB_COUNT = 105
const fts5YY_ERROR_ACTION = 80
const fts5YY_MAX_REDUCE = 110
const fts5YY_MAX_SHIFT = 34
const fts5YY_MAX_SHIFTREDUCE = 79
const fts5YY_MIN_REDUCE = 83
const fts5YY_MIN_SHIFTREDUCE = 52
const fts5YY_NO_ACTION = 82
const fts5YY_REDUCE_COUNT = 17
const fts5YY_REDUCE_MAX = 67
const fts5YY_REDUCE_MIN = -17
const fts5YY_SHIFT_COUNT = 34
const fts5YY_SHIFT_MAX = 93
const fts5YY_SHIFT_MIN = 0
const get4byte = 0
const getVarint = 0
const math_errhandling = 2
const mem0 = 0
const noopFunc = 0
const nth_valueInvFunc = 0
const nth_valueValueFunc = 0
const ntileFinalizeFunc = 0
const pcache1 = 0
const percent_rankFinalizeFunc = 0
const pread64 = 0
const put4byte = 0
const putVarint = 0
const pwrite64 = 0
const sqlite3Fts5ParserARG_PARAM = 0
const sqlite3Fts5ParserARG_PDECL = 0
const sqlite3Fts5ParserFTS5TOKENTYPE = 0
const sqlite3GlobalConfig = 0
const sqlite3ParserCTX_PARAM = 0
const sqlite3ParserCTX_PDECL = 0
const sqlite3ParserTOKENTYPE = 0
const sqlite3Parser_ENGINEALWAYSONSTACK = 1
const sqlite3StrNICmp = 0
const st_atimespec = 0
const st_birthtimespec = 0
const st_ctimespec = 0
const st_mtimespec = 0
const static_assert = 0
const stderr = 0
const stdin = 0
const stdout = 0
const threadid = 0
const tkCREATE = 4
const tkEND = 7
const tkEXPLAIN = 3
const tkOTHER = 2
const tkSEMI = 0
const tkTEMP = 5
const tkTRIGGER = 6
const tkWS = 1
const unix = 1
const vfsList = 0
const wsdAutoext = 0
const wsdHooks = 0
const wsdPrng = 0
const wsdStat = 0
type t__builtin_va_list = uintptr
type t__predefined_size_t = uint64
type t__predefined_wchar_t = int32
type t__predefined_ptrdiff_t = int64
type t__int8_t = int8
type t__uint8_t = uint8
type t__int16_t = int16
type t__uint16_t = uint16
type t__int32_t = int32
type t__uint32_t = uint32
type t__int64_t = int64
type t__uint64_t = uint64
type t__int_least8_t = int8
type t__int_least16_t = int16
type t__int_least32_t = int32
type t__int_least64_t = int64
type t__intmax_t = int64
type t__uint_least8_t = uint8
type t__uint_least16_t = uint16
type t__uint_least32_t = uint32
type t__uint_least64_t = uint64
type t__uintmax_t = uint64
type t__intptr_t = int64
type t__intfptr_t = int64
type t__uintptr_t = uint64
type t__uintfptr_t = uint64
type t__vm_offset_t = uint64
type t__vm_size_t = uint64
type t__size_t = uint64
type t__ssize_t = int64
type t__ptrdiff_t = int64
type t__clock_t = int32
type t__critical_t = int64
type t__double_t = float64
type t__float_t = float32
type t__int_fast8_t = int32
type t__int_fast16_t = int32
type t__int_fast32_t = int32
type t__int_fast64_t = int64
type t__register_t = int64
type t__segsz_t = int64
type t__time_t = int64
type t__uint_fast8_t = uint32
type t__uint_fast16_t = uint32
type t__uint_fast32_t = uint32
type t__uint_fast64_t = uint64
type t__u_register_t = uint64
type t__vm_paddr_t = uint64
type T___wchar_t = int32
type ___wchar_t = T___wchar_t
type t__blksize_t = int32
type t__blkcnt_t = int64
type t__clockid_t = int32
type t__fflags_t = uint32
type t__fsblkcnt_t = uint64
type t__fsfilcnt_t = uint64
type t__gid_t = uint32
type t__id_t = int64
type t__ino_t = uint64
type t__key_t = int64
type t__lwpid_t = int32
type t__mode_t = uint16
type t__accmode_t = int32
type t__nl_item = int32
type t__nlink_t = uint64
type t__off_t = int64
type t__off64_t = int64
type t__pid_t = int32
type t__sbintime_t = int64
type t__rlim_t = int64
type t__sa_family_t = uint8
type t__socklen_t = uint32
type t__suseconds_t = int64
type t__timer_t = uintptr
type t__mqd_t = uintptr
type t__uid_t = uint32
type t__useconds_t = uint32
type t__cpuwhich_t = int32
type t__cpulevel_t = int32
type t__cpusetid_t = int32
type t__daddr_t = int64
type t__ct_rune_t = int32
type t__rune_t = int32
type t__wint_t = int32
type t__char16_t = uint16
type t__char32_t = uint32
type t__max_align_t = struct {
F__max_align1 int64
F__max_align2 float64
}
type t__dev_t = uint64
type t__fixpt_t = uint32
type t__mbstate_t = struct {
F_mbstateL [0]t__int64_t
F__mbstate8 [128]int8
}
type t__rman_res_t = uint64
type t__va_list = uintptr
type t__gnuc_va_list = uintptr
type Tva_list = uintptr
type va_list = Tva_list
// C documentation
//
// /*
// ** CAPI3REF: Database Connection Handle
// ** KEYWORDS: {database connection} {database connections}
// **
// ** Each open SQLite database is represented by a pointer to an instance of
// ** the opaque structure named "sqlite3". It is useful to think of an sqlite3
// ** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and
// ** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()]
// ** and [sqlite3_close_v2()] are its destructors. There are many other
// ** interfaces (such as
// ** [sqlite3_prepare_v2()], [sqlite3_create_function()], and
// ** [sqlite3_busy_timeout()] to name but three) that are methods on an
// ** sqlite3 object.
// */
type Tsqlite3 = struct {
FpVfs uintptr
FpVdbe uintptr
FpDfltColl uintptr
Fmutex uintptr
FaDb uintptr
FnDb int32
FmDbFlags Tu32
Fflags Tu64
FlastRowid Ti64
FszMmap Ti64
FnSchemaLock Tu32
FopenFlags uint32
FerrCode int32
FerrByteOffset int32
FerrMask int32
FiSysErrno int32
FdbOptFlags Tu32
Fenc Tu8
FautoCommit Tu8
Ftemp_store Tu8
FmallocFailed Tu8
FbBenignMalloc Tu8
FdfltLockMode Tu8
FnextAutovac int8
FsuppressErr Tu8
FvtabOnConflict Tu8
FisTransactionSavepoint Tu8
FmTrace Tu8
FnoSharedCache Tu8
FnSqlExec Tu8
FeOpenState Tu8
FnextPagesize int32
FnChange Ti64
FnTotalChange Ti64
FaLimit [12]int32
FnMaxSorterMmap int32
Finit1 Tsqlite3InitInfo
FnVdbeActive int32
FnVdbeRead int32
FnVdbeWrite int32
FnVdbeExec int32
FnVDestroy int32
FnExtension int32
FaExtension uintptr
Ftrace struct {
FxV2 [0]uintptr
FxLegacy uintptr
}
FpTraceArg uintptr
FxProfile uintptr
FpProfileArg uintptr
FpCommitArg uintptr
FxCommitCallback uintptr
FpRollbackArg uintptr
FxRollbackCallback uintptr
FpUpdateArg uintptr
FxUpdateCallback uintptr
FpAutovacPagesArg uintptr
FxAutovacDestr uintptr
FxAutovacPages uintptr
FpParse uintptr
FpPreUpdateArg uintptr
FxPreUpdateCallback uintptr
FpPreUpdate uintptr
FxWalCallback uintptr
FpWalArg uintptr
FxCollNeeded uintptr
FxCollNeeded16 uintptr
FpCollNeededArg uintptr
FpErr uintptr
Fu1 struct {
FnotUsed1 [0]float64
FisInterrupted int32
F__ccgo_pad2 [4]byte
}
Flookaside TLookaside
FxAuth Tsqlite3_xauth
FpAuthArg uintptr
FxProgress uintptr
FpProgressArg uintptr
FnProgressOps uint32
FnVTrans int32
FaModule THash
FpVtabCtx uintptr
FaVTrans uintptr
FpDisconnect uintptr
FaFunc THash
FaCollSeq THash
FbusyHandler TBusyHandler
FaDbStatic [2]TDb
FpSavepoint uintptr
FnAnalysisLimit int32
FbusyTimeout int32
FnSavepoint int32
FnStatement int32
FnDeferredCons Ti64
FnDeferredImmCons Ti64
FpnBytesFreed uintptr
FpDbData uintptr
FpBlockingConnection uintptr
FpUnlockConnection uintptr
FpUnlockArg uintptr
FxUnlockNotify uintptr
FpNextBlocked uintptr
}
type sqlite3 = Tsqlite3
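// Editor's note: a minimal sketch, not part of the generated amalgamation.
// In this ccgo translation a live connection is handled as a uintptr that
// points at a Tsqlite3 value; the hypothetical helper below shows how
// translated code reads simple connection state (attached database count and
// last error code) through such a handle.
func _exampleConnState(db uintptr) (nDb int32, errCode int32) {
	if db == 0 {
		return 0, SQLITE_OK // nil handle: nothing attached, no error recorded
	}
	conn := (*Tsqlite3)(unsafe.Pointer(db))
	return conn.FnDb, conn.FerrCode
}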
// C documentation
//
// /*
// ** CAPI3REF: 64-Bit Integer Types
// ** KEYWORDS: sqlite_int64 sqlite_uint64
// **
// ** Because there is no cross-platform way to specify 64-bit integer types,
// ** SQLite includes typedefs for 64-bit signed and unsigned integers.
// **
// ** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions.
// ** The sqlite_int64 and sqlite_uint64 types are supported for backwards
// ** compatibility only.
// **
// ** ^The sqlite3_int64 and sqlite_int64 types can store integer values
// ** between -9223372036854775808 and +9223372036854775807 inclusive. ^The
// ** sqlite3_uint64 and sqlite_uint64 types can store integer values
// ** between 0 and +18446744073709551615 inclusive.
// */
type Tsqlite_int64 = int64
type sqlite_int64 = Tsqlite_int64
type Tsqlite_uint64 = uint64
type sqlite_uint64 = Tsqlite_uint64
type Tsqlite3_int64 = int64
type sqlite3_int64 = Tsqlite3_int64
type Tsqlite3_uint64 = uint64
type sqlite3_uint64 = Tsqlite3_uint64
// C documentation
//
// /*
// ** The type for a callback function.
// ** This is legacy and deprecated. It is included for historical
// ** compatibility and is not documented.
// */
type Tsqlite3_callback = uintptr
type sqlite3_callback = Tsqlite3_callback
/*
** CAPI3REF: Result Codes
** KEYWORDS: {result code definitions}
**
** Many SQLite functions return an integer result code from the set shown
** here in order to indicate success or failure.
**
** New error codes may be added in future versions of SQLite.
**
** See also: [extended result code definitions]
*/
/* beginning-of-error-codes */
/* end-of-error-codes */
/*
** CAPI3REF: Extended Result Codes
** KEYWORDS: {extended result code definitions}
**
** In its default configuration, SQLite API routines return one of 30 integer
** [result codes]. However, experience has shown that many of
** these result codes are too coarse-grained. They do not provide as
** much information about problems as programmers might like. In an effort to
** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8]
** and later) include
** support for additional result codes that provide more detailed information
** about errors. These [extended result codes] are enabled or disabled
** on a per database connection basis using the
** [sqlite3_extended_result_codes()] API. Or, the extended code for
** the most recent error can be obtained using
** [sqlite3_extended_errcode()].
*/
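// Editor's note: a minimal sketch, not generated code. SQLite's documented
// convention is that an extended result code carries its primary result code
// in its low byte, so masking with 0xFF maps, for example,
// SQLITE_READONLY_CANTLOCK (520) back to SQLITE_READONLY (8).
func _examplePrimaryResultCode(extended int32) int32 {
	return extended & 0xFF
}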
/*
** CAPI3REF: Flags For File Open Operations
**
** These bit values are intended for use in the
** 3rd parameter to the [sqlite3_open_v2()] interface and
** in the 4th parameter to the [sqlite3_vfs.xOpen] method.
**
** Only those flags marked as "Ok for sqlite3_open_v2()" may be
** used as the third argument to the [sqlite3_open_v2()] interface.
** The other flags have historically been ignored by sqlite3_open_v2(),
** though future versions of SQLite might change so that an error is
** raised if any of the disallowed bits are passed into sqlite3_open_v2().
** Applications should not depend on the historical behavior.
**
** Note in particular that passing the SQLITE_OPEN_EXCLUSIVE flag into
** [sqlite3_open_v2()] does *not* cause the underlying database file
** to be opened using O_EXCL. Passing SQLITE_OPEN_EXCLUSIVE into
** [sqlite3_open_v2()] has historically been a no-op and might become an
** error in future versions of SQLite.
*/
/* Reserved: 0x00F00000 */
/* Legacy compatibility: */
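// Editor's note: a minimal sketch, not generated code. The open flags
// documented above are plain bitmasks, so a read/write-or-create open in URI
// mode with extended result codes enabled is just the OR of the
// corresponding constants defined earlier in this file.
func _exampleOpenV2Flags() int32 {
	return SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_URI | SQLITE_OPEN_EXRESCODE
}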
/*
** CAPI3REF: Device Characteristics
**
** The xDeviceCharacteristics method of the [sqlite3_io_methods]
** object returns an integer which is a vector of these
** bit values expressing I/O characteristics of the mass storage
** device that holds the file that the [sqlite3_io_methods]
** refers to.
**
** The SQLITE_IOCAP_ATOMIC property means that all writes of
** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
** mean that writes of blocks that are nnn bytes in size and
** are aligned to an address which is an integer multiple of
** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
** that when data is appended to a file, the data is appended
** first then the size of the file is extended, never the other
** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
** information is written to disk in the same order as calls
** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that
** after reboot following a crash or power loss, the only bytes in a
** file that were written at the application level might have changed
** and that adjacent bytes, even bytes within the same sector are
** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
** flag indicates that a file cannot be deleted when open. The
** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
** read-only media and cannot be changed even by processes with
** elevated privileges.
**
** The SQLITE_IOCAP_BATCH_ATOMIC property means that the underlying
** filesystem supports doing multiple write operations atomically when those
** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and
** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE].
*/
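// Editor's note: a minimal sketch, not generated code. xDeviceCharacteristics
// returns a bit vector, so callers test an individual SQLITE_IOCAP_* property
// by masking; capBit here stands for whichever IOCAP constant the caller
// cares about.
func _exampleDeviceHasCap(characteristics int32, capBit int32) bool {
	return characteristics&capBit != 0
}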
/*
** CAPI3REF: File Locking Levels
**
** SQLite uses one of these integer values as the second
** argument to calls it makes to the xLock() and xUnlock() methods
** of an [sqlite3_io_methods] object. These values are ordered from
** least restrictive to most restrictive.
**
** The argument to xLock() is always SHARED or higher. The argument to
** xUnlock is either SHARED or NONE.
*/
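// Editor's note: a minimal sketch, not generated code. Because the locking
// levels are ordered from least to most restrictive, "at or above the
// requested lock" in the xLock() description below reduces to a numeric
// comparison of the current and requested levels.
func _exampleLockIsAtLeast(current int32, requested int32) bool {
	return current >= requested
}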
/*
** CAPI3REF: Synchronization Type Flags
**
** When SQLite invokes the xSync() method of an
** [sqlite3_io_methods] object it uses a combination of
** these integer values as the second argument.
**
** When the SQLITE_SYNC_DATAONLY flag is used, it means that the
** sync operation only needs to flush data to mass storage. Inode
** information need not be flushed. If the lower four bits of the flag
** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics.
** If the lower four bits equal SQLITE_SYNC_FULL, that means
** to use Mac OS X style fullsync instead of fsync().
**
** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags
** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL
** settings. The [synchronous pragma] determines when calls to the
** xSync VFS method occur and applies uniformly across all platforms.
** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how
** energetic or rigorous or forceful the sync operations are and
** only make a difference on Mac OSX for the default SQLite code.
** (Third-party VFS implementations might also make the distinction
** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the
** operating systems natively supported by SQLite, only Mac OSX
** cares about the difference.)
*/
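// Editor's note: a minimal sketch, not generated code. Per the comment above,
// the low four bits of the xSync flags select SQLITE_SYNC_NORMAL versus
// SQLITE_SYNC_FULL semantics, while SQLITE_SYNC_DATAONLY is a separate bit
// that may be ORed on top.
func _exampleSyncKind(flags int32) (fullSync bool, dataOnly bool) {
	return flags&0x0F == SQLITE_SYNC_FULL, flags&SQLITE_SYNC_DATAONLY != 0
}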
// C documentation
//
// /*
// ** CAPI3REF: OS Interface Open File Handle
// **
// ** An [sqlite3_file] object represents an open file in the
// ** [sqlite3_vfs | OS interface layer]. Individual OS interface
// ** implementations will
// ** want to subclass this object by appending additional fields
// ** for their own use. The pMethods entry is a pointer to an
// ** [sqlite3_io_methods] object that defines methods for performing
// ** I/O operations on the open file.
// */
type Tsqlite3_file = struct {
FpMethods uintptr
}
type sqlite3_file = Tsqlite3_file
type Tsqlite3_file1 = struct {
FpMethods uintptr
}
type sqlite3_file1 = Tsqlite3_file1
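// Editor's note: a hypothetical sketch, not generated code. VFS
// implementations "subclass" sqlite3_file by appending their own fields, so
// in this translated layout a concrete file object embeds Tsqlite3_file as
// its first field and a pointer to it can stand in for the base object.
type _exampleVfsFile struct {
	Fbase Tsqlite3_file // base object; FpMethods points at the I/O method table
	Ffd   int32         // hypothetical extra state, e.g. a Unix file descriptor
}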
// C documentation
//
// /*
// ** CAPI3REF: OS Interface File Virtual Methods Object
// **
// ** Every file opened by the [sqlite3_vfs.xOpen] method populates an
// ** [sqlite3_file] object (or, more commonly, a subclass of the
// ** [sqlite3_file] object) with a pointer to an instance of this object.
// ** This object defines the methods used to perform various operations
// ** against the open file represented by the [sqlite3_file] object.
// **
// ** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element
// ** to a non-NULL pointer, then the sqlite3_io_methods.xClose method
// ** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The
// ** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen]
// ** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element
// ** to NULL.
// **
// ** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or
// ** [SQLITE_SYNC_FULL]. The first choice is the normal fsync().
// ** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY]
// ** flag may be ORed in to indicate that only the data of the file
// ** and not its inode needs to be synced.
// **
// ** The integer values to xLock() and xUnlock() are one of
// **
// ** - [SQLITE_LOCK_NONE],
// ** - [SQLITE_LOCK_SHARED],
// ** - [SQLITE_LOCK_RESERVED],
// ** - [SQLITE_LOCK_PENDING], or
// ** - [SQLITE_LOCK_EXCLUSIVE].
// **
// ** xLock() upgrades the database file lock. In other words, xLock() moves the
// ** database file lock in the direction from NONE toward EXCLUSIVE. The argument to
// ** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never
// ** SQLITE_LOCK_NONE. If the database file lock is already at or above the
// ** requested lock, then the call to xLock() is a no-op.
// ** xUnlock() downgrades the database file lock to either SHARED or NONE.
// ** If the lock is already at or below the requested lock state, then the call
// ** to xUnlock() is a no-op.
// ** The xCheckReservedLock() method checks whether any database connection,
// ** either in this process or in some other process, is holding a RESERVED,
// ** PENDING, or EXCLUSIVE lock on the file. It returns true
// ** if such a lock exists and false otherwise.
// **
// ** The xFileControl() method is a generic interface that allows custom
// ** VFS implementations to directly control an open file using the
// ** [sqlite3_file_control()] interface. The second "op" argument is an
// ** integer opcode. The third argument is a generic pointer intended to
// ** point to a structure that may contain arguments or space in which to
// ** write return values. Potential uses for xFileControl() might be
// ** functions to enable blocking locks with timeouts, to change the
// ** locking strategy (for example to use dot-file locks), to inquire
// ** about the status of a lock, or to break stale locks. The SQLite
// ** core reserves all opcodes less than 100 for its own use.
// ** A [file control opcodes | list of opcodes] less than 100 is available.
// ** Applications that define a custom xFileControl method should use opcodes
// ** greater than 100 to avoid conflicts. VFS implementations should
// ** return [SQLITE_NOTFOUND] for file control opcodes that they do not
// ** recognize.
// **
// ** The xSectorSize() method returns the sector size of the
// ** device that underlies the file. The sector size is the
// ** minimum write that can be performed without disturbing
// ** other bytes in the file. The xDeviceCharacteristics()
// ** method returns a bit vector describing behaviors of the
// ** underlying device:
// **
// ** - [SQLITE_IOCAP_ATOMIC]
// ** - [SQLITE_IOCAP_ATOMIC512]
// ** - [SQLITE_IOCAP_ATOMIC1K]
// ** - [SQLITE_IOCAP_ATOMIC2K]
// ** - [SQLITE_IOCAP_ATOMIC4K]
// ** - [SQLITE_IOCAP_ATOMIC8K]
// ** - [SQLITE_IOCAP_ATOMIC16K]
// ** - [SQLITE_IOCAP_ATOMIC32K]
// ** - [SQLITE_IOCAP_ATOMIC64K]
// ** - [SQLITE_IOCAP_SAFE_APPEND]
// ** - [SQLITE_IOCAP_SEQUENTIAL]
// ** - [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN]
// ** - [SQLITE_IOCAP_POWERSAFE_OVERWRITE]
// ** - [SQLITE_IOCAP_IMMUTABLE]
// ** - [SQLITE_IOCAP_BATCH_ATOMIC]
// **
// ** The SQLITE_IOCAP_ATOMIC property means that all writes of
// ** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
// ** mean that writes of blocks that are nnn bytes in size and
// ** are aligned to an address which is an integer multiple of
// ** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
// ** that when data is appended to a file, the data is appended
// ** first then the size of the file is extended, never the other
// ** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
// ** information is written to disk in the same order as calls
// ** to xWrite().
// **
// ** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill
// ** in the unread portions of the buffer with zeros. A VFS that
// ** fails to zero-fill short reads might seem to work. However,
// ** failure to zero-fill short reads will eventually lead to
// ** database corruption.
// */
type Tsqlite3_io_methods = struct {
FiVersion int32
FxClose uintptr
FxRead uintptr
FxWrite uintptr
FxTruncate uintptr
FxSync uintptr
FxFileSize uintptr
FxLock uintptr
FxUnlock uintptr
FxCheckReservedLock uintptr
FxFileControl uintptr
FxSectorSize uintptr
FxDeviceCharacteristics uintptr
FxShmMap uintptr
FxShmLock uintptr
FxShmBarrier uintptr
FxShmUnmap uintptr
FxFetch uintptr
FxUnfetch uintptr
}
type sqlite3_io_methods = Tsqlite3_io_methods
type Tsqlite3_io_methods1 = struct {
FiVersion int32
FxClose uintptr
FxRead uintptr
FxWrite uintptr
FxTruncate uintptr
FxSync uintptr
FxFileSize uintptr
FxLock uintptr
FxUnlock uintptr
FxCheckReservedLock uintptr
FxFileControl uintptr
FxSectorSize uintptr
FxDeviceCharacteristics uintptr
FxShmMap uintptr
FxShmLock uintptr
FxShmBarrier uintptr
FxShmUnmap uintptr
FxFetch uintptr
FxUnfetch uintptr
}
type sqlite3_io_methods1 = Tsqlite3_io_methods1
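// Editor's note: a minimal sketch, not generated code. The long comment above
// notes that xClose must still be called after a failed xOpen unless the open
// left sqlite3_file.pMethods NULL; with the translated layout that guard is
// simply a nil check on the handle followed by a check of the FpMethods field.
func _exampleNeedsClose(pFile uintptr) bool {
	return pFile != 0 && (*Tsqlite3_file)(unsafe.Pointer(pFile)).FpMethods != 0
}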
// C documentation
//
// /*
// ** CAPI3REF: Loadable Extension Thunk
// **
// ** A pointer to the opaque sqlite3_api_routines structure is passed as
// ** the third parameter to entry points of [loadable extensions]. This
// ** structure must be typedefed in order to work around compiler warnings
// ** on some platforms.
// */
type Tsqlite3_api_routines = struct {
Faggregate_context uintptr
Faggregate_count uintptr
Fbind_blob uintptr
Fbind_double uintptr
Fbind_int uintptr
Fbind_int64 uintptr
Fbind_null uintptr
Fbind_parameter_count uintptr
Fbind_parameter_index uintptr
Fbind_parameter_name uintptr
Fbind_text uintptr
Fbind_text16 uintptr
Fbind_value uintptr
Fbusy_handler uintptr
Fbusy_timeout uintptr
Fchanges uintptr
Fclose1 uintptr
Fcollation_needed uintptr
Fcollation_needed16 uintptr
Fcolumn_blob uintptr
Fcolumn_bytes uintptr
Fcolumn_bytes16 uintptr
Fcolumn_count uintptr
Fcolumn_database_name uintptr
Fcolumn_database_name16 uintptr
Fcolumn_decltype uintptr
Fcolumn_decltype16 uintptr
Fcolumn_double uintptr
Fcolumn_int uintptr
Fcolumn_int64 uintptr
Fcolumn_name uintptr
Fcolumn_name16 uintptr
Fcolumn_origin_name uintptr
Fcolumn_origin_name16 uintptr
Fcolumn_table_name uintptr
Fcolumn_table_name16 uintptr
Fcolumn_text uintptr
Fcolumn_text16 uintptr
Fcolumn_type uintptr
Fcolumn_value uintptr
Fcommit_hook uintptr
Fcomplete uintptr
Fcomplete16 uintptr
Fcreate_collation uintptr
Fcreate_collation16 uintptr
Fcreate_function uintptr
Fcreate_function16 uintptr
Fcreate_module uintptr
Fdata_count uintptr
Fdb_handle uintptr
Fdeclare_vtab uintptr
Fenable_shared_cache uintptr
Ferrcode uintptr
Ferrmsg uintptr
Ferrmsg16 uintptr
Fexec uintptr
Fexpired uintptr
Ffinalize uintptr
Ffree uintptr
Ffree_table uintptr
Fget_autocommit uintptr
Fget_auxdata uintptr
Fget_table uintptr
Fglobal_recover uintptr
Finterruptx uintptr
Flast_insert_rowid uintptr
Flibversion uintptr
Flibversion_number uintptr
Fmalloc uintptr
Fmprintf uintptr
Fopen uintptr
Fopen16 uintptr
Fprepare uintptr
Fprepare16 uintptr
Fprofile uintptr
Fprogress_handler uintptr
Frealloc uintptr
Freset uintptr
Fresult_blob uintptr
Fresult_double uintptr
Fresult_error uintptr
Fresult_error16 uintptr
Fresult_int uintptr
Fresult_int64 uintptr
Fresult_null uintptr
Fresult_text uintptr
Fresult_text16 uintptr
Fresult_text16be uintptr
Fresult_text16le uintptr
Fresult_value uintptr
Frollback_hook uintptr
Fset_authorizer uintptr
Fset_auxdata uintptr
Fxsnprintf uintptr
Fstep uintptr
Ftable_column_metadata uintptr
Fthread_cleanup uintptr
Ftotal_changes uintptr
Ftrace uintptr
Ftransfer_bindings uintptr
Fupdate_hook uintptr
Fuser_data uintptr
Fvalue_blob uintptr
Fvalue_bytes uintptr
Fvalue_bytes16 uintptr
Fvalue_double uintptr
Fvalue_int uintptr
Fvalue_int64 uintptr
Fvalue_numeric_type uintptr
Fvalue_text uintptr
Fvalue_text16 uintptr
Fvalue_text16be uintptr
Fvalue_text16le uintptr
Fvalue_type uintptr
Fvmprintf uintptr
Foverload_function uintptr
Fprepare_v2 uintptr
Fprepare16_v2 uintptr
Fclear_bindings uintptr
Fcreate_module_v2 uintptr
Fbind_zeroblob uintptr
Fblob_bytes uintptr
Fblob_close uintptr
Fblob_open uintptr
Fblob_read uintptr
Fblob_write uintptr
Fcreate_collation_v2 uintptr
Ffile_control uintptr
Fmemory_highwater uintptr
Fmemory_used uintptr
Fmutex_alloc uintptr
Fmutex_enter uintptr
Fmutex_free uintptr
Fmutex_leave uintptr
Fmutex_try uintptr
Fopen_v2 uintptr
Frelease_memory uintptr
Fresult_error_nomem uintptr
Fresult_error_toobig uintptr
Fsleep uintptr
Fsoft_heap_limit uintptr
Fvfs_find uintptr
Fvfs_register uintptr
Fvfs_unregister uintptr
Fxthreadsafe uintptr
Fresult_zeroblob uintptr
Fresult_error_code uintptr
Ftest_control uintptr
Frandomness uintptr
Fcontext_db_handle uintptr
Fextended_result_codes uintptr
Flimit uintptr
Fnext_stmt uintptr
Fsql uintptr
Fstatus uintptr
Fbackup_finish uintptr
Fbackup_init uintptr
Fbackup_pagecount uintptr
Fbackup_remaining uintptr
Fbackup_step uintptr
Fcompileoption_get uintptr
Fcompileoption_used uintptr
Fcreate_function_v2 uintptr
Fdb_config uintptr
Fdb_mutex uintptr
Fdb_status uintptr
Fextended_errcode uintptr
Flog uintptr
Fsoft_heap_limit64 uintptr
Fsourceid uintptr
Fstmt_status uintptr
Fstrnicmp uintptr
Funlock_notify uintptr
Fwal_autocheckpoint uintptr
Fwal_checkpoint uintptr
Fwal_hook uintptr
Fblob_reopen uintptr
Fvtab_config uintptr
Fvtab_on_conflict uintptr
Fclose_v2 uintptr
Fdb_filename uintptr
Fdb_readonly uintptr
Fdb_release_memory uintptr
Ferrstr uintptr
Fstmt_busy uintptr
Fstmt_readonly uintptr
Fstricmp uintptr
Furi_boolean uintptr
Furi_int64 uintptr
Furi_parameter uintptr
Fxvsnprintf uintptr
Fwal_checkpoint_v2 uintptr
Fauto_extension uintptr
Fbind_blob64 uintptr
Fbind_text64 uintptr
Fcancel_auto_extension uintptr
Fload_extension uintptr
Fmalloc64 uintptr
Fmsize uintptr
Frealloc64 uintptr
Freset_auto_extension uintptr
Fresult_blob64 uintptr
Fresult_text64 uintptr
Fstrglob uintptr
Fvalue_dup uintptr
Fvalue_free uintptr
Fresult_zeroblob64 uintptr
Fbind_zeroblob64 uintptr
Fvalue_subtype uintptr
Fresult_subtype uintptr
Fstatus64 uintptr
Fstrlike uintptr
Fdb_cacheflush uintptr
Fsystem_errno uintptr
Ftrace_v2 uintptr
Fexpanded_sql uintptr
Fset_last_insert_rowid uintptr
Fprepare_v3 uintptr
Fprepare16_v3 uintptr
Fbind_pointer uintptr
Fresult_pointer uintptr
Fvalue_pointer uintptr
Fvtab_nochange uintptr
Fvalue_nochange uintptr
Fvtab_collation uintptr
Fkeyword_count uintptr
Fkeyword_name uintptr
Fkeyword_check uintptr
Fstr_new uintptr
Fstr_finish uintptr
Fstr_appendf uintptr
Fstr_vappendf uintptr
Fstr_append uintptr
Fstr_appendall uintptr
Fstr_appendchar uintptr
Fstr_reset uintptr
Fstr_errcode uintptr
Fstr_length uintptr
Fstr_value uintptr
Fcreate_window_function uintptr
Fnormalized_sql uintptr
Fstmt_isexplain uintptr
Fvalue_frombind uintptr
Fdrop_modules uintptr
Fhard_heap_limit64 uintptr
Furi_key uintptr
Ffilename_database uintptr
Ffilename_journal uintptr
Ffilename_wal uintptr
Fcreate_filename uintptr
Ffree_filename uintptr
Fdatabase_file_object uintptr
Ftxn_state uintptr
Fchanges64 uintptr
Ftotal_changes64 uintptr
Fautovacuum_pages uintptr
Ferror_offset uintptr
Fvtab_rhs_value uintptr
Fvtab_distinct uintptr
Fvtab_in uintptr
Fvtab_in_first uintptr
Fvtab_in_next uintptr
Fdeserialize uintptr
Fserialize uintptr
Fdb_name uintptr
Fvalue_encoding uintptr
Fis_interrupted uintptr
Fstmt_explain uintptr
Fget_clientdata uintptr
Fset_clientdata uintptr
}
type sqlite3_api_routines = Tsqlite3_api_routines
// C documentation
//
// /*
// ** CAPI3REF: File Name
// **
// ** Type [sqlite3_filename] is used by SQLite to pass filenames to the
// ** xOpen method of a [VFS]. It may be cast to (const char*) and treated
// ** as a normal, nul-terminated, UTF-8 buffer containing the filename, but
// ** may also be passed to special APIs such as:
// **
// ** - sqlite3_filename_database()
// ** - sqlite3_filename_journal()
// ** - sqlite3_filename_wal()
// ** - sqlite3_uri_parameter()
// ** - sqlite3_uri_boolean()
// ** - sqlite3_uri_int64()
// ** - sqlite3_uri_key()
// **
// */
type Tsqlite3_filename = uintptr
type sqlite3_filename = Tsqlite3_filename
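// Editorial note (not part of the generated API): in this translation a
// Tsqlite3_filename is simply a uintptr to a NUL-terminated UTF-8 buffer, so
// it can be read back as a Go string with libc.GoString. A minimal sketch;
// the function name is hypothetical.
func exampleFilenameToGoString(zFilename Tsqlite3_filename) string {
	if zFilename == 0 {
		return ""
	}
	return libc.GoString(zFilename)
}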
// C documentation
//
// /*
// ** CAPI3REF: OS Interface Object
// **
// ** An instance of the sqlite3_vfs object defines the interface between
// ** the SQLite core and the underlying operating system. The "vfs"
// ** in the name of the object stands for "virtual file system". See
// ** the [VFS | VFS documentation] for further information.
// **
// ** The VFS interface is sometimes extended by adding new methods onto
// ** the end. Each time such an extension occurs, the iVersion field
// ** is incremented. The iVersion value started out as 1 in
// ** SQLite [version 3.5.0] on [dateof:3.5.0], then increased to 2
// ** with SQLite [version 3.7.0] on [dateof:3.7.0], and then increased
// ** to 3 with SQLite [version 3.7.6] on [dateof:3.7.6]. Additional fields
// ** may be appended to the sqlite3_vfs object and the iVersion value
// ** may increase again in future versions of SQLite.
// ** Note that due to an oversight, the structure
// ** of the sqlite3_vfs object changed in the transition from
// ** SQLite [version 3.5.9] to [version 3.6.0] on [dateof:3.6.0]
// ** and yet the iVersion field was not increased.
// **
// ** The szOsFile field is the size of the subclassed [sqlite3_file]
// ** structure used by this VFS. mxPathname is the maximum length of
// ** a pathname in this VFS.
// **
// ** Registered sqlite3_vfs objects are kept on a linked list formed by
// ** the pNext pointer. The [sqlite3_vfs_register()]
// ** and [sqlite3_vfs_unregister()] interfaces manage this list
// ** in a thread-safe way. The [sqlite3_vfs_find()] interface
// ** searches the list. Neither the application code nor the VFS
// ** implementation should use the pNext pointer.
// **
// ** The pNext field is the only field in the sqlite3_vfs
// ** structure that SQLite will ever modify. SQLite will only access
// ** or modify this field while holding a particular static mutex.
// ** The application should never modify anything within the sqlite3_vfs
// ** object once the object has been registered.
// **
// ** The zName field holds the name of the VFS module. The name must
// ** be unique across all VFS modules.
// **
// ** [[sqlite3_vfs.xOpen]]
// ** ^SQLite guarantees that the zFilename parameter to xOpen
// ** is either a NULL pointer or string obtained
// ** from xFullPathname() with an optional suffix added.
// ** ^If a suffix is added to the zFilename parameter, it will
// ** consist of a single "-" character followed by no more than
// ** 11 alphanumeric and/or "-" characters.
// ** ^SQLite further guarantees that
// ** the string will be valid and unchanged until xClose() is
// ** called. Because of the previous sentence,
// ** the [sqlite3_file] can safely store a pointer to the
// ** filename if it needs to remember the filename for some reason.
// ** If the zFilename parameter to xOpen is a NULL pointer then xOpen
// ** must invent its own temporary name for the file. ^Whenever the
// ** zFilename parameter is NULL it will also be the case that the
// ** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE].
// **
// ** The flags argument to xOpen() includes all bits set in
// ** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()]
// ** or [sqlite3_open16()] is used, then flags includes at least
// ** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE].
// ** If xOpen() opens a file read-only then it sets *pOutFlags to
// ** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set.
// **
// ** ^(SQLite will also add one of the following flags to the xOpen()
// ** call, depending on the object being opened:
// **
// ** - [SQLITE_OPEN_MAIN_DB]
// ** - [SQLITE_OPEN_MAIN_JOURNAL]
// ** - [SQLITE_OPEN_TEMP_DB]
// ** - [SQLITE_OPEN_TEMP_JOURNAL]
// ** - [SQLITE_OPEN_TRANSIENT_DB]
// ** - [SQLITE_OPEN_SUBJOURNAL]
// ** - [SQLITE_OPEN_SUPER_JOURNAL]
// ** - [SQLITE_OPEN_WAL]
// ** )^
// **
// ** The file I/O implementation can use the object type flags to
// ** change the way it deals with files. For example, an application
// ** that does not care about crash recovery or rollback might make
// ** the open of a journal file a no-op. Writes to this journal would
// ** also be no-ops, and any attempt to read the journal would return
// ** SQLITE_IOERR. Or the implementation might recognize that a database
// ** file will be doing page-aligned sector reads and writes in a random
// ** order and set up its I/O subsystem accordingly.
// **
// ** SQLite might also add one of the following flags to the xOpen method:
// **
// ** - [SQLITE_OPEN_DELETEONCLOSE]
// ** - [SQLITE_OPEN_EXCLUSIVE]
// **
// ** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be
// ** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE]
// ** will be set for TEMP databases and their journals, transient
// ** databases, and subjournals.
// **
// ** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction
// ** with the [SQLITE_OPEN_CREATE] flag, which are both directly
// ** analogous to the O_EXCL and O_CREAT flags of the POSIX open()
// ** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the
// ** SQLITE_OPEN_CREATE, is used to indicate that the file should always
// ** be created, and that it is an error if it already exists.
// ** It is not used to indicate the file should be opened
// ** for exclusive access.
// **
// ** ^At least szOsFile bytes of memory are allocated by SQLite
// ** to hold the [sqlite3_file] structure passed as the third
// ** argument to xOpen. The xOpen method does not have to
// ** allocate the structure; it should just fill it in. Note that
// ** the xOpen method must set the sqlite3_file.pMethods to either
// ** a valid [sqlite3_io_methods] object or to NULL. xOpen must do
// ** this even if the open fails. SQLite expects that the sqlite3_file.pMethods
// ** element will be valid after xOpen returns regardless of the success
// ** or failure of the xOpen call.
// **
// ** [[sqlite3_vfs.xAccess]]
// ** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS]
// ** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to
// ** test whether a file is readable and writable, or [SQLITE_ACCESS_READ]
// ** to test whether a file is at least readable. The SQLITE_ACCESS_READ
// ** flag is never actually used and is not implemented in the built-in
// ** VFSes of SQLite. The file is named by the second argument and can be a
// ** directory. The xAccess method returns [SQLITE_OK] on success or some
// ** non-zero error code if there is an I/O error or if the name of
// ** the file given in the second argument is illegal. If SQLITE_OK
// ** is returned, then non-zero or zero is written into *pResOut to indicate
// ** whether or not the file is accessible.
// **
// ** ^SQLite will always allocate at least mxPathname+1 bytes for the
// ** output buffer xFullPathname. The exact size of the output buffer
// ** is also passed as a parameter to both methods. If the output buffer
// ** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is
// ** handled as a fatal error by SQLite, vfs implementations should endeavor
// ** to prevent this by setting mxPathname to a sufficiently large value.
// **
// ** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64()
// ** interfaces are not strictly a part of the filesystem, but they are
// ** included in the VFS structure for completeness.
// ** The xRandomness() function attempts to return nBytes bytes
// ** of good-quality randomness into zOut. The return value is
// ** the actual number of bytes of randomness obtained.
// ** The xSleep() method causes the calling thread to sleep for at
// ** least the number of microseconds given. ^The xCurrentTime()
// ** method returns a Julian Day Number for the current date and time as
// ** a floating point value.
// ** ^The xCurrentTimeInt64() method returns, as an integer, the Julian
// ** Day Number multiplied by 86400000 (the number of milliseconds in
// ** a 24-hour day).
// ** ^SQLite will use the xCurrentTimeInt64() method to get the current
// ** date and time if that method is available (if iVersion is 2 or
// ** greater and the function pointer is not NULL) and will fall back
// ** to xCurrentTime() if xCurrentTimeInt64() is unavailable.
// **
// ** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces
// ** are not used by the SQLite core. These optional interfaces are provided
// ** by some VFSes to facilitate testing of the VFS code. By overriding
// ** system calls with functions under its control, a test program can
// ** simulate faults and error conditions that would otherwise be difficult
// ** or impossible to induce. The set of system calls that can be overridden
// ** varies from one VFS to another, and from one version of the same VFS to the
// ** next. Applications that use these interfaces must be prepared for any
// ** or all of these interfaces to be NULL or for their behavior to change
// ** from one release to the next. Applications must not attempt to access
// ** any of these methods if the iVersion of the VFS is less than 3.
// */
type Tsqlite3_vfs = struct {
FiVersion int32
FszOsFile int32
FmxPathname int32
FpNext uintptr
FzName uintptr
FpAppData uintptr
FxOpen uintptr
FxDelete uintptr
FxAccess uintptr
FxFullPathname uintptr
FxDlOpen uintptr
FxDlError uintptr
FxDlSym uintptr
FxDlClose uintptr
FxRandomness uintptr
FxSleep uintptr
FxCurrentTime uintptr
FxGetLastError uintptr
FxCurrentTimeInt64 uintptr
FxSetSystemCall uintptr
FxGetSystemCall uintptr
FxNextSystemCall uintptr
}
type sqlite3_vfs = Tsqlite3_vfs
type Tsqlite3_syscall_ptr = uintptr
type sqlite3_syscall_ptr = Tsqlite3_syscall_ptr
type Tsqlite3_vfs1 = struct {
FiVersion int32
FszOsFile int32
FmxPathname int32
FpNext uintptr
FzName uintptr
FpAppData uintptr
FxOpen uintptr
FxDelete uintptr
FxAccess uintptr
FxFullPathname uintptr
FxDlOpen uintptr
FxDlError uintptr
FxDlSym uintptr
FxDlClose uintptr
FxRandomness uintptr
FxSleep uintptr
FxCurrentTime uintptr
FxGetLastError uintptr
FxCurrentTimeInt64 uintptr
FxSetSystemCall uintptr
FxGetSystemCall uintptr
FxNextSystemCall uintptr
}
type sqlite3_vfs1 = Tsqlite3_vfs1
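// Editorial note (not part of the generated API): the documentation above
// says xCurrentTimeInt64 may only be used when iVersion is at least 2 and the
// function pointer is not NULL, with xCurrentTime as the fallback. A minimal
// sketch of that version check against the Tsqlite3_vfs layout defined above;
// the function name is hypothetical.
func exampleVfsHasCurrentTimeInt64(pVfs *Tsqlite3_vfs) bool {
	return pVfs != nil && pVfs.FiVersion >= 2 && pVfs.FxCurrentTimeInt64 != 0
}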
// C documentation
//
// /*
// ** CAPI3REF: Memory Allocation Routines
// **
// ** An instance of this object defines the interface between SQLite
// ** and low-level memory allocation routines.
// **
// ** This object is used in only one place in the SQLite interface.
// ** A pointer to an instance of this object is the argument to
// ** [sqlite3_config()] when the configuration option is
// ** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC].
// ** By creating an instance of this object
// ** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC])
// ** during configuration, an application can specify an alternative
// ** memory allocation subsystem for SQLite to use for all of its
// ** dynamic memory needs.
// **
// ** Note that SQLite comes with several [built-in memory allocators]
// ** that are perfectly adequate for the overwhelming majority of applications
// ** and that this object is only useful to a tiny minority of applications
// ** with specialized memory allocation requirements. This object is
// ** also used during testing of SQLite in order to specify an alternative
// ** memory allocator that simulates memory out-of-memory conditions in
// ** order to verify that SQLite recovers gracefully from such
// ** conditions.
// **
// ** The xMalloc, xRealloc, and xFree methods must work like the
// ** malloc(), realloc() and free() functions from the standard C library.
// ** ^SQLite guarantees that the second argument to
// ** xRealloc is always a value returned by a prior call to xRoundup.
// **
// ** xSize should return the allocated size of a memory allocation
// ** previously obtained from xMalloc or xRealloc. The allocated size
// ** is always at least as big as the requested size but may be larger.
// **
// ** The xRoundup method returns what would be the allocated size of
// ** a memory allocation given a particular requested size. Most memory
// ** allocators round up memory allocations at least to the next multiple
// ** of 8. Some allocators round up to a larger multiple or to a power of 2.
// ** Every memory allocation request coming in through [sqlite3_malloc()]
// ** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0,
// ** that causes the corresponding memory allocation to fail.
// **
// ** The xInit method initializes the memory allocator. For example,
// ** it might allocate any required mutexes or initialize internal data
// ** structures. The xShutdown method is invoked (indirectly) by
// ** [sqlite3_shutdown()] and should deallocate any resources acquired
// ** by xInit. The pAppData pointer is used as the only parameter to
// ** xInit and xShutdown.
// **
// ** SQLite holds the [SQLITE_MUTEX_STATIC_MAIN] mutex when it invokes
// ** the xInit method, so the xInit method need not be threadsafe. The
// ** xShutdown method is only called from [sqlite3_shutdown()] so it does
// ** not need to be threadsafe either. For all other methods, SQLite
// ** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the
// ** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which
// ** it is by default) and so the methods are automatically serialized.
// ** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other
// ** methods must be threadsafe or else make their own arrangements for
// ** serialization.
// **
// ** SQLite will never invoke xInit() more than once without an intervening
// ** call to xShutdown().
// */
type Tsqlite3_mem_methods = struct {
FxMalloc uintptr
FxFree uintptr
FxRealloc uintptr
FxSize uintptr
FxRoundup uintptr
FxInit uintptr
FxShutdown uintptr
FpAppData uintptr
}
type sqlite3_mem_methods = Tsqlite3_mem_methods
type Tsqlite3_mem_methods1 = struct {
FxMalloc uintptr
FxFree uintptr
FxRealloc uintptr
FxSize uintptr
FxRoundup uintptr
FxInit uintptr
FxShutdown uintptr
FpAppData uintptr
}
type sqlite3_mem_methods1 = Tsqlite3_mem_methods1
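// Editorial note (not part of the generated API): the comment above describes
// xRoundup as rounding a requested allocation size up to the allocator's
// granularity, most commonly the next multiple of 8. A minimal sketch of that
// rounding rule; the function name is hypothetical.
func exampleRoundupToMultipleOf8(n int32) int32 {
	return (n + 7) &^ 7
}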
// C documentation
//
// /*
// ** CAPI3REF: Dynamically Typed Value Object
// ** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value}
// **
// ** SQLite uses the sqlite3_value object to represent all values
// ** that can be stored in a database table. SQLite uses dynamic typing
// ** for the values it stores. ^Values stored in sqlite3_value objects
// ** can be integers, floating point values, strings, BLOBs, or NULL.
// **
// ** An sqlite3_value object may be either "protected" or "unprotected".
// ** Some interfaces require a protected sqlite3_value. Other interfaces
// ** will accept either a protected or an unprotected sqlite3_value.
// ** Every interface that accepts sqlite3_value arguments specifies
// ** whether or not it requires a protected sqlite3_value. The
// ** [sqlite3_value_dup()] interface can be used to construct a new
// ** protected sqlite3_value from an unprotected sqlite3_value.
// **
// ** The terms "protected" and "unprotected" refer to whether or not
// ** a mutex is held. An internal mutex is held for a protected
// ** sqlite3_value object but no mutex is held for an unprotected
// ** sqlite3_value object. If SQLite is compiled to be single-threaded
// ** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0)
// ** or if SQLite is run in one of reduced mutex modes
// ** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD]
// ** then there is no distinction between protected and unprotected
// ** sqlite3_value objects and they can be used interchangeably. However,
// ** for maximum code portability it is recommended that applications
// ** still make the distinction between protected and unprotected
// ** sqlite3_value objects even when not strictly required.
// **
// ** ^The sqlite3_value objects that are passed as parameters into the
// ** implementation of [application-defined SQL functions] are protected.
// ** ^The sqlite3_value objects returned by [sqlite3_vtab_rhs_value()]
// ** are protected.
// ** ^The sqlite3_value object returned by
// ** [sqlite3_column_value()] is unprotected.
// ** Unprotected sqlite3_value objects may only be used as arguments
// ** to [sqlite3_result_value()], [sqlite3_bind_value()], and
// ** [sqlite3_value_dup()].
// ** The [sqlite3_value_blob | sqlite3_value_type()] family of
// ** interfaces require protected sqlite3_value objects.
// */
type Tsqlite3_value = struct {
Fu TMemValue
Fz uintptr
Fn int32
Fflags Tu16
Fenc Tu8
FeSubtype Tu8
Fdb uintptr
FszMalloc int32
FuTemp Tu32
FzMalloc uintptr
FxDel uintptr
}
type sqlite3_value = Tsqlite3_value
// C documentation
//
// /*
// ** CAPI3REF: SQL Function Context Object
// **
// ** The context in which an SQL function executes is stored in an
// ** sqlite3_context object. ^A pointer to an sqlite3_context object
// ** is always the first parameter to [application-defined SQL functions].
// ** The application-defined SQL function implementation will pass this
// ** pointer through into calls to [sqlite3_result_int | sqlite3_result()],
// ** [sqlite3_aggregate_context()], [sqlite3_user_data()],
// ** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()],
// ** and/or [sqlite3_set_auxdata()].
// */
type Tsqlite3_context = struct {
FpOut uintptr
FpFunc uintptr
FpMem uintptr
FpVdbe uintptr
FiOp int32
FisError int32
Fenc Tu8
FskipFlag Tu8
Fargc Tu8
Fargv [1]uintptr
}
type sqlite3_context = Tsqlite3_context
// C documentation
//
// /*
// ** CAPI3REF: Constants Defining Special Destructor Behavior
// **
// ** These are special values for the destructor that is passed in as the
// ** final argument to routines like [sqlite3_result_blob()]. ^If the destructor
// ** argument is SQLITE_STATIC, it means that the content pointer is constant
// ** and will never change. It does not need to be destroyed. ^The
// ** SQLITE_TRANSIENT value means that the content will likely change in
// ** the near future and that SQLite should make its own private copy of
// ** the content before returning.
// **
// ** The typedef is necessary to work around problems in certain
// ** C++ compilers.
// */
type Tsqlite3_destructor_type = uintptr
type sqlite3_destructor_type = Tsqlite3_destructor_type
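// Editorial note (not part of the generated API): in the C API, SQLITE_STATIC
// is conventionally the NULL destructor and SQLITE_TRANSIENT is the value
// (sqlite3_destructor_type)-1. In this uintptr-based translation those special
// values can be written as follows; the constant names are hypothetical.
const (
	exampleStaticDestructor    Tsqlite3_destructor_type = 0
	exampleTransientDestructor Tsqlite3_destructor_type = ^Tsqlite3_destructor_type(0)
)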
// C documentation
//
// /*
// ** Structures used by the virtual table interface
// */
type Tsqlite3_vtab = struct {
FpModule uintptr
FnRef int32
FzErrMsg uintptr
}
type sqlite3_vtab = Tsqlite3_vtab
type Tsqlite3_index_info = struct {
FnConstraint int32
FaConstraint uintptr
FnOrderBy int32
FaOrderBy uintptr
FaConstraintUsage uintptr
FidxNum int32
FidxStr uintptr
FneedToFreeIdxStr int32
ForderByConsumed int32
FestimatedCost float64
FestimatedRows Tsqlite3_int64
FidxFlags int32
FcolUsed Tsqlite3_uint64
}
type sqlite3_index_info = Tsqlite3_index_info
type Tsqlite3_vtab_cursor = struct {
FpVtab uintptr
}
type sqlite3_vtab_cursor = Tsqlite3_vtab_cursor
type Tsqlite3_module = struct {
FiVersion int32
FxCreate uintptr
FxConnect uintptr
FxBestIndex uintptr
FxDisconnect uintptr
FxDestroy uintptr
FxOpen uintptr
FxClose uintptr
FxFilter uintptr
FxNext uintptr
FxEof uintptr
FxColumn uintptr
FxRowid uintptr
FxUpdate uintptr
FxBegin uintptr
FxSync uintptr
FxCommit uintptr
FxRollback uintptr
FxFindFunction uintptr
FxRename uintptr
FxSavepoint uintptr
FxRelease uintptr
FxRollbackTo uintptr
FxShadowName uintptr
FxIntegrity uintptr
}
type sqlite3_module = Tsqlite3_module
/*
** CAPI3REF: Virtual Table Object
** KEYWORDS: sqlite3_module {virtual table module}
**
** This structure, sometimes called a "virtual table module",
** defines the implementation of a [virtual table].
** This structure consists mostly of methods for the module.
**
** ^A virtual table module is created by filling in a persistent
** instance of this structure and passing a pointer to that instance
** to [sqlite3_create_module()] or [sqlite3_create_module_v2()].
** ^The registration remains valid until it is replaced by a different
** module or until the [database connection] closes. The content
** of this structure must not change while it is registered with
** any database connection.
*/
type Tsqlite3_module1 = struct {
FiVersion int32
FxCreate uintptr
FxConnect uintptr
FxBestIndex uintptr
FxDisconnect uintptr
FxDestroy uintptr
FxOpen uintptr
FxClose uintptr
FxFilter uintptr
FxNext uintptr
FxEof uintptr
FxColumn uintptr
FxRowid uintptr
FxUpdate uintptr
FxBegin uintptr
FxSync uintptr
FxCommit uintptr
FxRollback uintptr
FxFindFunction uintptr
FxRename uintptr
FxSavepoint uintptr
FxRelease uintptr
FxRollbackTo uintptr
FxShadowName uintptr
FxIntegrity uintptr
}
type sqlite3_module1 = Tsqlite3_module1
/*
** CAPI3REF: Virtual Table Indexing Information
** KEYWORDS: sqlite3_index_info
**
** The sqlite3_index_info structure and its substructures are used as part
** of the [virtual table] interface to
** pass information into and receive the reply from the [xBestIndex]
** method of a [virtual table module]. The fields under **Inputs** are the
** inputs to xBestIndex and are read-only. xBestIndex inserts its
** results into the **Outputs** fields.
**
** ^(The aConstraint[] array records WHERE clause constraints of the form:
**
** column OP expr
**
** where OP is =, <, <=, >, or >=.)^ ^(The particular operator is
** stored in aConstraint[].op using one of the
** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^
** ^(The index of the column is stored in
** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the
** expr on the right-hand side can be evaluated (and thus the constraint
** is usable) and false if it cannot.)^
**
** ^The optimizer automatically inverts terms of the form "expr OP column"
** and makes other simplifications to the WHERE clause in an attempt to
** get as many WHERE clause terms into the form shown above as possible.
** ^The aConstraint[] array only reports WHERE clause terms that are
** relevant to the particular virtual table being queried.
**
** ^Information about the ORDER BY clause is stored in aOrderBy[].
** ^Each term of aOrderBy records a column of the ORDER BY clause.
**
** The colUsed field indicates which columns of the virtual table may be
** required by the current scan. Virtual table columns are numbered from
** zero in the order in which they appear within the CREATE TABLE statement
** passed to sqlite3_declare_vtab(). For the first 63 columns (columns 0-62),
** the corresponding bit is set within the colUsed mask if the column may be
** required by SQLite. If the table has at least 64 columns and any column
** to the right of the first 63 is required, then bit 63 of colUsed is also
** set. In other words, column iCol may be required if the expression
** (colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) evaluates to
** non-zero.
**
** The [xBestIndex] method must fill aConstraintUsage[] with information
** about what parameters to pass to xFilter. ^If argvIndex>0 then
** the right-hand side of the corresponding aConstraint[] is evaluated
** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit
** is true, then the constraint is assumed to be fully handled by the
** virtual table and might not be checked again by the byte code.)^ ^(The
** aConstraintUsage[].omit flag is an optimization hint. When the omit flag
** is left in its default setting of false, the constraint will always be
** checked separately in byte code. If the omit flag is changed to true, then
** the constraint may or may not be checked in byte code. In other words,
** when the omit flag is true there is no guarantee that the constraint will
** not be checked again using byte code.)^
**
** ^The idxNum and idxStr values are recorded and passed into the
** [xFilter] method.
** ^[sqlite3_free()] is used to free idxStr if and only if
** needToFreeIdxStr is true.
**
** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in
** the correct order to satisfy the ORDER BY clause so that no separate
** sorting step is required.
**
** ^The estimatedCost value is an estimate of the cost of a particular
** strategy. A cost of N indicates that the cost of the strategy is similar
** to a linear scan of an SQLite table with N rows. A cost of log(N)
** indicates that the expense of the operation is similar to that of a
** binary search on a unique indexed field of an SQLite table with N rows.
**
** ^The estimatedRows value is an estimate of the number of rows that
** will be returned by the strategy.
**
** The xBestIndex method may optionally populate the idxFlags field with a
** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
** assumes that the strategy may visit at most one row.
**
** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
** SQLite also assumes that if a call to the xUpdate() method is made as
** part of the same statement to delete or update a virtual table row and the
** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
** any database changes. In other words, if the xUpdate() returns
** SQLITE_CONSTRAINT, the database contents must be exactly as they were
** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
** the xUpdate method are automatically rolled back by SQLite.
**
** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
** structure for SQLite [version 3.8.2] ([dateof:3.8.2]).
** If a virtual table extension is
** used with an SQLite version earlier than 3.8.2, the results of attempting
** to read or write the estimatedRows field are undefined (but are likely
** to include crashing the application). The estimatedRows field should
** therefore only be used if [sqlite3_libversion_number()] returns a
** value greater than or equal to 3008002. Similarly, the idxFlags field
** was added for [version 3.9.0] ([dateof:3.9.0]).
** It may therefore only be used if
** sqlite3_libversion_number() returns a value greater than or equal to
** 3009000.
*/
type Tsqlite3_index_info1 = struct {
FnConstraint int32
FaConstraint uintptr
FnOrderBy int32
FaOrderBy uintptr
FaConstraintUsage uintptr
FidxNum int32
FidxStr uintptr
FneedToFreeIdxStr int32
ForderByConsumed int32
FestimatedCost float64
FestimatedRows Tsqlite3_int64
FidxFlags int32
FcolUsed Tsqlite3_uint64
}
type sqlite3_index_info1 = Tsqlite3_index_info1
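// Editorial note (not part of the generated API): a minimal sketch of the
// colUsed test described above, which caps the bit index at 63 so that column
// 63 and all columns to its right share bit 63. It uses the
// Tsqlite3_index_info layout defined above; the function name is hypothetical.
func exampleColumnMayBeUsed(pInfo *Tsqlite3_index_info, iCol int32) bool {
	bit := iCol
	if bit >= 63 {
		bit = 63
	}
	return pInfo.FcolUsed&(Tsqlite3_uint64(1)<<uint(bit)) != 0
}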
/*
** CAPI3REF: Virtual Table Instance Object
** KEYWORDS: sqlite3_vtab
**
** Every [virtual table module] implementation uses a subclass
** of this object to describe a particular instance
** of the [virtual table]. Each subclass will
** be tailored to the specific needs of the module implementation.
** The purpose of this superclass is to define certain fields that are
** common to all module implementations.
**
** ^Virtual tables methods can set an error message by assigning a
** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should
** take care that any prior string is freed by a call to [sqlite3_free()]
** prior to assigning a new string to zErrMsg. ^After the error message
** is delivered up to the client application, the string will be automatically
** freed by sqlite3_free() and the zErrMsg field will be zeroed.
*/
type Tsqlite3_vtab1 = struct {
FpModule uintptr
FnRef int32
FzErrMsg uintptr
}
type sqlite3_vtab1 = Tsqlite3_vtab1
/*
** CAPI3REF: Virtual Table Cursor Object
** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor}
**
** Every [virtual table module] implementation uses a subclass of the
** following structure to describe cursors that point into the
** [virtual table] and are used
** to loop through the virtual table. Cursors are created using the
** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed
** by the [sqlite3_module.xClose | xClose] method. Cursors are used
** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods
** of the module. Each module implementation will define
** the content of a cursor structure to suit its own needs.
**
** This superclass exists in order to define fields of the cursor that
** are common to all implementations.
*/
type Tsqlite3_vtab_cursor1 = struct {
FpVtab uintptr
}
type sqlite3_vtab_cursor1 = Tsqlite3_vtab_cursor1
// C documentation
//
// /*
// ** CAPI3REF: Mutex Methods Object
// **
// ** An instance of this structure defines the low-level routines
// ** used to allocate and use mutexes.
// **
// ** Usually, the default mutex implementations provided by SQLite are
// ** sufficient; however, the application has the option of substituting a custom
// ** implementation for specialized deployments or systems for which SQLite
// ** does not provide a suitable implementation. In this case, the application
// ** creates and populates an instance of this structure to pass
// ** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option.
// ** Additionally, an instance of this structure can be used as an
// ** output variable when querying the system for the current mutex
// ** implementation, using the [SQLITE_CONFIG_GETMUTEX] option.
// **
// ** ^The xMutexInit method defined by this structure is invoked as
// ** part of system initialization by the sqlite3_initialize() function.
// ** ^The xMutexInit routine is called by SQLite exactly once for each
// ** effective call to [sqlite3_initialize()].
// **
// ** ^The xMutexEnd method defined by this structure is invoked as
// ** part of system shutdown by the sqlite3_shutdown() function. The
// ** implementation of this method is expected to release all outstanding
// ** resources obtained by the mutex methods implementation, especially
// ** those obtained by the xMutexInit method. ^The xMutexEnd()
// ** interface is invoked exactly once for each call to [sqlite3_shutdown()].
// **
// ** ^(The remaining seven methods defined by this structure (xMutexAlloc,
// ** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and
// ** xMutexNotheld) implement the following interfaces (respectively):
// **
// **
// ** - [sqlite3_mutex_alloc()]
// ** - [sqlite3_mutex_free()]
// ** - [sqlite3_mutex_enter()]
// ** - [sqlite3_mutex_try()]
// ** - [sqlite3_mutex_leave()]
// ** - [sqlite3_mutex_held()]
// ** - [sqlite3_mutex_notheld()]
// **
// ** )^
// **
// ** The only difference is that the public sqlite3_XXX functions enumerated
// ** above silently ignore any invocations that pass a NULL pointer instead
// ** of a valid mutex handle. The implementations of the methods defined
// ** by this structure are not required to handle this case. The results
// ** of passing a NULL pointer instead of a valid mutex handle are undefined
// ** (i.e. it is acceptable to provide an implementation that segfaults if
// ** it is passed a NULL pointer).
// **
// ** The xMutexInit() method must be threadsafe. It must be harmless to
// ** invoke xMutexInit() multiple times within the same process and without
// ** intervening calls to xMutexEnd(). Second and subsequent calls to
// ** xMutexInit() must be no-ops.
// **
// ** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
// ** and its associates). Similarly, xMutexAlloc() must not use SQLite memory
// ** allocation for a static mutex. ^However xMutexAlloc() may use SQLite
// ** memory allocation for a fast or recursive mutex.
// **
// ** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is
// ** called, but only if the prior call to xMutexInit returned SQLITE_OK.
// ** If xMutexInit fails in any way, it is expected to clean up after itself
// ** prior to returning.
// */
type Tsqlite3_mutex_methods = struct {
FxMutexInit uintptr
FxMutexEnd uintptr
FxMutexAlloc uintptr
FxMutexFree uintptr
FxMutexEnter uintptr
FxMutexTry uintptr
FxMutexLeave uintptr
FxMutexHeld uintptr
FxMutexNotheld uintptr
}
type sqlite3_mutex_methods = Tsqlite3_mutex_methods
type Tsqlite3_mutex_methods1 = struct {
FxMutexInit uintptr
FxMutexEnd uintptr
FxMutexAlloc uintptr
FxMutexFree uintptr
FxMutexEnter uintptr
FxMutexTry uintptr
FxMutexLeave uintptr
FxMutexHeld uintptr
FxMutexNotheld uintptr
}
type sqlite3_mutex_methods1 = Tsqlite3_mutex_methods1
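// Editorial note (not part of the generated API): before installing a custom
// mutex implementation via sqlite3_config(SQLITE_CONFIG_MUTEX), an application
// may want to confirm that every method slot documented above has been filled
// in. A minimal sketch of such a check; the function name is hypothetical.
func exampleMutexMethodsComplete(p *Tsqlite3_mutex_methods) bool {
	return p != nil &&
		p.FxMutexInit != 0 && p.FxMutexEnd != 0 &&
		p.FxMutexAlloc != 0 && p.FxMutexFree != 0 &&
		p.FxMutexEnter != 0 && p.FxMutexTry != 0 && p.FxMutexLeave != 0 &&
		p.FxMutexHeld != 0 && p.FxMutexNotheld != 0
}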
// C documentation
//
// /*
// ** CAPI3REF: Dynamic String Object
// ** KEYWORDS: {dynamic string}
// **
// ** An instance of the sqlite3_str object contains a dynamically-sized
// ** string under construction.
// **
// ** The lifecycle of an sqlite3_str object is as follows:
// **
// ** - ^The sqlite3_str object is created using [sqlite3_str_new()].
// ** - ^Text is appended to the sqlite3_str object using various
// **   methods, such as [sqlite3_str_appendf()].
// ** - ^The sqlite3_str object is destroyed and the string it created
// **   is returned using the [sqlite3_str_finish()] interface.
// **
// */
type Tsqlite3_str = struct {
Fdb uintptr
FzText uintptr
FnAlloc Tu32
FmxAlloc Tu32
FnChar Tu32
FaccError Tu8
FprintfFlags Tu8
}
type sqlite3_str = Tsqlite3_str
// C documentation
//
// /*
// ** CAPI3REF: Custom Page Cache Object
// **
// ** The sqlite3_pcache_page object represents a single page in the
// ** page cache. The page cache will allocate instances of this
// ** object. Various methods of the page cache use pointers to instances
// ** of this object as parameters or as their return value.
// **
// ** See [sqlite3_pcache_methods2] for additional information.
// */
type Tsqlite3_pcache_page = struct {
FpBuf uintptr
FpExtra uintptr
}
type sqlite3_pcache_page = Tsqlite3_pcache_page
type Tsqlite3_pcache_page1 = struct {
FpBuf uintptr
FpExtra uintptr
}
type sqlite3_pcache_page1 = Tsqlite3_pcache_page1
// C documentation
//
// /*
// ** CAPI3REF: Application Defined Page Cache.
// ** KEYWORDS: {page cache}
// **
// ** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can
// ** register an alternative page cache implementation by passing in an
// ** instance of the sqlite3_pcache_methods2 structure.)^
// ** In many applications, most of the heap memory allocated by
// ** SQLite is used for the page cache.
// ** By implementing a
// ** custom page cache using this API, an application can better control
// ** the amount of memory consumed by SQLite, the way in which
// ** that memory is allocated and released, and the policies used to
// ** determine exactly which parts of a database file are cached and for
// ** how long.
// **
// ** The alternative page cache mechanism is an
// ** extreme measure that is only needed by the most demanding applications.
// ** The built-in page cache is recommended for most uses.
// **
// ** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an
// ** internal buffer by SQLite within the call to [sqlite3_config]. Hence
// ** the application may discard the parameter after the call to
// ** [sqlite3_config()] returns.)^
// **
// ** [[the xInit() page cache method]]
// ** ^(The xInit() method is called once for each effective
// ** call to [sqlite3_initialize()])^
// ** (usually only once during the lifetime of the process). ^(The xInit()
// ** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^
// ** The intent of the xInit() method is to set up global data structures
// ** required by the custom page cache implementation.
// ** ^(If the xInit() method is NULL, then the
// ** built-in default page cache is used instead of the application defined
// ** page cache.)^
// **
// ** [[the xShutdown() page cache method]]
// ** ^The xShutdown() method is called by [sqlite3_shutdown()].
// ** It can be used to clean up
// ** any outstanding resources before process shutdown, if required.
// ** ^The xShutdown() method may be NULL.
// **
// ** ^SQLite automatically serializes calls to the xInit method,
// ** so the xInit method need not be threadsafe. ^The
// ** xShutdown method is only called from [sqlite3_shutdown()] so it does
// ** not need to be threadsafe either. All other methods must be threadsafe
// ** in multithreaded applications.
// **
// ** ^SQLite will never invoke xInit() more than once without an intervening
// ** call to xShutdown().
// **
// ** [[the xCreate() page cache methods]]
// ** ^SQLite invokes the xCreate() method to construct a new cache instance.
// ** SQLite will typically create one cache instance for each open database file,
// ** though this is not guaranteed. ^The
// ** first parameter, szPage, is the size in bytes of the pages that must
// ** be allocated by the cache. ^szPage will always be a power of two. ^The
// ** second parameter szExtra is a number of bytes of extra storage
// ** associated with each page cache entry. ^The szExtra parameter will be
// ** a number less than 250. SQLite will use the
// ** extra szExtra bytes on each page to store metadata about the underlying
// ** database page on disk. The value passed into szExtra depends
// ** on the SQLite version, the target platform, and how SQLite was compiled.
// ** ^The third argument to xCreate(), bPurgeable, is true if the cache being
// ** created will be used to cache database pages of a file stored on disk, or
// ** false if it is used for an in-memory database. The cache implementation
// ** does not have to do anything special based on the value of bPurgeable;
// ** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will
// ** never invoke xUnpin() except to deliberately delete a page.
// ** ^In other words, calls to xUnpin() on a cache with bPurgeable set to
// ** false will always have the "discard" flag set to true.
// ** ^Hence, a cache created with bPurgeable false will
// ** never contain any unpinned pages.
// **
// ** [[the xCachesize() page cache method]]
// ** ^(The xCachesize() method may be called at any time by SQLite to set the
// ** suggested maximum cache-size (number of pages stored by) the cache
// ** instance passed as the first argument. This is the value configured using
// ** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable
// ** parameter, the implementation is not required to do anything with this
// ** value; it is advisory only.
// **
// ** [[the xPagecount() page cache methods]]
// ** The xPagecount() method must return the number of pages currently
// ** stored in the cache, both pinned and unpinned.
// **
// ** [[the xFetch() page cache methods]]
// ** The xFetch() method locates a page in the cache and returns a pointer to
// ** an sqlite3_pcache_page object associated with that page, or a NULL pointer.
// ** The pBuf element of the returned sqlite3_pcache_page object will be a
// ** pointer to a buffer of szPage bytes used to store the content of a
// ** single database page. The pExtra element of sqlite3_pcache_page will be
// ** a pointer to the szExtra bytes of extra storage that SQLite has requested
// ** for each entry in the page cache.
// **
// ** The page to be fetched is determined by the key. ^The minimum key value
// ** is 1. After it has been retrieved using xFetch, the page is considered
// ** to be "pinned".
// **
// ** If the requested page is already in the page cache, then the page cache
// ** implementation must return a pointer to the page buffer with its content
// ** intact. If the requested page is not already in the cache, then the
// ** cache implementation should use the value of the createFlag
// ** parameter to help it determine what action to take:
// **
// **   createFlag | Behavior when page is not already in cache
// **  ------------|--------------------------------------------------------
// **            0 | Do not allocate a new page. Return NULL.
// **            1 | Allocate a new page if it is easy and convenient to do
// **              | so. Otherwise return NULL.
// **            2 | Make every effort to allocate a new page. Only return
// **              | NULL if allocating a new page is effectively impossible.
// **
// ** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite
// ** will only use a createFlag of 2 after a prior call with a createFlag of 1
// ** failed.)^ In between the xFetch() calls, SQLite may
// ** attempt to unpin one or more cache pages by spilling the content of
// ** pinned pages to disk and synching the operating system disk cache.
// **
// ** [[the xUnpin() page cache method]]
// ** ^xUnpin() is called by SQLite with a pointer to a currently pinned page
// ** as its second argument. If the third parameter, discard, is non-zero,
// ** then the page must be evicted from the cache.
// ** ^If the discard parameter is
// ** zero, then the page may be discarded or retained at the discretion of
// ** page cache implementation. ^The page cache implementation
// ** may choose to evict unpinned pages at any time.
// **
// ** The cache must not perform any reference counting. A single
// ** call to xUnpin() unpins the page regardless of the number of prior calls
// ** to xFetch().
// **
// ** [[the xRekey() page cache methods]]
// ** The xRekey() method is used to change the key value associated with the
// ** page passed as the second argument. If the cache
// ** previously contains an entry associated with newKey, it must be
// ** discarded. ^Any prior cache entry associated with newKey is guaranteed not
// ** to be pinned.
// **
// ** When SQLite calls the xTruncate() method, the cache must discard all
// ** existing cache entries with page numbers (keys) greater than or equal
// ** to the value of the iLimit parameter passed to xTruncate(). If any
// ** of these pages are pinned, they are implicitly unpinned, meaning that
// ** they can be safely discarded.
// **
// ** [[the xDestroy() page cache method]]
// ** ^The xDestroy() method is used to delete a cache allocated by xCreate().
// ** All resources associated with the specified cache should be freed. ^After
// ** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*]
// ** handle invalid, and will not use it with any other sqlite3_pcache_methods2
// ** functions.
// **
// ** [[the xShrink() page cache method]]
// ** ^SQLite invokes the xShrink() method when it wants the page cache to
// ** free up as much of heap memory as possible. The page cache implementation
// ** is not obligated to free any memory, but well-behaved implementations should
// ** do their best.
// */
type Tsqlite3_pcache_methods2 = struct {
FiVersion int32
FpArg uintptr
FxInit uintptr
FxShutdown uintptr
FxCreate uintptr
FxCachesize uintptr
FxPagecount uintptr
FxFetch uintptr
FxUnpin uintptr
FxRekey uintptr
FxTruncate uintptr
FxDestroy uintptr
FxShrink uintptr
}
type sqlite3_pcache_methods2 = Tsqlite3_pcache_methods2
type Tsqlite3_pcache_methods21 = struct {
FiVersion int32
FpArg uintptr
FxInit uintptr
FxShutdown uintptr
FxCreate uintptr
FxCachesize uintptr
FxPagecount uintptr
FxFetch uintptr
FxUnpin uintptr
FxRekey uintptr
FxTruncate uintptr
FxDestroy uintptr
FxShrink uintptr
}
type sqlite3_pcache_methods21 = Tsqlite3_pcache_methods21
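// Editorial note (not part of the generated API): a minimal sketch of the
// createFlag policy described in the xFetch() table above, phrased as a
// decision helper for a hypothetical cache implementation. The function and
// parameter names are not part of SQLite.
func exampleShouldAllocatePage(createFlag int32, easyToAllocate bool) bool {
	switch createFlag {
	case 0:
		// Never allocate; simply report a miss.
		return false
	case 1:
		// Allocate only when it is easy and convenient to do so.
		return easyToAllocate
	default:
		// createFlag==2: make every effort to allocate.
		return true
	}
}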
// C documentation
//
// /*
// ** This is the obsolete pcache_methods object that has now been replaced
// ** by sqlite3_pcache_methods2. This object is not used by SQLite. It is
// ** retained in the header file for backwards compatibility only.
// */
type Tsqlite3_pcache_methods = struct {
FpArg uintptr
FxInit uintptr
FxShutdown uintptr
FxCreate uintptr
FxCachesize uintptr
FxPagecount uintptr
FxFetch uintptr
FxUnpin uintptr
FxRekey uintptr
FxTruncate uintptr
FxDestroy uintptr
}
type sqlite3_pcache_methods = Tsqlite3_pcache_methods
type Tsqlite3_pcache_methods1 = struct {
FpArg uintptr
FxInit uintptr
FxShutdown uintptr
FxCreate uintptr
FxCachesize uintptr
FxPagecount uintptr
FxFetch uintptr
FxUnpin uintptr
FxRekey uintptr
FxTruncate uintptr
FxDestroy uintptr
}
type sqlite3_pcache_methods1 = Tsqlite3_pcache_methods1
// C documentation
//
// /*
// ** CAPI3REF: Online Backup Object
// **
// ** The sqlite3_backup object records state information about an ongoing
// ** online backup operation. ^The sqlite3_backup object is created by
// ** a call to [sqlite3_backup_init()] and is destroyed by a call to
// ** [sqlite3_backup_finish()].
// **
// ** See Also: [Using the SQLite Online Backup API]
// */
type Tsqlite3_backup = struct {
FpDestDb uintptr
FpDest uintptr
FiDestSchema Tu32
FbDestLocked int32
FiNext TPgno
FpSrcDb uintptr
FpSrc uintptr
Frc int32
FnRemaining TPgno
FnPagecount TPgno
FisAttached int32
FpNext uintptr
}
type sqlite3_backup = Tsqlite3_backup
// C documentation
//
// /*
// ** CAPI3REF: Database Snapshot
// ** KEYWORDS: {snapshot} {sqlite3_snapshot}
// **
// ** An instance of the snapshot object records the state of a [WAL mode]
// ** database for some specific point in history.
// **
// ** In [WAL mode], multiple [database connections] that are open on the
// ** same database file can each be reading a different historical version
// ** of the database file. When a [database connection] begins a read
// ** transaction, that connection sees an unchanging copy of the database
// ** as it existed for the point in time when the transaction first started.
// ** Subsequent changes to the database from other connections are not seen
// ** by the reader until a new read transaction is started.
// **
// ** The sqlite3_snapshot object records state information about an historical
// ** version of the database file so that it is possible to later open a new read
// ** transaction that sees that historical version of the database rather than
// ** the most recent version.
// */
type Tsqlite3_snapshot = struct {
Fhidden [48]uint8
}
type sqlite3_snapshot = Tsqlite3_snapshot
/*
** CAPI3REF: Flags for sqlite3_deserialize()
**
** The following are allowed values for the 6th argument (the F argument) to
** the [sqlite3_deserialize(D,S,P,N,M,F)] interface.
**
** The SQLITE_DESERIALIZE_FREEONCLOSE flag means that the database serialization
** in the P argument is held in memory obtained from [sqlite3_malloc64()]
** and that SQLite should take ownership of this memory and automatically
** free it when it has finished using it. Without this flag, the caller
** is responsible for freeing any dynamically allocated memory.
**
** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to
** grow the size of the database using calls to [sqlite3_realloc64()]. This
** flag should only be used if SQLITE_DESERIALIZE_FREEONCLOSE is also used.
** Without this flag, the deserialized database cannot increase in size beyond
** the number of bytes specified by the M parameter.
**
** The SQLITE_DESERIALIZE_READONLY flag means that the deserialized database
** should be treated as read-only.
*/
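// Editorial note (not part of the generated API): per the comment above,
// SQLITE_DESERIALIZE_RESIZEABLE is only meaningful together with
// SQLITE_DESERIALIZE_FREEONCLOSE. A minimal sketch of such a combined flag
// value, written with the literal macro values (FREEONCLOSE=1, RESIZEABLE=2,
// READONLY=4) so it stands on its own; the constant name is hypothetical.
const exampleDeserializeFlags = 1 | 2 // FREEONCLOSE | RESIZEABLE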
/*
** Undo the hack that converts floating point types to integer for
** builds on processors without floating point support.
*/
/******** Begin file sqlite3rtree.h *********/
/*
** 2010 August 30
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
*/
type Tsqlite3_rtree_geometry = struct {
FpContext uintptr
FnParam int32
FaParam uintptr
FpUser uintptr
FxDelUser uintptr
}
type sqlite3_rtree_geometry = Tsqlite3_rtree_geometry
type Tsqlite3_rtree_query_info = struct {
FpContext uintptr
FnParam int32
FaParam uintptr
FpUser uintptr
FxDelUser uintptr
FaCoord uintptr
FanQueue uintptr
FnCoord int32
FiLevel int32
FmxLevel int32
FiRowid Tsqlite3_int64
FrParentScore Tsqlite3_rtree_dbl
FeParentWithin int32
FeWithin int32
FrScore Tsqlite3_rtree_dbl
FapSqlParam uintptr
}
type sqlite3_rtree_query_info = Tsqlite3_rtree_query_info
// C documentation
//
// /* The double-precision datatype used by RTree depends on the
// ** SQLITE_RTREE_INT_ONLY compile-time option.
// */
type Tsqlite3_rtree_dbl = float64
type sqlite3_rtree_dbl = Tsqlite3_rtree_dbl
/*
** A pointer to a structure of the following type is passed as the first
** argument to callbacks registered using rtree_geometry_callback().
*/
type Tsqlite3_rtree_geometry1 = struct {
FpContext uintptr
FnParam int32
FaParam uintptr
FpUser uintptr
FxDelUser uintptr
}
type sqlite3_rtree_geometry1 = Tsqlite3_rtree_geometry1
/*
** A pointer to a structure of the following type is passed as the
** argument to scored geometry callback registered using
** sqlite3_rtree_query_callback().
**
** Note that the first 5 fields of this structure are identical to
** sqlite3_rtree_geometry. This structure is a subclass of
** sqlite3_rtree_geometry.
*/
type Tsqlite3_rtree_query_info1 = struct {
FpContext uintptr
FnParam int32
FaParam uintptr
FpUser uintptr
FxDelUser uintptr
FaCoord uintptr
FanQueue uintptr
FnCoord int32
FiLevel int32
FmxLevel int32
FiRowid Tsqlite3_int64
FrParentScore Tsqlite3_rtree_dbl
FeParentWithin int32
FeWithin int32
FrScore Tsqlite3_rtree_dbl
FapSqlParam uintptr
}
type sqlite3_rtree_query_info1 = Tsqlite3_rtree_query_info1
/*
** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin.
*/
/******** End of sqlite3rtree.h *********/
/******** Begin file sqlite3session.h *********/
/*
** Make sure we can call this stuff from C++.
*/
// C documentation
//
// /*
// ** CAPI3REF: Session Object Handle
// **
// ** An instance of this object is a [session] that can be used to
// ** record changes to a database.
// */
type Tsqlite3_session = struct {
Fdb uintptr
FzDb uintptr
FbEnableSize int32
FbEnable int32
FbIndirect int32
FbAutoAttach int32
FbImplicitPK int32
Frc int32
FpFilterCtx uintptr
FxTableFilter uintptr
FnMalloc Ti64
FnMaxChangesetSize Ti64
FpZeroBlob uintptr
FpNext uintptr
FpTable uintptr
Fhook TSessionHook
}
type sqlite3_session = Tsqlite3_session
// C documentation
//
// /*
// ** CAPI3REF: Changeset Iterator Handle
// **
// ** An instance of this object acts as a cursor for iterating
// ** over the elements of a [changeset] or [patchset].
// */
type Tsqlite3_changeset_iter = struct {
Fin TSessionInput
Ftblhdr TSessionBuffer
FbPatchset int32
FbInvert int32
FbSkipEmpty int32
Frc int32
FpConflict uintptr
FzTab uintptr
FnCol int32
Fop int32
FbIndirect int32
FabPK uintptr
FapValue uintptr
}
type sqlite3_changeset_iter = Tsqlite3_changeset_iter
// C documentation
//
// /*
// ** CAPI3REF: Changegroup Handle
// **
// ** A changegroup is an object used to combine two or more
// ** [changesets] or [patchsets]
// */
type Tsqlite3_changegroup = struct {
Frc int32
FbPatch int32
FpList uintptr
Fdb uintptr
FzDb uintptr
}
type sqlite3_changegroup = Tsqlite3_changegroup
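// For illustration, combining two changesets (buffers pA/nA and pB/nB,
// assumed to already exist) into a single output buffer looks roughly like
// this:
//
//  sqlite3_changegroup *pGrp = 0;
//  int nOut = 0;
//  void *pOut = 0;
//
//  rc = sqlite3changegroup_new(&pGrp);
//  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
//  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
//  if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, &nOut, &pOut);
//  sqlite3changegroup_delete(pGrp);
//  /* ... apply or store the combined changeset, then sqlite3_free(pOut) ... */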
/*
** CAPI3REF: Flags for sqlite3changeset_apply_v2
**
** The following flags may be passed via the 9th parameter to
** [sqlite3changeset_apply_v2] and [sqlite3changeset_apply_v2_strm]:
**
**
** - SQLITE_CHANGESETAPPLY_NOSAVEPOINT:
** Usually, the sessions module encloses all operations performed by
** a single call to apply_v2() or apply_v2_strm() in a [SAVEPOINT]. The
** SAVEPOINT is committed if the changeset or patchset is successfully
** applied, or rolled back if an error occurs. Specifying this flag
** causes the sessions module to omit this savepoint. In this case, if the
** caller has an open transaction or savepoint when apply_v2() is called,
** it may revert the partially applied changeset by rolling it back.
**
**
** - SQLITE_CHANGESETAPPLY_INVERT:
** Invert the changeset before applying it. This is equivalent to inverting
** a changeset using sqlite3changeset_invert() before applying it. It is
** an error to specify this flag with a patchset.
**
**
** - SQLITE_CHANGESETAPPLY_IGNORENOOP:
** Do not invoke the conflict handler callback for any changes that
** would not actually modify the database even if they were applied.
** Specifically, this means that the conflict handler is not invoked
** for:
**
** - a delete change if the row being deleted cannot be found,
**
** - an update change if the modified fields are already set to
** their new values in the conflicting row, or
**
** - an insert change if all fields of the conflicting row match
** the row being inserted.
**
**
** - SQLITE_CHANGESETAPPLY_FKNOACTION:
** If this flag is set, then all foreign key constraints in the target
** database behave as if they were declared with "ON UPDATE NO ACTION ON
** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL
** or SET DEFAULT.
*/
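// For illustration, the flags above are combined with bitwise OR and passed
// as the final (9th) argument of sqlite3changeset_apply_v2(). The conflict
// handler xConflict and the changeset buffer are assumed to exist already:
//
//  void *pRebase = 0;
//  int nRebase = 0;
//
//  rc = sqlite3changeset_apply_v2(
//    db,                        /* target database handle */
//    nChangeset, pChangeset,    /* changeset blob to apply */
//    0,                         /* no table filter callback */
//    xConflict, 0,              /* conflict handler and its context pointer */
//    &pRebase, &nRebase,        /* OUT: rebase buffer (see sqlite3rebaser_*) */
//    SQLITE_CHANGESETAPPLY_NOSAVEPOINT | SQLITE_CHANGESETAPPLY_IGNORENOOP
//  );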
/*
** CAPI3REF: Constants Passed To The Conflict Handler
**
** Values that may be passed as the second argument to a conflict-handler.
**
**
** - SQLITE_CHANGESET_DATA:
** The conflict handler is invoked with CHANGESET_DATA as the second argument
** when processing a DELETE or UPDATE change if a row with the required
** PRIMARY KEY fields is present in the database, but one or more other
** (non primary-key) fields modified by the update do not contain the
** expected "before" values.
**
** The conflicting row, in this case, is the database row with the matching
** primary key.
**
**
** - SQLITE_CHANGESET_NOTFOUND:
** The conflict handler is invoked with CHANGESET_NOTFOUND as the second
** argument when processing a DELETE or UPDATE change if a row with the
** required PRIMARY KEY fields is not present in the database.
**
** There is no conflicting row in this case. The results of invoking the
** sqlite3changeset_conflict() API are undefined.
**
**
** - SQLITE_CHANGESET_CONFLICT:
** CHANGESET_CONFLICT is passed as the second argument to the conflict
** handler while processing an INSERT change if the operation would result
** in duplicate primary key values.
**
** The conflicting row in this case is the database row with the matching
** primary key.
**
**
** - SQLITE_CHANGESET_FOREIGN_KEY:
** If foreign key handling is enabled, and applying a changeset leaves the
** database in a state containing foreign key violations, the conflict
** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument
** exactly once before the changeset is committed. If the conflict handler
** returns CHANGESET_OMIT, the changes, including those that caused the
** foreign key constraint violation, are committed. Or, if it returns
** CHANGESET_ABORT, the changeset is rolled back.
**
** No current or conflicting row information is provided. The only function
** it is possible to call on the supplied sqlite3_changeset_iter handle
** is sqlite3changeset_fk_conflicts().
**
**
** - SQLITE_CHANGESET_CONSTRAINT:
** If any other constraint violation occurs while applying a change (i.e.
** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is
** invoked with CHANGESET_CONSTRAINT as the second argument.
**
** There is no conflicting row in this case. The results of invoking the
** sqlite3changeset_conflict() API are undefined.
**
**
*/
/*
** CAPI3REF: Constants Returned By The Conflict Handler
**
** A conflict handler callback must return one of the following three values.
**
**
** - SQLITE_CHANGESET_OMIT:
** If a conflict handler returns this value no special action is taken. The
** change that caused the conflict is not applied. The session module
** continues to the next change in the changeset.
**
**
** - SQLITE_CHANGESET_REPLACE:
** This value may only be returned if the second argument to the conflict
** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this
** is not the case, any changes applied so far are rolled back and the
** call to sqlite3changeset_apply() returns SQLITE_MISUSE.
**
** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict
** handler, then the conflicting row is either updated or deleted, depending
** on the type of change.
**
** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict
** handler, then the conflicting row is removed from the database and a
** second attempt to apply the change is made. If this second attempt fails,
** the original row is restored to the database before continuing.
**
**
** - SQLITE_CHANGESET_ABORT:
** If this value is returned, any changes applied so far are rolled back
** and the call to sqlite3changeset_apply() returns SQLITE_ABORT.
**
*/
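// For illustration, a conflict handler receives one of the constants listed
// above as its second argument and must return one of the three values
// listed above. The name xConflict is arbitrary:
//
//  static int xConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *pIter){
//    switch( eConflict ){
//      case SQLITE_CHANGESET_DATA:        /* row found, "before" values differ    */
//      case SQLITE_CHANGESET_CONFLICT:    /* INSERT would duplicate a PRIMARY KEY */
//        return SQLITE_CHANGESET_REPLACE; /* only legal for these two cases       */
//      case SQLITE_CHANGESET_NOTFOUND:    /* target row is missing                */
//        return SQLITE_CHANGESET_OMIT;    /* skip this change                     */
//      default:                           /* FOREIGN_KEY or CONSTRAINT            */
//        return SQLITE_CHANGESET_ABORT;   /* roll back everything applied so far  */
//    }
//  }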
// C documentation
//
// /*
// ** CAPI3REF: Rebasing changesets
// ** EXPERIMENTAL
// **
// ** Suppose there is a site hosting a database in state S0. And that
// ** modifications are made that move that database to state S1 and a
// ** changeset recorded (the "local" changeset). Then, a changeset based
// ** on S0 is received from another site (the "remote" changeset) and
// ** applied to the database. The database is then in state
// ** (S1+"remote"), where the exact state depends on any conflict
// ** resolution decisions (OMIT or REPLACE) made while applying "remote".
// ** Rebasing a changeset is to update it to take those conflict
// ** resolution decisions into account, so that the same conflicts
// ** do not have to be resolved elsewhere in the network.
// **
// ** For example, if both the local and remote changesets contain an
// ** INSERT of the same key on "CREATE TABLE t1(a PRIMARY KEY, b)":
// **
// ** local: INSERT INTO t1 VALUES(1, 'v1');
// ** remote: INSERT INTO t1 VALUES(1, 'v2');
// **
// ** and the conflict resolution is REPLACE, then the INSERT change is
// ** removed from the local changeset (it was overridden). Or, if the
// ** conflict resolution was "OMIT", then the local changeset is modified
// ** to instead contain:
// **
// ** UPDATE t1 SET b = 'v2' WHERE a=1;
// **
// ** Changes within the local changeset are rebased as follows:
// **
// **
// ** - Local INSERT:
// ** This may only conflict with a remote INSERT. If the conflict
// ** resolution was OMIT, then add an UPDATE change to the rebased
// ** changeset. Or, if the conflict resolution was REPLACE, add
// ** nothing to the rebased changeset.
// **
// **
// ** - Local DELETE:
// ** This may conflict with a remote UPDATE or DELETE. In both cases the
// ** only possible resolution is OMIT. If the remote operation was a
// ** DELETE, then add no change to the rebased changeset. If the remote
// ** operation was an UPDATE, then the old.* fields of change are updated
// ** to reflect the new.* values in the UPDATE.
// **
// **
// ** - Local UPDATE:
// ** This may conflict with a remote UPDATE or DELETE. If it conflicts
// ** with a DELETE, and the conflict resolution was OMIT, then the update
// ** is changed into an INSERT. Any undefined values in the new.* record
// ** from the update change are filled in using the old.* values from
// ** the conflicting DELETE. Or, if the conflict resolution was REPLACE,
// ** the UPDATE change is simply omitted from the rebased changeset.
// **
// ** If conflict is with a remote UPDATE and the resolution is OMIT, then
// ** the old.* values are rebased using the new.* values in the remote
// ** change. Or, if the resolution is REPLACE, then the change is copied
// ** into the rebased changeset with updates to columns also updated by
// ** the conflicting remote UPDATE removed. If this means no columns would
// ** be updated, the change is omitted.
// **
// **
// ** A local change may be rebased against multiple remote changes
// ** simultaneously. If a single key is modified by multiple remote
// ** changesets, they are combined as follows before the local changeset
// ** is rebased:
// **
// **
// ** - If there has been one or more REPLACE resolutions on a
// ** key, it is rebased according to a REPLACE.
// **
// **
// ** - If there have been no REPLACE resolutions on a key, then
// ** the local changeset is rebased according to the most recent
// ** of the OMIT resolutions.
// **
// **
// ** Note that conflict resolutions from multiple remote changesets are
// ** combined on a per-field basis, not per-row. This means that in the
// ** case of multiple remote UPDATE operations, some fields of a single
// ** local change may be rebased for REPLACE while others are rebased for
// ** OMIT.
// **
// ** In order to rebase a local changeset, the remote changeset must first
// ** be applied to the local database using sqlite3changeset_apply_v2() and
// ** the buffer of rebase information captured. Then:
// **
// **
// ** - An sqlite3_rebaser object is created by calling
// ** sqlite3rebaser_create().
// **
// ** - The new object is configured with the rebase buffer obtained from
// ** sqlite3changeset_apply_v2() by calling sqlite3rebaser_configure().
// ** If the local changeset is to be rebased against multiple remote
// ** changesets, then sqlite3rebaser_configure() should be called
// ** multiple times, in the same order that the multiple
// ** sqlite3changeset_apply_v2() calls were made.
// **
// ** - Each local changeset is rebased by calling sqlite3rebaser_rebase().
// **
// ** - The sqlite3_rebaser object is deleted by calling
// ** sqlite3rebaser_delete().
// **
// */
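// For illustration, the four steps listed above correspond roughly to the
// following C calls. pRebase/nRebase are assumed to be the buffer returned
// by a prior sqlite3changeset_apply_v2() call and pLocal/nLocal the local
// changeset:
//
//  sqlite3_rebaser *pRebaser = 0;
//  int nOut = 0;
//  void *pOut = 0;
//
//  rc = sqlite3rebaser_create(&pRebaser);
//  if( rc==SQLITE_OK ) rc = sqlite3rebaser_configure(pRebaser, nRebase, pRebase);
//  if( rc==SQLITE_OK ) rc = sqlite3rebaser_rebase(pRebaser, nLocal, pLocal, &nOut, &pOut);
//  sqlite3rebaser_delete(pRebaser);
//  /* pOut/nOut now hold the rebased local changeset; free with sqlite3_free() */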
type Tsqlite3_rebaser = struct {
Fgrp Tsqlite3_changegroup
}
type sqlite3_rebaser = Tsqlite3_rebaser
/*
** CAPI3REF: Values for sqlite3session_config().
*/
/*
** Make sure we can call this stuff from C++.
*/
/******** End of sqlite3session.h *********/
/******** Begin file fts5.h *********/
/*
** 2014 May 31
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
** Interfaces to extend FTS5. Using the interfaces defined in this file,
** FTS5 may be extended with:
**
** * custom tokenizers, and
** * custom auxiliary functions.
*/
/*************************************************************************
** CUSTOM AUXILIARY FUNCTIONS
**
** Virtual table implementations may overload SQL functions by implementing
** the sqlite3_module.xFindFunction() method.
*/
type TFts5ExtensionApi = struct {
FiVersion int32
FxUserData uintptr
FxColumnCount uintptr
FxRowCount uintptr
FxColumnTotalSize uintptr
FxTokenize uintptr
FxPhraseCount uintptr
FxPhraseSize uintptr
FxInstCount uintptr
FxInst uintptr
FxRowid uintptr
FxColumnText uintptr
FxColumnSize uintptr
FxQueryPhrase uintptr
FxSetAuxdata uintptr
FxGetAuxdata uintptr
FxPhraseFirst uintptr
FxPhraseNext uintptr
FxPhraseFirstColumn uintptr
FxPhraseNextColumn uintptr
FxQueryToken uintptr
FxInstToken uintptr
}
type Fts5ExtensionApi = TFts5ExtensionApi
type TFts5PhraseIter = struct {
Fa uintptr
Fb uintptr
}
type Fts5PhraseIter = TFts5PhraseIter
type Tfts5_extension_function = uintptr
type fts5_extension_function = Tfts5_extension_function
type TFts5PhraseIter1 = struct {
Fa uintptr
Fb uintptr
}
type Fts5PhraseIter1 = TFts5PhraseIter1
/*
** EXTENSION API FUNCTIONS
**
** xUserData(pFts):
** Return a copy of the context pointer the extension function was
** registered with.
**
** xColumnTotalSize(pFts, iCol, pnToken):
** If parameter iCol is less than zero, set output variable *pnToken
** to the total number of tokens in the FTS5 table. Or, if iCol is
** non-negative but less than the number of columns in the table, return
** the total number of tokens in column iCol, considering all rows in
** the FTS5 table.
**
** If parameter iCol is greater than or equal to the number of columns
** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
** an OOM condition or IO error), an appropriate SQLite error code is
** returned.
**
** xColumnCount(pFts):
** Return the number of columns in the table.
**
** xColumnSize(pFts, iCol, pnToken):
** If parameter iCol is less than zero, set output variable *pnToken
** to the total number of tokens in the current row. Or, if iCol is
** non-negative but less than the number of columns in the table, set
** *pnToken to the number of tokens in column iCol of the current row.
**
** If parameter iCol is greater than or equal to the number of columns
** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
** an OOM condition or IO error), an appropriate SQLite error code is
** returned.
**
** This function may be quite inefficient if used with an FTS5 table
** created with the "columnsize=0" option.
**
** xColumnText:
** If parameter iCol is less than zero, or greater than or equal to the
** number of columns in the table, SQLITE_RANGE is returned.
**
** Otherwise, this function attempts to retrieve the text of column iCol of
** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
** of (*pz) and (*pn) are undefined.
**
** xPhraseCount:
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
** If parameter iCol is less than zero, or greater than or equal to the
** number of phrases in the current query, as returned by xPhraseCount,
** 0 is returned. Otherwise, this function returns the number of tokens in
** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
** the query within the current row. Return SQLITE_OK if successful, or
** an error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option. If the FTS5 table is created
** with either "detail=none" or "detail=column" and "content=" option
** (i.e. if it is a contentless table), then this API always returns 0.
**
** xInst:
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
** output by xInstCount(). If iIdx is less than zero or greater than
** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
** first token of the phrase. SQLITE_OK is returned if successful, or an
** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
**
** xRowid:
** Returns the rowid of the current row.
**
** xTokenize:
** Tokenize text using the tokenizer belonging to the FTS5 table.
**
** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
** This API function is used to query the FTS table for phrase iPhrase
** of the current query. Specifically, a query equivalent to:
**
** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
**
** with $p set to a phrase equivalent to the phrase iPhrase of the
** current query is executed. Any column filter that applies to
** phrase iPhrase of the current query is included in $p. For each
** row visited, the callback function passed as the fourth argument
** is invoked. The context and API objects passed to the callback
** function may be used to access the properties of each matched row.
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
** If parameter iPhrase is less than zero, or greater than or equal to
** the number of phrases in the query, as returned by xPhraseCount(),
** this function returns SQLITE_RANGE.
**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
** Otherwise, the error code is propagated upwards.
**
** If the query runs to completion without incident, SQLITE_OK is returned.
** Or, if some error occurs before the query completes or is aborted by
** the callback, an SQLite error code is returned.
**
**
** xSetAuxdata(pFts5, pAux, xDelete)
**
** Save the pointer passed as the second argument as the extension function's
** "auxiliary data". The pointer may then be retrieved by the current or any
** future invocation of the same fts5 extension function made as part of
** the same MATCH query using the xGetAuxdata() API.
**
** Each extension function is allocated a single auxiliary data slot for
** each FTS query (MATCH expression). If the extension function is invoked
** more than once for a single FTS query, then all invocations share a
** single auxiliary data context.
**
** If there is already an auxiliary data pointer when this function is
** invoked, then it is replaced by the new pointer. If an xDelete callback
** was specified along with the original pointer, it is invoked at this
** point.
**
** The xDelete callback, if one is specified, is also invoked on the
** auxiliary data pointer after the FTS5 query has finished.
**
** If an error (e.g. an OOM condition) occurs within this function,
** the auxiliary data is set to NULL and an error code returned. If the
** xDelete parameter was not NULL, it is invoked on the auxiliary data
** pointer before returning.
**
**
** xGetAuxdata(pFts5, bClear)
**
** Returns the current auxiliary data pointer for the fts5 extension
** function. See the xSetAuxdata() method for details.
**
** If the bClear argument is non-zero, then the auxiliary data is cleared
** (set to NULL) before this function returns. In this case the xDelete,
** if any, is not invoked.
**
**
** xRowCount(pFts5, pnRow)
**
** This function is used to retrieve the total number of rows in the table.
** In other words, the same value that would be returned by:
**
** SELECT count(*) FROM ftstable;
**
** xPhraseFirst()
** This function is used, along with type Fts5PhraseIter and the xPhraseNext
** method, to iterate through all instances of a single query phrase within
** the current row. This is the same information as is accessible via the
** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
** to use, this API may be faster under some circumstances. To iterate
** through instances of phrase iPhrase, use the following code:
**
** Fts5PhraseIter iter;
** int iCol, iOff;
** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
** iCol>=0;
** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
** ){
** // An instance of phrase iPhrase at offset iOff of column iCol
** }
**
** The Fts5PhraseIter structure is defined above. Applications should not
** modify this structure directly - it should only be used as shown above
** with the xPhraseFirst() and xPhraseNext() API methods (and by
** xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below).
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option. If the FTS5 table is created
** with either "detail=none" or "detail=column" and "content=" option
** (i.e. if it is a contentless table), then this API always iterates
** through an empty set (all calls to xPhraseFirst() set iCol to -1).
**
** xPhraseNext()
** See xPhraseFirst above.
**
** xPhraseFirstColumn()
** This function and xPhraseNextColumn() are similar to the xPhraseFirst()
** and xPhraseNext() APIs described above. The difference is that instead
** of iterating through all instances of a phrase in the current row, these
** APIs are used to iterate through the set of columns in the current row
** that contain one or more instances of a specified phrase. For example:
**
** Fts5PhraseIter iter;
** int iCol;
** for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol);
** iCol>=0;
** pApi->xPhraseNextColumn(pFts, &iter, &iCol)
** ){
** // Column iCol contains at least one instance of phrase iPhrase
** }
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" option. If the FTS5 table is created with either
** "detail=none" "content=" option (i.e. if it is a contentless table),
** then this API always iterates through an empty set (all calls to
** xPhraseFirstColumn() set iCol to -1).
**
** The information accessed using this API and its companion
** xPhraseFirstColumn() may also be obtained using xPhraseFirst/xPhraseNext
** (or xInst/xInstCount). The chief advantage of this API is that it is
** significantly more efficient than those alternatives when used with
** "detail=column" tables.
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
**
** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
** This is used to access token iToken of phrase iPhrase of the current
** query. Before returning, output parameter *ppToken is set to point
** to a buffer containing the requested token, and *pnToken to the
** size of this buffer in bytes.
**
** If iPhrase or iToken are less than zero, or if iPhrase is greater than
** or equal to the number of phrases in the query as reported by
** xPhraseCount(), or if iToken is equal to or greater than the number of
** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
** are both zeroed.
**
** The output text is not a copy of the query text that specified the
** token. It is the output of the tokenizer module. For tokendata=1
** tables, this includes any embedded 0x00 and trailing data.
**
** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
** This is used to access token iToken of phrase hit iIdx within the
** current row. If iIdx is less than zero or greater than or equal to the
** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
** output variable (*ppToken) is set to point to a buffer containing the
** matching document token, and (*pnToken) to the size of that buffer in
** bytes. This API is not available if the specified token matches a
** prefix query term. In that case both output variables are always set
** to 0.
**
** The output text is not a copy of the document text that was tokenized.
** It is the output of the tokenizer module. For tokendata=1 tables, this
** includes any embedded 0x00 and trailing data.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
*/
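// For illustration, a minimal auxiliary function using the API described
// above could report the number of phrase matches in the current row. The
// names matchCountFunc and "matchcount" are hypothetical; registration is
// done through fts5_api.xCreateFunction() (see the registration API below):
//
//  static void matchCountFunc(
//    const Fts5ExtensionApi *pApi,   /* API offered by the current FTS version */
//    Fts5Context *pFts,              /* First argument to pass to pApi methods */
//    sqlite3_context *pCtx,          /* Context for returning a result or error */
//    int nVal,                       /* Number of values in apVal[] */
//    sqlite3_value **apVal           /* Trailing SQL arguments */
//  ){
//    int nInst = 0;
//    int rc = pApi->xInstCount(pFts, &nInst);
//    if( rc!=SQLITE_OK ){
//      sqlite3_result_error_code(pCtx, rc);
//    }else{
//      sqlite3_result_int(pCtx, nInst);
//    }
//  }
//
//  /* SELECT matchcount(t) FROM t WHERE t MATCH 'sqlite'; */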
type TFts5ExtensionApi1 = struct {
FiVersion int32
FxUserData uintptr
FxColumnCount uintptr
FxRowCount uintptr
FxColumnTotalSize uintptr
FxTokenize uintptr
FxPhraseCount uintptr
FxPhraseSize uintptr
FxInstCount uintptr
FxInst uintptr
FxRowid uintptr
FxColumnText uintptr
FxColumnSize uintptr
FxQueryPhrase uintptr
FxSetAuxdata uintptr
FxGetAuxdata uintptr
FxPhraseFirst uintptr
FxPhraseNext uintptr
FxPhraseFirstColumn uintptr
FxPhraseNextColumn uintptr
FxQueryToken uintptr
FxInstToken uintptr
}
type Fts5ExtensionApi1 = TFts5ExtensionApi1
type Tfts5_tokenizer = struct {
FxCreate uintptr
FxDelete uintptr
FxTokenize uintptr
}
type fts5_tokenizer = Tfts5_tokenizer
type Tfts5_tokenizer1 = struct {
FxCreate uintptr
FxDelete uintptr
FxTokenize uintptr
}
type fts5_tokenizer1 = Tfts5_tokenizer1
/* Flags that may be passed as the third argument to xTokenize() */
/* Flags that may be passed by the tokenizer implementation back to FTS5
** as the third argument to the supplied xToken callback. */
/*
** END OF CUSTOM TOKENIZERS
*************************************************************************/
// C documentation
//
// /*************************************************************************
// ** FTS5 EXTENSION REGISTRATION API
// */
type Tfts5_api = struct {
FiVersion int32
FxCreateTokenizer uintptr
FxFindTokenizer uintptr
FxCreateFunction uintptr
}
type fts5_api = Tfts5_api
type Tfts5_api1 = struct {
FiVersion int32
FxCreateTokenizer uintptr
FxFindTokenizer uintptr
FxCreateFunction uintptr
}
type fts5_api1 = Tfts5_api1
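// For illustration, an extension typically obtains a pointer to this
// structure by evaluating "SELECT fts5(?1)" against the database handle and
// binding a pointer of type "fts5_api_ptr". A sketch, with error handling
// trimmed:
//
//  fts5_api *fts5_api_from_db(sqlite3 *db){
//    fts5_api *pRet = 0;
//    sqlite3_stmt *pStmt = 0;
//
//    if( SQLITE_OK==sqlite3_prepare_v2(db, "SELECT fts5(?1)", -1, &pStmt, 0) ){
//      sqlite3_bind_pointer(pStmt, 1, (void*)&pRet, "fts5_api_ptr", 0);
//      sqlite3_step(pStmt);
//    }
//    sqlite3_finalize(pStmt);
//    return pRet;
//  }
//
//  /* Once obtained, pRet->xCreateFunction()/xCreateTokenizer() register
//  ** custom auxiliary functions and tokenizers. */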
/*
** END OF REGISTRATION API
*************************************************************************/
/******** End of fts5.h *********/
/************** End of sqlite3.h *********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
/*
** Reuse the STATIC_LRU for mutex access to sqlite3_temp_directory.
*/
/*
** Include the configuration header output by 'configure' if we're using the
** autoconf-based build
*/
/************** Include sqliteLimit.h in the middle of sqliteInt.h ***********/
/************** Begin file sqliteLimit.h *************************************/
/*
** 2007 May 7
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file defines various limits of what SQLite can process.
*/
/*
** The maximum length of a TEXT or BLOB in bytes. This also
** limits the size of a row in a table or index.
**
** The hard limit is the ability of a 32-bit signed integer
** to count the size: 2^31-1 or 2147483647.
*/
/*
** This is the maximum number of
**
** * Columns in a table
** * Columns in an index
** * Columns in a view
** * Terms in the SET clause of an UPDATE statement
** * Terms in the result set of a SELECT statement
** * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement.
** * Terms in the VALUES clause of an INSERT statement
**
** The hard upper limit here is 32767. Most database people will
** tell you that in a well-normalized database, you usually should
** not have more than a dozen or so columns in any table. And if
** that is the case, there is no point in having more than a few
** dozen values in any of the other situations described above.
*/
/*
** The maximum length of a single SQL statement in bytes.
**
** It used to be the case that setting this value to zero would
** turn the limit off. That is no longer true. It is not possible
** to turn this limit off.
*/
/*
** The maximum depth of an expression tree. This is limited to
** some extent by SQLITE_MAX_SQL_LENGTH. But sometime you might
** want to place more severe limits on the complexity of an
** expression. A value of 0 means that there is no limit.
*/
/*
** The maximum number of terms in a compound SELECT statement.
** The code generator for compound SELECT statements does one
** level of recursion for each term. A stack overflow can result
** if the number of terms is too large. In practice, most SQL
** never has more than 3 or 4 terms. Use a value of 0 to disable
** any limit on the number of terms in a compound SELECT.
*/
/*
** The maximum number of opcodes in a VDBE program.
** Not currently enforced.
*/
/*
** The maximum number of arguments to an SQL function.
*/
/*
** The suggested maximum number of in-memory pages to use for
** the main database table and for temporary tables.
**
** IMPLEMENTATION-OF: R-30185-15359 The default suggested cache size is -2000,
** which means the cache size is limited to 2048000 bytes of memory.
** IMPLEMENTATION-OF: R-48205-43578 The default suggested cache size can be
** altered using the SQLITE_DEFAULT_CACHE_SIZE compile-time options.
*/
/*
** The default number of frames to accumulate in the log file before
** checkpointing the database in WAL mode.
*/
/*
** The maximum number of attached databases. This must be between 0
** and 125. The upper bound of 125 is because the attached databases are
** counted using a signed 8-bit integer which has a maximum value of 127
** and we have to allow 2 extra counts for the "main" and "temp" databases.
*/
/*
** The maximum value of a ?nnn wildcard that the parser will accept.
** If the value exceeds 32767 then extra space is required for the Expr
** structure. But otherwise, we believe that the number can be as large
** as a signed 32-bit integer can hold.
*/
/* Maximum page size. The upper bound on this value is 65536. This is a limit
** imposed by the use of 16-bit offsets within each page.
**
** Earlier versions of SQLite allowed the user to change this value at
** compile time. This is no longer permitted, on the grounds that it creates
** a library that is technically incompatible with an SQLite library
** compiled with a different limit. If a process operating on a database
** with a page-size of 65536 bytes crashes, then an instance of SQLite
** compiled with the default page-size limit will not be able to rollback
** the aborted transaction. This could lead to database corruption.
*/
/*
** The default size of a database page.
*/
/*
** Ordinarily, if no value is explicitly provided, SQLite creates databases
** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain
** device characteristics (sector-size and atomic write() support),
** SQLite may choose a larger value. This constant is the maximum value
** SQLite will choose on its own.
*/
/*
** Maximum number of pages in one database file.
**
** This is really just the default value for the max_page_count pragma.
** This value can be lowered (or raised) at run-time using the
** max_page_count pragma.
*/
/*
** Maximum length (in bytes) of the pattern in a LIKE or GLOB
** operator.
*/
/*
** Maximum depth of recursion for triggers.
**
** A value of 1 means that a trigger program will not be able to itself
** fire any triggers. A value of 0 means that no trigger programs at all
** may be executed.
*/
/************** End of sqliteLimit.h *****************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
/* Disable nuisance warnings on Borland compilers */
/*
** A few places in the code require atomic load/store of aligned
** integer values.
*/
/*
** Include standard header files as necessary
*/
/*
** The following macros are used to cast pointers to integers and
** integers to pointers. The way you do this varies from one compiler
** to the next, so we have developed the following set of #if statements
** to generate appropriate macros for a wide range of compilers.
**
** The correct "ANSI" way to do this is to use the intptr_t type.
** Unfortunately, that typedef is not available on all compilers, or
** if it is available, it requires an #include of specific headers
** that vary from one machine to the next.
**
** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on
** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)).
** So we have to define the macros in different ways depending on the
** compiler.
*/
/*
** Macros to hint to the compiler that a function should or should not be
** inlined.
*/
/*
** Make sure that the compiler intrinsics we desire are enabled when
** compiling with an appropriate version of MSVC unless prevented by
** the SQLITE_DISABLE_INTRINSIC define.
*/
/*
** Enable SQLITE_USE_SEH by default on MSVC builds. Only omit
** SEH support if the -DSQLITE_OMIT_SEH option is given.
*/
/*
** Enable SQLITE_DIRECT_OVERFLOW_READ, unless the build explicitly
** disables it using -DSQLITE_DIRECT_OVERFLOW_READ=0
*/
/* In all other cases, enable */
/*
** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
** 0 means mutexes are permanently disabled and the library is never
** threadsafe. 1 means the library is serialized which is the highest
** level of threadsafety. 2 means the library is multithreaded - multiple
** threads can use SQLite as long as no two threads try to use the same
** database connection at the same time.
**
** Older versions of SQLite used an optional THREADSAFE macro.
** We support that for legacy.
**
** To ensure that the correct value of "THREADSAFE" is reported when querying
** for compile-time options at runtime (e.g. "PRAGMA compile_options"), this
** logic is partially replicated in ctime.c. If it is updated here, it should
** also be updated there.
*/
/*
** Powersafe overwrite is on by default. But can be turned off using
** the -DSQLITE_POWERSAFE_OVERWRITE=0 command-line option.
*/
/*
** EVIDENCE-OF: R-25715-37072 Memory allocation statistics are enabled by
** default unless SQLite is compiled with SQLITE_DEFAULT_MEMSTATUS=0 in
** which case memory allocation statistics are disabled by default.
*/
/*
** Exactly one of the following macros must be defined in order to
** specify which memory allocation subsystem to use.
**
** SQLITE_SYSTEM_MALLOC // Use normal system malloc()
** SQLITE_WIN32_MALLOC // Use Win32 native heap API
** SQLITE_ZERO_MALLOC // Use a stub allocator that always fails
** SQLITE_MEMDEBUG // Debugging version of system malloc()
**
** On Windows, if the SQLITE_WIN32_MALLOC_VALIDATE macro is defined and the
** assert() macro is enabled, each call into the Win32 native heap subsystem
** will cause HeapValidate to be called. If heap validation should fail, an
** assertion will be triggered.
**
** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as
** the default.
*/
/*
** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the
** sizes of memory allocations below this value where possible.
*/
/*
** We need to define _XOPEN_SOURCE as follows in order to enable
** recursive mutexes on most Unix systems and fchmod() on OpenBSD.
** But _XOPEN_SOURCE define causes problems for Mac OS X, so omit
** it.
*/
/*
** NDEBUG and SQLITE_DEBUG are opposites. It should always be true that
** defined(NDEBUG)==!defined(SQLITE_DEBUG). If this is not currently true,
** make it true by defining or undefining NDEBUG.
**
** Setting NDEBUG makes the code smaller and faster by disabling the
** assert() statements in the code. So we want the default action
** to be for NDEBUG to be set and NDEBUG to be undefined only if SQLITE_DEBUG
** is set. Thus NDEBUG becomes an opt-in rather than an opt-out
** feature.
*/
/*
** Enable SQLITE_ENABLE_EXPLAIN_COMMENTS if SQLITE_DEBUG is turned on.
*/
/*
** The testcase() macro is used to aid in coverage testing. When
** doing coverage testing, the condition inside the argument to
** testcase() must be evaluated both true and false in order to
** get full branch coverage. The testcase() macro is inserted
** to help ensure adequate test coverage in places where simple
** condition/decision coverage is inadequate. For example, testcase()
** can be used to make sure boundary values are tested. For
** bitmask tests, testcase() can be used to make sure each bit
** is significant and used at least once. On switch statements
** where multiple cases go to the same block of code, testcase()
** can ensure that all cases are evaluated.
*/
/*
** The TESTONLY macro is used to enclose variable declarations or
** other bits of code that are needed to support the arguments
** within testcase() and assert() macros.
*/
/*
** Sometimes we need a small amount of code such as a variable initialization
** to setup for a later assert() statement. We do not want this code to
** appear when assert() is disabled. The following macro is therefore
** used to contain that setup code. The "VVA" acronym stands for
** "Verification, Validation, and Accreditation". In other words, the
** code within VVA_ONLY() will only run during verification processes.
*/
/*
** Disable ALWAYS() and NEVER() (make them pass-throughs) for coverage
** and mutation testing
*/
/*
** The ALWAYS and NEVER macros surround boolean expressions which
** are intended to always be true or false, respectively. Such
** expressions could be omitted from the code completely. But they
** are included in a few cases in order to enhance the resilience
** of SQLite to unexpected behavior - to make the code "self-healing"
** or "ductile" rather than being "brittle" and crashing at the first
** hint of unplanned behavior.
**
** In other words, ALWAYS and NEVER are added for defensive code.
**
** When doing coverage testing ALWAYS and NEVER are hard-coded to
** be true and false so that the unreachable code they specify will
** not be counted as untested code.
*/
/*
** Some conditionals are optimizations only. In other words, if the
** conditionals are replaced with a constant 1 (true) or 0 (false) then
** the correct answer is still obtained, though perhaps not as quickly.
**
** The following macros mark these optimizations conditionals.
*/
/*
** Some malloc failures are only possible if SQLITE_TEST_REALLOC_STRESS is
** defined. We need to defend against those failures when testing with
** SQLITE_TEST_REALLOC_STRESS, but we don't want the unreachable branches
** during a normal build. The following macro can be used to disable tests
** that are always false except when SQLITE_TEST_REALLOC_STRESS is set.
*/
/*
** Declarations used for tracing the operating system interfaces.
*/
/*
** Is the sqlite3ErrName() function needed in the build? Currently,
** it is needed by "mutex_w32.c" (when debugging), "os_win.c" (when
** OSTRACE is enabled), and by several "test*.c" files (which are
** compiled using SQLITE_TEST).
*/
/*
** SQLITE_ENABLE_EXPLAIN_COMMENTS is incompatible with SQLITE_OMIT_EXPLAIN
*/
/*
** SQLITE_OMIT_VIRTUALTABLE implies SQLITE_OMIT_ALTERTABLE
*/
/*
** Return true (non-zero) if the input is an integer that is too large
** to fit in 32-bits. This macro is used inside of various testcase()
** macros to verify that we have tested SQLite for large-file support.
*/
/*
** The macro unlikely() is a hint that surrounds a boolean
** expression that is usually false. Macro likely() surrounds
** a boolean expression that is usually true. These hints could,
** in theory, be used by the compiler to generate better code, but
** currently they are just comments for human readers.
*/
/************** Include hash.h in the middle of sqliteInt.h ******************/
/************** Begin file hash.h ********************************************/
/*
** 2001 September 22
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This is the header file for the generic hash-table implementation
** used in SQLite.
*/
// C documentation
//
// /* Forward declarations of structures. */
type THash = struct {
Fhtsize uint32
Fcount uint32
Ffirst uintptr
Fht uintptr
}
type Hash = THash
type THashElem = struct {
Fnext uintptr
Fprev uintptr
Fdata uintptr
FpKey uintptr
}
type HashElem = THashElem
/* A complete hash table is an instance of the following structure.
** The internals of this structure are intended to be opaque -- client
** code should not attempt to access or modify the fields of this structure
** directly. Change this structure only by using the routines below.
** However, some of the "procedures" and "functions" for modifying and
** accessing this structure are really macros, so we can't really make
** this structure opaque.
**
** All elements of the hash table are on a single doubly-linked list.
** Hash.first points to the head of this list.
**
** There are Hash.htsize buckets. Each bucket points to a spot in
** the global doubly-linked list. The contents of the bucket are the
** element pointed to plus the next _ht.count-1 elements in the list.
**
** Hash.htsize and Hash.ht may be zero. In that case lookup is done
** by a linear search of the global list. For small tables, the
** Hash.ht table is never allocated because if there are few elements
** in the table, it is faster to do a linear search than to manage
** the hash table.
*/
type THash1 = struct {
Fhtsize uint32
Fcount uint32
Ffirst uintptr
Fht uintptr
}
type Hash1 = THash1
/* Each element in the hash table is an instance of the following
** structure. All elements are stored on a single doubly-linked list.
**
** Again, this structure is intended to be opaque, but it can't really
** be opaque because it is used by macros.
*/
type THashElem1 = struct {
Fnext uintptr
Fprev uintptr
Fdata uintptr
FpKey uintptr
}
type HashElem1 = THashElem1
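// For illustration, the hash table declared above is normally manipulated
// through the internal routines implemented in hash.c. Keys are
// NUL-terminated strings and the stored data is an opaque pointer; a rough
// sketch of typical usage, shown here only to clarify the structure:
//
//  Hash h;
//  sqlite3HashInit(&h);
//  sqlite3HashInsert(&h, "key", pData);    /* returns any prior data for "key" */
//  pData = sqlite3HashFind(&h, "key");     /* NULL if the key is not present   */
//  sqlite3HashInsert(&h, "key", 0);        /* inserting NULL removes the entry */
//  sqlite3HashClear(&h);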
type Tfpos_t = int64
type fpos_t = Tfpos_t
type Tsize_t = uint64
type size_t = Tsize_t
type Trsize_t = uint64
type rsize_t = Trsize_t
type Toff_t = int64
type off_t = Toff_t
type Tssize_t = int64
type ssize_t = Tssize_t
type Toff64_t = int64
type off64_t = Toff64_t
type t__sbuf = struct {
F_base uintptr
F_size int32
}
type t__sFILE = struct {
F_p uintptr
F_r int32
F_w int32
F_flags int16
F_file int16
F_bf t__sbuf
F_lbfsize int32
F_cookie uintptr
F_close uintptr
F_read uintptr
F_seek uintptr
F_write uintptr
F_ub t__sbuf
F_up uintptr
F_ur int32
F_ubuf [3]uint8
F_nbuf [1]uint8
F_lb t__sbuf
F_blksize int32
F_offset Tfpos_t
F_fl_mutex uintptr
F_fl_owner uintptr
F_fl_count int32
F_orientation int32
F_mbstate t__mbstate_t
F_flags2 int32
}
type TFILE = struct {
F_p uintptr
F_r int32
F_w int32
F_flags int16
F_file int16
F_bf t__sbuf
F_lbfsize int32
F_cookie uintptr
F_close uintptr
F_read uintptr
F_seek uintptr
F_write uintptr
F_ub t__sbuf
F_up uintptr
F_ur int32
F_ubuf [3]uint8
F_nbuf [1]uint8
F_lb t__sbuf
F_blksize int32
F_offset Tfpos_t
F_fl_mutex uintptr
F_fl_owner uintptr
F_fl_count int32
F_orientation int32
F_mbstate t__mbstate_t
F_flags2 int32
}
type FILE = TFILE
type Tcookie_io_functions_t = struct {
Fread uintptr
Fwrite uintptr
Fseek uintptr
Fclose1 uintptr
}
type cookie_io_functions_t = Tcookie_io_functions_t
type Trune_t = int32
type rune_t = Trune_t
type Twchar_t = int32
type wchar_t = Twchar_t
type Tdiv_t = struct {
Fquot int32
Frem int32
}
type div_t = Tdiv_t
type Tldiv_t = struct {
Fquot int64
Frem int64
}
type ldiv_t = Tldiv_t
type Tlldiv_t = struct {
Fquot int64
Frem int64
}
type lldiv_t = Tlldiv_t
type Terrno_t = int32
type errno_t = Terrno_t
type Tconstraint_handler_t = uintptr
type constraint_handler_t = Tconstraint_handler_t
type Tlocale_t = uintptr
type locale_t = Tlocale_t
type Tmode_t = uint16
type mode_t = Tmode_t
type Tptrdiff_t = int64
type ptrdiff_t = Tptrdiff_t
type Tmax_align_t = struct {
F__max_align1 int64
F__max_align2 float64
}
type max_align_t = Tmax_align_t
/* ISO/IEC 9899:2011 K.3.3.2 */
/*
** Use a macro to replace memcpy() if compiled with SQLITE_INLINE_MEMCPY.
** This allows better measurements of where memcpy() is used when running
** cachegrind. But this macro version of memcpy() is very slow so it
** should not be used in production. This is a performance measurement
** hack only.
*/
/*
** If compiling for a processor that lacks floating point support,
** substitute integer for floating-point
*/
/*
** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or 0
** afterward. Having this macro allows us to cause the C compiler
** to omit code used by TEMP tables without messy #ifndef statements.
*/
/*
** The "file format" number is an integer that is incremented whenever
** the VDBE-level file format changes. The following macros define
** the default file format for new databases and the maximum file format
** that the library can read.
*/
/*
** Determine whether triggers are recursive by default. This can be
** changed at run-time using a pragma.
*/
/*
** Provide a default value for SQLITE_TEMP_STORE in case it is not specified
** on the command-line
*/
/*
** If no value has been provided for SQLITE_MAX_WORKER_THREADS, or if
** SQLITE_TEMP_STORE is set to 3 (never use temporary files), set it
** to zero.
*/
/*
** The default initial allocation for the pagecache when using separate
** pagecaches for each database connection. A positive number is the
** number of pages. A negative number N means that a buffer
** of -1024*N bytes is allocated and used for as many pages as it will hold.
**
** The default value of "20" was chosen to minimize the run-time of the
** speedtest1 test program with options: --shrink-memory --reprepare
*/
/*
** Default value for the SQLITE_CONFIG_SORTERREF_SIZE option.
*/
/*
** The compile-time options SQLITE_MMAP_READWRITE and
** SQLITE_ENABLE_BATCH_ATOMIC_WRITE are not compatible with one another.
** You must choose one or the other (or neither) but not both.
*/
/*
** GCC does not define the offsetof() macro so we'll have to do it
** ourselves.
*/
/*
** Macros to compute minimum and maximum of two numbers.
*/
/*
** Swap two objects of type TYPE.
*/
/*
** Check to see if this machine uses EBCDIC. (Yes, believe it or
** not, there are still machines out there that use EBCDIC.)
*/
// C documentation
//
// /*
// ** Integers of known sizes. These typedefs might change for architectures
// ** where the sizes vary. Preprocessor macros are available so that the
// ** types can be conveniently redefined at compile-time. Like this:
// **
// ** cc '-DUINTPTR_TYPE=long long int' ...
// */
type Ti64 = int64
type i64 = Ti64
/* 8-byte signed integer */
type Tu64 = uint64
type u64 = Tu64
/* 8-byte unsigned integer */
type Tu32 = uint32
type u32 = Tu32
/* 4-byte unsigned integer */
type Tu16 = uint16
type u16 = Tu16
/* 2-byte unsigned integer */
type Ti16 = int16
type i16 = Ti16
/* 2-byte signed integer */
type Tu8 = uint8
type u8 = Tu8
/* 1-byte unsigned integer */
type Ti8 = int8
type i8 = Ti8
/* 1-byte signed integer */
/*
** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value
** that can be stored in a u32 without loss of data. The value
** is 0x00000000ffffffff. But because of quirks of some compilers, we
** have to specify the value in the less intuitive manner shown:
*/
// C documentation
//
// /*
// ** The datatype used to store estimates of the number of rows in a
// ** table or index.
// */
type TtRowcnt = uint64
type tRowcnt = TtRowcnt
// C documentation
//
// /*
// ** Estimated quantities used for query planning are stored as 16-bit
// ** logarithms. For quantity X, the value stored is 10*log2(X). This
// ** gives a possible range of values of approximately 1.0e986 to 1e-986.
// ** But the allowed values are "grainy". Not every value is representable.
// ** For example, quantities 16 and 17 are both represented by a LogEst
// ** of 40. However, since LogEst quantities are supposed to be estimates,
// ** not exact values, this imprecision is not a problem.
// **
// ** "LogEst" is short for "Logarithmic Estimate".
// **
// ** Examples:
// ** 1 -> 0 20 -> 43 10000 -> 132
// ** 2 -> 10 25 -> 46 25000 -> 146
// ** 3 -> 16 100 -> 66 1000000 -> 199
// ** 4 -> 20 1000 -> 99 1048576 -> 200
// ** 10 -> 33 1024 -> 100 4294967296 -> 320
// **
// ** The LogEst can be negative to indicate fractional values.
// ** Examples:
// **
// ** 0.5 -> -10 0.1 -> -33 0.0625 -> -40
// */
type TLogEst = int16
type LogEst = TLogEst
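// For illustration, the mapping described above is approximately 10*log2(X)
// rounded down to an integer. The library computes it with an integer-only
// routine (sqlite3LogEst()), so individual values may differ by a point or
// two from this floating-point sketch:
//
//  #include <math.h>
//  static short approxLogEst(double x){
//    return (short)(10.0*log2(x));   /* e.g. 10*log2(1000) = 99.6 -> about 99 */
//  }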
/*
** Set the SQLITE_PTRSIZE macro to the number of bytes in a pointer
*/
// C documentation
//
// /* The uptr type is an unsigned integer large enough to hold a pointer
// */
type Tuptr = uint64
type uptr = Tuptr
/*
** Bits for the sqlite3WhereTrace mask:
**
** (---any--) Top-level block structure
** 0x-------F High-level debug messages
** 0x----FFF- More detail
** 0xFFFF---- Low-level debug messages
**
** 0x00000001 Code generation
** 0x00000002 Solver
** 0x00000004 Solver costs
** 0x00000008 WhereLoop inserts
**
** 0x00000010 Display sqlite3_index_info xBestIndex calls
** 0x00000020 Range and equality scan metrics
** 0x00000040 IN operator decisions
** 0x00000080 WhereLoop cost adjustments
** 0x00000100
** 0x00000200 Covering index decisions
** 0x00000400 OR optimization
** 0x00000800 Index scanner
** 0x00001000 More details associated with code generation
** 0x00002000
** 0x00004000 Show all WHERE terms at key points
** 0x00008000 Show the full SELECT statement at key places
**
** 0x00010000 Show more detail when printing WHERE terms
** 0x00020000 Show WHERE terms returned from whereScanNext()
*/
// C documentation
//
// /*
// ** An instance of the following structure is used to store the busy-handler
// ** callback for a given sqlite handle.
// **
// ** The sqlite.busyHandler member of the sqlite struct contains the busy
// ** callback for the database handle. Each pager opened via the sqlite
// ** handle is passed a pointer to sqlite.busyHandler. The busy-handler
// ** callback is currently invoked only from within pager.c.
// */
type TBusyHandler = struct {
FxBusyHandler uintptr
FpBusyArg uintptr
FnBusy int32
}
type BusyHandler = TBusyHandler
type TBusyHandler1 = struct {
FxBusyHandler uintptr
FpBusyArg uintptr
FnBusy int32
}
type BusyHandler1 = TBusyHandler1
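// For illustration, this structure is typically populated through the public
// sqlite3_busy_handler() interface. A handler returning non-zero asks SQLite
// to retry the blocked operation; returning zero makes the call fail with
// SQLITE_BUSY. The name busyRetry is arbitrary:
//
//  static int busyRetry(void *pArg, int nPrior){
//    if( nPrior>=10 ) return 0;   /* give up after 10 attempts */
//    sqlite3_sleep(50);           /* wait 50 ms before asking SQLite to retry */
//    return 1;
//  }
//
//  rc = sqlite3_busy_handler(db, busyRetry, 0);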
/*
** Name of table that holds the database schema.
**
** The PREFERRED names are used wherever possible. But LEGACY is also
** used for backwards compatibility.
**
** 1. Queries can use either the PREFERRED or the LEGACY names
** 2. The sqlite3_set_authorizer() callback uses the LEGACY name
** 3. The PRAGMA table_list statement uses the PREFERRED name
**
** The LEGACY names are stored in the internal symbol hash table
** in support of (2). Names are translated using sqlite3PreferredTableName()
** for (3). The sqlite3FindTable() function takes care of translating
** names for (1).
**
** Note that "sqlite_temp_schema" can also be called "temp.sqlite_schema".
*/
/*
** The root-page of the schema table.
*/
/*
** The name of the schema table. The name is different for TEMP.
*/
/*
** A convenience macro that returns the number of elements in
** an array.
*/
/*
** Determine if the argument is a power of two
*/
/*
** The following value as a destructor means to use sqlite3DbFree().
** The sqlite3DbFree() routine requires two parameters instead of the
** one parameter that destructors normally want. So we have to introduce
** this magic value that the code knows to handle differently. Any
** pointer will work here as long as it is distinct from SQLITE_STATIC
** and SQLITE_TRANSIENT.
*/
/*
** When SQLITE_OMIT_WSD is defined, it means that the target platform does
** not support Writable Static Data (WSD) such as global and static variables.
** All variables must either be on the stack or dynamically allocated from
** the heap. When WSD is unsupported, the variable declarations scattered
** throughout the SQLite code must become constants instead. The SQLITE_WSD
** macro is used for this purpose. And instead of referencing the variable
** directly, we use its constant as a key to look up the run-time allocated
** buffer that holds the real variable. The constant is also the initializer
** for the run-time allocated buffer.
**
** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL
** macros become no-ops and have zero performance impact.
*/
/*
** The following macros are used to suppress compiler warnings and to
** make it clear to human readers when a function parameter is deliberately
** left unused within the body of a function. This usually happens when
** a function is called via a function pointer. For example the
** implementation of an SQL aggregate step callback may not use the
** parameter indicating the number of arguments passed to the aggregate,
** if it knows that this is enforced elsewhere.
**
** When a function parameter is not used at all within the body of a function,
** it is generally named "NotUsed" or "NotUsed2" to make things even clearer.
** However, these macros may also be used to suppress warnings related to
** parameters that may or may not be used depending on compilation options.
** For example those parameters only used in assert() statements. In these
** cases the parameters are named as per the usual conventions.
*/
// C documentation
//
// /*
// ** Forward references to structures
// */
type TAggInfo = struct {
FdirectMode Tu8
FuseSortingIdx Tu8
FnSortingColumn Tu16
FsortingIdx int32
FsortingIdxPTab int32
FiFirstReg int32
FpGroupBy uintptr
FaCol uintptr
FnColumn int32
FnAccumulator int32
FaFunc uintptr
FnFunc int32
FselId Tu32
}
type AggInfo = TAggInfo
type TAuthContext = struct {
FzAuthContext uintptr
FpParse uintptr
}
type AuthContext = TAuthContext
type TAutoincInfo = struct {
FpNext uintptr
FpTab uintptr
FiDb int32
FregCtr int32
}
type AutoincInfo = TAutoincInfo
type TBitvec = struct {
FiSize Tu32
FnSet Tu32
FiDivisor Tu32
Fu struct {
FaHash [0][124]Tu32
FapSub [0][62]uintptr
FaBitmap [496]Tu8
}
}
type Bitvec = TBitvec
type TCollSeq = struct {
FzName uintptr
Fenc Tu8
FpUser uintptr
FxCmp uintptr
FxDel uintptr
}
type CollSeq = TCollSeq
type TColumn = struct {
FzCnName uintptr
F__ccgo8 uint8
Faffinity int8
FszEst Tu8
FhName Tu8
FiDflt Tu16
FcolFlags Tu16
}
type Column = TColumn
type TCte = struct {
FzName uintptr
FpCols uintptr
FpSelect uintptr
FzCteErr uintptr
FpUse uintptr
FeM10d Tu8
}
type Cte = TCte
type TCteUse = struct {
FnUse int32
FaddrM9e int32
FregRtn int32
FiCur int32
FnRowEst TLogEst
FeM10d Tu8
}
type CteUse = TCteUse
type TDb = struct {
FzDbSName uintptr
FpBt uintptr
Fsafety_level Tu8
FbSyncSet Tu8
FpSchema uintptr
}
type Db = TDb
type TDbClientData = struct {
FpNext uintptr
FpData uintptr
FxDestructor uintptr
FzName [1]int8
}
type DbClientData = TDbClientData
type TDbFixer = struct {
FpParse uintptr
Fw TWalker
FpSchema uintptr
FbTemp Tu8
FzDb uintptr
FzType uintptr
FpName uintptr
}
type DbFixer = TDbFixer
type TSchema = struct {
Fschema_cookie int32
FiGeneration int32
FtblHash THash
FidxHash THash
FtrigHash THash
FfkeyHash THash
FpSeqTab uintptr
Ffile_format Tu8
Fenc Tu8
FschemaFlags Tu16
Fcache_size int32
}
type Schema = TSchema
type TExpr = struct {
Fop Tu8
FaffExpr int8
Fop2 Tu8
Fflags Tu32
Fu struct {
FiValue [0]int32
FzToken uintptr
}
FpLeft uintptr
FpRight uintptr
Fx struct {
FpSelect [0]uintptr
FpList uintptr
}
FnHeight int32
FiTable int32
FiColumn TynVar
FiAgg Ti16
Fw struct {
FiOfst [0]int32
FiJoin int32
}
FpAggInfo uintptr
Fy struct {
FpWin [0]uintptr
Fsub [0]struct {
FiAddr int32
FregReturn int32
}
FpTab uintptr
}
}
type Expr = TExpr
type TExprList = struct {
FnExpr int32
FnAlloc int32
Fa [1]TExprList_item
}
type ExprList = TExprList
type TFKey = struct {
FpFrom uintptr
FpNextFrom uintptr
FzTo uintptr
FpNextTo uintptr
FpPrevTo uintptr
FnCol int32
FisDeferred Tu8
FaAction [2]Tu8
FapTrigger [2]uintptr
FaCol [1]TsColMap
}
type FKey = TFKey
type TFpDecode = struct {
Fsign int8
FisSpecial int8
Fn int32
FiDP int32
Fz uintptr
FzBuf [24]int8
}
type FpDecode = TFpDecode
type TFuncDestructor = struct {
FnRef int32
FxDestroy uintptr
FpUserData uintptr
}
type FuncDestructor = TFuncDestructor
type TFuncDef = struct {
FnArg Ti8
FfuncFlags Tu32
FpUserData uintptr
FpNext uintptr
FxSFunc uintptr
FxFinalize uintptr
FxValue uintptr
FxInverse uintptr
FzName uintptr
Fu struct {
FpDestructor [0]uintptr
FpHash uintptr
}
}
type FuncDef = TFuncDef
type TFuncDefHash = struct {
Fa [23]uintptr
}
type FuncDefHash = TFuncDefHash
type TIdList = struct {
FnId int32
FeU4 Tu8
Fa [1]TIdList_item
}
type IdList = TIdList
type TIndex = struct {
FzName uintptr
FaiColumn uintptr
FaiRowLogEst uintptr
FpTable uintptr
FzColAff uintptr
FpNext uintptr
FpSchema uintptr
FaSortOrder uintptr
FazColl uintptr
FpPartIdxWhere uintptr
FaColExpr uintptr
Ftnum TPgno
FszIdxRow TLogEst
FnKeyCol Tu16
FnColumn Tu16
FonError Tu8
F__ccgo100 uint16
FnSample int32
FmxSample int32
FnSampleCol int32
FaAvgEq uintptr
FaSample uintptr
FaiRowEst uintptr
FnRowEst0 TtRowcnt
FcolNotIdxed TBitmask
}
type Index = TIndex
type TIndexedExpr = struct {
FpExpr uintptr
FiDataCur int32
FiIdxCur int32
FiIdxCol int32
FbMaybeNullRow Tu8
Faff Tu8
FpIENext uintptr
}
type IndexedExpr = TIndexedExpr
type TIndexSample = struct {
Fp uintptr
Fn int32
FanEq uintptr
FanLt uintptr
FanDLt uintptr
}
type IndexSample = TIndexSample
type TKeyInfo = struct {
FnRef Tu32
Fenc Tu8
FnKeyField Tu16
FnAllField Tu16
Fdb uintptr
FaSortFlags uintptr
FaColl [1]uintptr
}
type KeyInfo = TKeyInfo
type TLookaside = struct {
FbDisable Tu32
Fsz Tu16
FszTrue Tu16
FbMalloced Tu8
FnSlot Tu32
FanStat [3]Tu32
FpInit uintptr
FpFree uintptr
FpSmallInit uintptr
FpSmallFree uintptr
FpMiddle uintptr
FpStart uintptr
FpEnd uintptr
FpTrueEnd uintptr
}
type Lookaside = TLookaside
type TLookasideSlot = struct {
FpNext uintptr
}
type LookasideSlot = TLookasideSlot
type TModule = struct {
FpModule uintptr
FzName uintptr
FnRefModule int32
FpAux uintptr
FxDestroy uintptr
FpEpoTab uintptr
}
type Module = TModule
type TNameContext = struct {
FpParse uintptr
FpSrcList uintptr
FuNC struct {
FpAggInfo [0]uintptr
FpUpsert [0]uintptr
FiBaseReg [0]int32
FpEList uintptr
}
FpNext uintptr
FnRef int32
FnNcErr int32
FncFlags int32
FnNestedSelect Tu32
FpWinSelect uintptr
}
type NameContext = TNameContext
type TOnOrUsing = struct {
FpOn uintptr
FpUsing uintptr
}
type OnOrUsing = TOnOrUsing
type TParse = struct {
Fdb uintptr
FzErrMsg uintptr
FpVdbe uintptr
Frc int32
FcolNamesSet Tu8
FcheckSchema Tu8
Fnested Tu8
FnTempReg Tu8
FisMultiWrite Tu8
FmayAbort Tu8
FhasCompound Tu8
FokConstFactor Tu8
FdisableLookaside Tu8
FprepFlags Tu8
FwithinRJSubrtn Tu8
FnRangeReg int32
FiRangeReg int32
FnErr int32
FnTab int32
FnMem int32
FszOpAlloc int32
FiSelfTab int32
FnLabel int32
FnLabelAlloc int32
FaLabel uintptr
FpConstExpr uintptr
FpIdxEpr uintptr
FpIdxPartExpr uintptr
FconstraintName TToken
FwriteMask TyDbMask
FcookieMask TyDbMask
FregRowid int32
FregRoot int32
FnMaxArg int32
FnSelect int32
FnProgressSteps Tu32
FnTableLock int32
FaTableLock uintptr
FpAinc uintptr
FpToplevel uintptr
FpTriggerTab uintptr
FpTriggerPrg uintptr
FpCleanup uintptr
Fu1 struct {
FpReturning [0]uintptr
FaddrCrTab int32
F__ccgo_pad2 [4]byte
}
Foldmask Tu32
Fnewmask Tu32
FnQueryLoop TLogEst
FeTriggerOp Tu8
FbReturning Tu8
FeOrconf Tu8
FdisableTriggers Tu8
FaTempReg [8]int32
FpOuterParse uintptr
FsNameToken TToken
FsLastToken TToken
FnVar TynVar
FiPkSortOrder Tu8
Fexplain Tu8
FeParseMode Tu8
FnVtabLock int32
FnHeight int32
FaddrExplain int32
FpVList uintptr
FpReprepare uintptr
FzTail uintptr
FpNewTable uintptr
FpNewIndex uintptr
FpNewTrigger uintptr
FzAuthContext uintptr
FsArg TToken
FapVtabLock uintptr
FpWith uintptr
FpRename uintptr
}
type Parse = TParse
type TParseCleanup = struct {
FpNext uintptr
FpPtr uintptr
FxCleanup uintptr
}
type ParseCleanup = TParseCleanup
type TPreUpdate = struct {
Fv uintptr
FpCsr uintptr
Fop int32
FaRecord uintptr
Fkeyinfo TKeyInfo
FpUnpacked uintptr
FpNewUnpacked uintptr
FiNewReg int32
FiBlobWrite int32
FiKey1 Ti64
FiKey2 Ti64
FaNew uintptr
FpTab uintptr
FpPk uintptr
}
type PreUpdate = TPreUpdate
type TPrintfArguments = struct {
FnArg int32
FnUsed int32
FapArg uintptr
}
type PrintfArguments = TPrintfArguments
type TRCStr = struct {
FnRCRef Tu64
}
type RCStr = TRCStr
type TRenameToken = struct {
Fp uintptr
Ft TToken
FpNext uintptr
}
type RenameToken = TRenameToken
type TReturning = struct {
FpParse uintptr
FpReturnEL uintptr
FretTrig TTrigger
FretTStep TTriggerStep
FiRetCur int32
FnRetCol int32
FiRetReg int32
FzName [40]int8
}
type Returning = TReturning
type TRowSet = struct {
FpChunk uintptr
Fdb uintptr
FpEntry uintptr
FpLast uintptr
FpFresh uintptr
FpForest uintptr
FnFresh Tu16
FrsFlags Tu16
FiBatch int32
}
type RowSet = TRowSet
type TSavepoint = struct {
FzName uintptr
FnDeferredCons Ti64
FnDeferredImmCons Ti64
FpNext uintptr
}
type Savepoint = TSavepoint
type TSelect = struct {
Fop Tu8
FnSelectRow TLogEst
FselFlags Tu32
FiLimit int32
FiOffset int32
FselId Tu32
FaddrOpenEphm [2]int32
FpEList uintptr
FpSrc uintptr
FpWhere uintptr
FpGroupBy uintptr
FpHaving uintptr
FpOrderBy uintptr
FpPrior uintptr
FpNext uintptr
FpLimit uintptr
FpWith uintptr
FpWin uintptr
FpWinDefn uintptr
}
type Select = TSelect
type TSQLiteThread = struct {
FxTask uintptr
FpIn uintptr
FpResult uintptr
}
type SQLiteThread = TSQLiteThread
type TSelectDest = struct {
FeDest Tu8
FiSDParm int32
FiSDParm2 int32
FiSdst int32
FnSdst int32
FzAffSdst uintptr
FpOrderBy uintptr
}
type SelectDest = TSelectDest
type TSrcItem = struct {
FpSchema uintptr
FzDatabase uintptr
FzName uintptr
FzAlias uintptr
FpTab uintptr
FpSelect uintptr
FaddrFillSub int32
FregReturn int32
FregResult int32
Ffg struct {
F__ccgo_align [0]uint32
Fjointype Tu8
F__ccgo_align1 [2]byte
F__ccgo4 uint16
}
FiCursor int32
Fu3 struct {
FpUsing [0]uintptr
FpOn uintptr
}
FcolUsed TBitmask
Fu1 struct {
FpFuncArg [0]uintptr
FzIndexedBy uintptr
}
Fu2 struct {
FpCteUse [0]uintptr
FpIBIndex uintptr
}
}
type SrcItem = TSrcItem
type TSrcList = struct {
FnSrc int32
FnAlloc Tu32
Fa [1]TSrcItem
}
type SrcList = TSrcList
type TStrAccum = struct {
Fdb uintptr
FzText uintptr
FnAlloc Tu32
FmxAlloc Tu32
FnChar Tu32
FaccError Tu8
FprintfFlags Tu8
}
type StrAccum = TStrAccum
type Tsqlite3_str1 = TStrAccum
type sqlite3_str1 = Tsqlite3_str1
/* Internal alias for sqlite3_str */
type TTable = struct {
FzName uintptr
FaCol uintptr
FpIndex uintptr
FzColAff uintptr
FpCheck uintptr
Ftnum TPgno
FnTabRef Tu32
FtabFlags Tu32
FiPKey Ti16
FnCol Ti16
FnNVCol Ti16
FnRowLogEst TLogEst
FszTabRow TLogEst
FkeyConf Tu8
FeTabType Tu8
Fu struct {
Fview [0]struct {
FpSelect uintptr
}
Fvtab [0]struct {
FnArg int32
FazArg uintptr
Fp uintptr
}
Ftab struct {
FaddColOffset int32
FpFKey uintptr
FpDfltList uintptr
}
}
FpTrigger uintptr
FpSchema uintptr
}
type Table = TTable
type TTableLock = struct {
FiDb int32
FiTab TPgno
FisWriteLock Tu8
FzLockName uintptr
}
type TableLock = TTableLock
type TToken = struct {
Fz uintptr
Fn uint32
}
type Token = TToken
type TTrigger = struct {
FzName uintptr
Ftable uintptr
Fop Tu8
Ftr_tm Tu8
FbReturning Tu8
FpWhen uintptr
FpColumns uintptr
FpSchema uintptr
FpTabSchema uintptr
Fstep_list uintptr
FpNext uintptr
}
type Trigger = TTrigger
type TTriggerPrg = struct {
FpTrigger uintptr
FpNext uintptr
FpProgram uintptr
Forconf int32
FaColmask [2]Tu32
}
type TriggerPrg = TTriggerPrg
type TTriggerStep = struct {
Fop Tu8
Forconf Tu8
FpTrig uintptr
FpSelect uintptr
FzTarget uintptr
FpFrom uintptr
FpWhere uintptr
FpExprList uintptr
FpIdList uintptr
FpUpsert uintptr
FzSpan uintptr
FpNext uintptr
FpLast uintptr
}
type TriggerStep = TTriggerStep
type TUnpackedRecord = struct {
FpKeyInfo uintptr
FaMem uintptr
Fu struct {
Fi [0]Ti64
Fz uintptr
}
Fn int32
FnField Tu16
Fdefault_rc Ti8
FerrCode Tu8
Fr1 Ti8
Fr2 Ti8
FeqSeen Tu8
}
type UnpackedRecord = TUnpackedRecord
type TUpsert = struct {
FpUpsertTarget uintptr
FpUpsertTargetWhere uintptr
FpUpsertSet uintptr
FpUpsertWhere uintptr
FpNextUpsert uintptr
FisDoUpdate Tu8
FisDup Tu8
FpToFree uintptr
FpUpsertIdx uintptr
FpUpsertSrc uintptr
FregData int32
FiDataCur int32
FiIdxCur int32
}
type Upsert = TUpsert
type TVTable = struct {
Fdb uintptr
FpMod uintptr
FpVtab uintptr
FnRef int32
FbConstraint Tu8
FbAllSchemas Tu8
FeVtabRisk Tu8
FiSavepoint int32
FpNext uintptr
}
type VTable = TVTable
type TVtabCtx = struct {
FpVTable uintptr
FpTab uintptr
FpPrior uintptr
FbDeclared int32
}
type VtabCtx = TVtabCtx
type TWalker = struct {
FpParse uintptr
FxExprCallback uintptr
FxSelectCallback uintptr
FxSelectCallback2 uintptr
FwalkerDepth int32
FeCode Tu16
FmWFlags Tu16
Fu struct {
Fn [0]int32
FiCur [0]int32
FpSrcList [0]uintptr
FpCCurHint [0]uintptr
FpRefSrcList [0]uintptr
FaiCol [0]uintptr
FpIdxCover [0]uintptr
FpGroupBy [0]uintptr
FpSelect [0]uintptr
FpRewrite [0]uintptr
FpConst [0]uintptr
FpRename [0]uintptr
FpTab [0]uintptr
FpCovIdxCk [0]uintptr
FpSrcItem [0]uintptr
FpFix [0]uintptr
FaMem [0]uintptr
FpNC uintptr
}
}
type Walker = TWalker
type TWhereInfo = struct {
FpParse uintptr
FpTabList uintptr
FpOrderBy uintptr
FpResultSet uintptr
FpSelect uintptr
FaiCurOnePass [2]int32
FiContinue int32
FiBreak int32
FsavedNQueryLoop int32
FwctrlFlags Tu16
FiLimit TLogEst
FnLevel Tu8
FnOBSat Ti8
FeOnePass Tu8
FeDistinct Tu8
F__ccgo68 uint8
FnRowOut TLogEst
FiTop int32
FiEndWhere int32
FpLoops uintptr
FpMemToFree uintptr
FrevMask TBitmask
FsWC TWhereClause
FsMaskSet TWhereMaskSet
Fa [1]TWhereLevel
}
type WhereInfo = TWhereInfo
type TWindow = struct {
FzName uintptr
FzBase uintptr
FpPartition uintptr
FpOrderBy uintptr
FeFrmType Tu8
FeStart Tu8
FeEnd Tu8
FbImplicitFrame Tu8
FeExclude Tu8
FpStart uintptr
FpEnd uintptr
FppThis uintptr
FpNextWin uintptr
FpFilter uintptr
FpWFunc uintptr
FiEphCsr int32
FregAccum int32
FregResult int32
FcsrApp int32
FregApp int32
FregPart int32
FpOwner uintptr
FnBufferCol int32
FiArgCol int32
FregOne int32
FregStartRowid int32
FregEndRowid int32
FbExprArgs Tu8
}
type Window = TWindow
type TWith = struct {
FnCte int32
FbView int32
FpOuter uintptr
Fa [1]TCte
}
type With = TWith
// C documentation
//
// /*
// ** The bitmask datatype defined below is used for various optimizations.
// **
// ** Changing this from a 64-bit to a 32-bit type limits the number of
// ** tables in a join to 32 instead of 64. But it also reduces the size
// ** of the library by 738 bytes on ix86.
// */
type TBitmask = uint64
type Bitmask = TBitmask
/*
** The number of bits in a Bitmask. "BMS" means "BitMask Size".
*/
/*
** A bit in a Bitmask
*/
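// The sketch below is illustrative only and is not part of the generated API:
// it shows how one bit of a Bitmask stands for one table in a join, so that a
// set of up to 64 tables fits in a single word and a membership test is one
// AND. The helper names are invented for the example.
func exampleTableBit(iTable int) TBitmask {
	return TBitmask(1) << (uint(iTable) & 63) // assumes 0 <= iTable < 64
}

func exampleJoinUsesTable(tablesInJoin TBitmask, iTable int) bool {
	return tablesInJoin&exampleTableBit(iTable) != 0
}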
// C documentation
//
// /* A VList object records a mapping between parameters/variables/wildcards
// ** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer
// ** variable number associated with that parameter. See the format description
// ** on the sqlite3VListAdd() routine for more information. A VList is really
// ** just an array of integers.
// */
type TVList = int32
type VList = TVList
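// Illustrative analogue only, not the real layout: a VList is a flat array of
// integers whose exact format is documented at sqlite3VListAdd(). The map below
// merely shows the kind of mapping the structure records, using made-up
// variable numbers for the parameter names mentioned above.
func exampleParameterNumbers() map[string]TVList {
	return map[string]TVList{"$abc": 1, "@pqr": 2, ":xyz": 3}
}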
/************** End of os.h **************************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
/************** Include pager.h in the middle of sqliteInt.h *****************/
/************** Begin file pager.h *******************************************/
/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This header file defines the interface to the sqlite page cache
** subsystem. The page cache subsystem reads and writes a file a page
** at a time and provides a journal for rollback.
*/
/*
** Default maximum size for persistent journal files. A negative
** value means no limit. This value may be overridden using the
** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit".
*/
// C documentation
//
// /*
// ** The type used to represent a page number. The first page in a file
// ** is called page 1. 0 is used to represent "not a page".
// */
type TPgno = uint32
type Pgno = TPgno
// C documentation
//
// /*
// ** Each open file is managed by a separate instance of the "Pager" structure.
// */
type TPager = struct {
FpVfs uintptr
FexclusiveMode Tu8
FjournalMode Tu8
FuseJournal Tu8
FnoSync Tu8
FfullSync Tu8
FextraSync Tu8
FsyncFlags Tu8
FwalSyncFlags Tu8
FtempFile Tu8
FnoLock Tu8
FreadOnly Tu8
FmemDb Tu8
FmemVfs Tu8
FeState Tu8
FeLock Tu8
FchangeCountDone Tu8
FsetSuper Tu8
FdoNotSpill Tu8
FsubjInMemory Tu8
FbUseFetch Tu8
FhasHeldSharedLock Tu8
FdbSize TPgno
FdbOrigSize TPgno
FdbFileSize TPgno
FdbHintSize TPgno
FerrCode int32
FnRec int32
FcksumInit Tu32
FnSubRec Tu32
FpInJournal uintptr
Ffd uintptr
Fjfd uintptr
Fsjfd uintptr
FjournalOff Ti64
FjournalHdr Ti64
FpBackup uintptr
FaSavepoint uintptr
FnSavepoint int32
FiDataVersion Tu32
FdbFileVers [16]int8
FnMmapOut int32
FszMmap Tsqlite3_int64
FpMmapFreelist uintptr
FnExtra Tu16
FnReserve Ti16
FvfsFlags Tu32
FsectorSize Tu32
FmxPgno TPgno
FlckPgno TPgno
FpageSize Ti64
FjournalSizeLimit Ti64
FzFilename uintptr
FzJournal uintptr
FxBusyHandler uintptr
FpBusyHandlerArg uintptr
FaStat [4]Tu32
FxReiniter uintptr
FxGet uintptr
FpTmpSpace uintptr
FpPCache uintptr
FpWal uintptr
FzWal uintptr
}
type Pager = TPager
// C documentation
//
// /*
// ** Handle type for pages.
// */
type TDbPage = struct {
FpPage uintptr
FpData uintptr
FpExtra uintptr
FpCache uintptr
FpDirty uintptr
FpPager uintptr
Fpgno TPgno
Fflags Tu16
FnRef Ti64
FpDirtyNext uintptr
FpDirtyPrev uintptr
}
type DbPage = TDbPage
// C documentation
//
// /*
// ** Handle type for pages.
// */
type TPgHdr2 = TDbPage
type PgHdr2 = TPgHdr2
/* Functions to support testing and debugging. */
/************** End of pager.h ***********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
/************** Include btree.h in the middle of sqliteInt.h *****************/
/************** Begin file btree.h *******************************************/
/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This header file defines the interface to the sqlite B-Tree file
** subsystem. See comments in the source code for a detailed description
** of what each interface routine does.
*/
/* TODO: This definition is just included so other modules compile. It
** needs to be revisited.
*/
/*
** If defined as non-zero, auto-vacuum is enabled by default. Otherwise
** it must be turned on for each database using "PRAGMA auto_vacuum = 1".
*/
// C documentation
//
// /*
// ** Forward declarations of structure
// */
type TBtree = struct {
Fdb uintptr
FpBt uintptr
FinTrans Tu8
Fsharable Tu8
Flocked Tu8
FhasIncrblobCur Tu8
FwantToLock int32
FnBackup int32
FiBDataVersion Tu32
FpNext uintptr
FpPrev uintptr
Flock TBtLock
}
type Btree = TBtree
type TBtCursor = struct {
FeState Tu8
FcurFlags Tu8
FcurPagerFlags Tu8
Fhints Tu8
FskipNext int32
FpBtree uintptr
FaOverflow uintptr
FpKey uintptr
FpBt uintptr
FpNext uintptr
Finfo TCellInfo
FnKey Ti64
FpgnoRoot TPgno
FiPage Ti8
FcurIntKey Tu8
Fix Tu16
FaiIdx [19]Tu16
FpKeyInfo uintptr
FpPage uintptr
FapPage [19]uintptr
}
type BtCursor = TBtCursor
type TBtShared = struct {
FpPager uintptr
Fdb uintptr
FpCursor uintptr
FpPage1 uintptr
FopenFlags Tu8
FautoVacuum Tu8
FincrVacuum Tu8
FbDoTruncate Tu8
FinTransaction Tu8
Fmax1bytePayload Tu8
FnReserveWanted Tu8
FbtsFlags Tu16
FmaxLocal Tu16
FminLocal Tu16
FmaxLeaf Tu16
FminLeaf Tu16
FpageSize Tu32
FusableSize Tu32
FnTransaction int32
FnPage Tu32
FpSchema uintptr
FxFreeSchema uintptr
Fmutex uintptr
FpHasContent uintptr
FnRef int32
FpNext uintptr
FpLock uintptr
FpWriter uintptr
FpTmpSpace uintptr
FnPreformatSize int32
}
type BtShared = TBtShared
type TBtreePayload = struct {
FpKey uintptr
FnKey Tsqlite3_int64
FpData uintptr
FaMem uintptr
FnMem Tu16
FnData int32
FnZero int32
}
type BtreePayload = TBtreePayload
/* Allowed flags for sqlite3BtreeDelete() and sqlite3BtreeInsert() */
/* An instance of the BtreePayload object describes the content of a single
** entry in either an index or table btree.
**
** Index btrees (used for indexes and also WITHOUT ROWID tables) contain
** an arbitrary key and no data. These btrees have pKey,nKey set to the
** key and the pData,nData,nZero fields are uninitialized. The aMem,nMem
** fields give an array of Mem objects that are a decomposition of the key.
** The nMem field might be zero, indicating that no decomposition is available.
**
** Table btrees (used for rowid tables) contain an integer rowid used as
** the key and passed in the nKey field. The pKey field is zero.
** pData,nData hold the content of the new entry. nZero extra zero bytes
** are appended to the end of the content when constructing the entry.
** The aMem,nMem fields are uninitialized for table btrees.
**
** Field usage summary:
**
**                     Table BTrees                 Index Btrees
**
**    pKey             always NULL                  encoded key
**    nKey             the ROWID                    length of pKey
**    pData            data                         not used
**    aMem             not used                     decomposed key value
**    nMem             not used                     entries in aMem
**    nData            length of pData              not used
**    nZero            extra zeros after pData      not used
**
** This object is used to pass information into sqlite3BtreeInsert(). The
** same information used to be passed as five separate parameters. But placing
** the information into this object helps to keep the interface more
** organized and understandable, and it also helps the resulting code to
** run a little faster by using fewer registers for parameter passing.
*/
type TBtreePayload1 = struct {
FpKey uintptr
FnKey Tsqlite3_int64
FpData uintptr
FaMem uintptr
FnMem Tu16
FnData int32
FnZero int32
}
type BtreePayload1 = TBtreePayload1
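// Illustrative sketch only: how the BtreePayload fields are populated for the
// two kinds of b-tree entries summarized in the table above. The zero values
// stand in for pointers that real code would supply.
func exampleRowidTablePayload(rowid Tsqlite3_int64, nData int32) TBtreePayload {
	// Table (rowid) b-tree: nKey carries the rowid, pKey stays NULL (0),
	// pData/nData describe the row content, nZero appends trailing zero bytes.
	return TBtreePayload{FnKey: rowid, FpData: 0, FnData: nData, FnZero: 0}
}

func exampleIndexEntryPayload(nKeyBytes Tsqlite3_int64) TBtreePayload {
	// Index b-tree: pKey/nKey describe the encoded key; pData, nData and nZero
	// are not used, and aMem/nMem may optionally carry a decomposed key.
	return TBtreePayload{FpKey: 0, FnKey: nKeyBytes}
}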
/************** End of btree.h ***********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
/************** Include vdbe.h in the middle of sqliteInt.h ******************/
/************** Begin file vdbe.h ********************************************/
/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** Header file for the Virtual DataBase Engine (VDBE)
**
** This header defines the interface to the virtual database engine
** or VDBE. The VDBE implements an abstract machine that runs a
** simple program to access and modify the underlying database.
*/
/* #include */
// C documentation
//
// /*
// ** A single VDBE is an opaque structure named "Vdbe". Only routines
// ** in the source file sqliteVdbe.c are allowed to see the insides
// ** of this structure.
// */
type TVdbe = struct {
Fdb uintptr
FppVPrev uintptr
FpVNext uintptr
FpParse uintptr
FnVar TynVar
FnMem int32
FnCursor int32
FcacheCtr Tu32
Fpc int32
Frc int32
FnChange Ti64
FiStatement int32
FiCurrentTime Ti64
FnFkConstraint Ti64
FnStmtDefCons Ti64
FnStmtDefImmCons Ti64
FaMem uintptr
FapArg uintptr
FapCsr uintptr
FaVar uintptr
FaOp uintptr
FnOp int32
FnOpAlloc int32
FaColName uintptr
FpResultRow uintptr
FzErrMsg uintptr
FpVList uintptr
FstartTime Ti64
FnResColumn Tu16
FnResAlloc Tu16
FerrorAction Tu8
FminWriteFileFormat Tu8
FprepFlags Tu8
FeVdbeState Tu8
F__ccgo200 uint16
FbtreeMask TyDbMask
FlockMask TyDbMask
FaCounter [9]Tu32
FzSql uintptr
FpFree uintptr
FpFrame uintptr
FpDelFrame uintptr
FnFrame int32
Fexpmask Tu32
FpProgram uintptr
FpAuxData uintptr
}
type Vdbe = TVdbe
// C documentation
//
// /*
// ** The names of the following types declared in vdbeInt.h are required
// ** for the VdbeOp definition.
// */
type TMem = struct {
Fu TMemValue
Fz uintptr
Fn int32
Fflags Tu16
Fenc Tu8
FeSubtype Tu8
Fdb uintptr
FszMalloc int32
FuTemp Tu32
FzMalloc uintptr
FxDel uintptr
}
type Mem = TMem
// C documentation
//
// /*
// ** The names of the following types declared in vdbeInt.h are required
// ** for the VdbeOp definition.
// */
type Tsqlite3_value1 = TMem
type sqlite3_value1 = Tsqlite3_value1
type TSubProgram = struct {
FaOp uintptr
FnOp int32
FnMem int32
FnCsr int32
FaOnce uintptr
Ftoken uintptr
FpNext uintptr
}
type SubProgram = TSubProgram
/*
** A single instruction of the virtual machine has an opcode
** and as many as three operands. The instruction is recorded
** as an instance of the following structure:
*/
type TVdbeOp1 = struct {
Fopcode Tu8
Fp4type int8
Fp5 Tu16
Fp1 int32
Fp2 int32
Fp3 int32
Fp4 Tp4union
}
type VdbeOp1 = TVdbeOp1
type TVdbeOp = struct {
Fopcode Tu8
Fp4type int8
Fp5 Tu16
Fp1 int32
Fp2 int32
Fp3 int32
Fp4 Tp4union
}
type VdbeOp = TVdbeOp
/*
** A sub-routine used to implement a trigger program.
*/
type TSubProgram1 = struct {
FaOp uintptr
FnOp int32
FnMem int32
FnCsr int32
FaOnce uintptr
Ftoken uintptr
FpNext uintptr
}
type SubProgram1 = TSubProgram1
/*
** A smaller version of VdbeOp used for the VdbeAddOpList() function because
** it takes up less space.
*/
type TVdbeOpList1 = struct {
Fopcode Tu8
Fp1 int8
Fp2 int8
Fp3 int8
}
type VdbeOpList1 = TVdbeOpList1
type TVdbeOpList = struct {
Fopcode Tu8
Fp1 int8
Fp2 int8
Fp3 int8
}
type VdbeOpList = TVdbeOpList
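// Illustrative sketch only: a statically declared operation list of the compact
// form consumed by VdbeAddOpList(). The opcode numbers are placeholders, not
// real SQLite opcode values.
var exampleOpList = [2]TVdbeOpList{
	{Fopcode: 1, Fp1: 0, Fp2: 0, Fp3: 0}, // placeholder opcode 1
	{Fopcode: 2, Fp1: 1, Fp2: 0, Fp3: 0}, // placeholder opcode 2
}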
type TRecordCompare = uintptr
type RecordCompare = TRecordCompare
/* Use SQLITE_ENABLE_COMMENTS to enable generation of extra comments on
** each VDBE opcode.
**
** Use the SQLITE_ENABLE_MODULE_COMMENTS macro to see some extra no-op
** comments in VDBE programs that show key decision points in the code
** generator.
*/
/*
** The VdbeCoverage macros are used to set a coverage testing point
** for VDBE branch instructions. The coverage testing points are line
** numbers in the sqlite3.c source file. VDBE branch coverage testing
** only works with an amalgamation build. That's ok since a VDBE branch
** coverage build is designed for testing the test suite only. No application
** should ever ship with VDBE branch coverage measuring turned on.
**
** VdbeCoverage(v) // Mark the previously coded instruction
** // as a branch
**
** VdbeCoverageIf(v, conditional) // Mark previous if conditional true
**
** VdbeCoverageAlwaysTaken(v) // Previous branch is always taken
**
** VdbeCoverageNeverTaken(v) // Previous branch is never taken
**
** VdbeCoverageNeverNull(v) // Previous three-way branch is only
** // taken on the first two ways. The
** // NULL option is not possible
**
** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested
** // in distinguishing equal and not-equal.
**
** Every VDBE branch operation must be tagged with one of the macros above.
** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch()
** routine in vdbe.c, alerting the developer to the missed tag.
**
** During testing, the test application will invoke
** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback
** routine that is invoked as each bytecode branch is taken. The callback
** contains the sqlite3.c source line number of the VdbeCoverage macro and
** flags to indicate whether or not the branch was taken. The test application
** is responsible for keeping track of this and reporting byte-code branches
** that are never taken.
**
** See the VdbeBranchTaken() macro and vdbeTakeBranch() function in the
** vdbe.c source file for additional information.
*/
/************** End of vdbe.h ************************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
/************** Include pcache.h in the middle of sqliteInt.h ****************/
/************** Begin file pcache.h ******************************************/
/*
** 2008 August 05
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This header file defines the interface to the sqlite page cache
** subsystem.
*/
type TPgHdr = struct {
FpPage uintptr
FpData uintptr
FpExtra uintptr
FpCache uintptr
FpDirty uintptr
FpPager uintptr
Fpgno TPgno
Fflags Tu16
FnRef Ti64
FpDirtyNext uintptr
FpDirtyPrev uintptr
}
type PgHdr = TPgHdr
type TPCache = struct {
FpDirty uintptr
FpDirtyTail uintptr
FpSynced uintptr
FnRefSum Ti64
FszCache int32
FszSpill int32
FszPage int32
FszExtra int32
FbPurgeable Tu8
FeCreate Tu8
FxStress uintptr
FpStress uintptr
FpCache uintptr
}
type PCache = TPCache
/************** End of mutex.h ***********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
/* The SQLITE_EXTRA_DURABLE compile-time option used to set the default
** synchronous setting to EXTRA. It is no longer supported.
*/
/*
** Default synchronous levels.
**
** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ
** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1.
**
**             PAGER_SYNCHRONOUS        DEFAULT_SYNCHRONOUS
**   OFF              1                          0
**   NORMAL           2                          1
**   FULL             3                          2
**   EXTRA            4                          3
**
** The "PRAGMA synchronous" statement also uses the zero-based numbers.
** In other words, the zero-based numbers are used for all external interfaces
** and the one-based values are used internally.
*/
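// Illustrative sketch only: because the internal PAGER_SYNCHRONOUS_* values are
// one greater than the zero-based numbers used by "PRAGMA synchronous" and
// SQLITE_DEFAULT_SYNCHRONOUS, converting an external level to the internal one
// is a single increment.
func examplePagerSyncLevel(defaultSynchronous int) int {
	return defaultSynchronous + 1 // e.g. FULL is 2 externally and 3 internally
}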
/*
** Each database file to be accessed by the system is an instance
** of the following structure. There are normally two of these structures
** in the sqlite.aDb[] array. aDb[0] is the main database file and
** aDb[1] is the database file used to hold temporary tables. Additional
** databases may be attached.
*/
type TDb1 = struct {
FzDbSName uintptr
FpBt uintptr
Fsafety_level Tu8
FbSyncSet Tu8
FpSchema uintptr
}
type Db1 = TDb1
/*
** An instance of the following structure stores a database schema.
**
** Most Schema objects are associated with a Btree. The exception is
** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing.
** In shared cache mode, a single Schema object can be shared by multiple
** Btrees that refer to the same underlying BtShared object.
**
** Schema objects are automatically deallocated when the last Btree that
** references them is destroyed. The TEMP Schema is manually freed by
** sqlite3_close().
**
** A thread must be holding a mutex on the corresponding Btree in order
** to access Schema content. This implies that the thread must also be
** holding a mutex on the sqlite3 connection pointer that owns the Btree.
** For a TEMP Schema, only the connection mutex is required.
*/
type TSchema1 = struct {
Fschema_cookie int32
FiGeneration int32
FtblHash THash
FidxHash THash
FtrigHash THash
FfkeyHash THash
FpSeqTab uintptr
Ffile_format Tu8
Fenc Tu8
FschemaFlags Tu16
Fcache_size int32
}
type Schema1 = TSchema1
/*
** These macros can be used to test, set, or clear bits in the
** Db.pSchema->flags field.
*/
/*
** Allowed values for the DB.pSchema->flags field.
**
** The DB_SchemaLoaded flag is set after the database schema has been
** read into internal hash tables.
**
** DB_UnresetViews means that one or more views have column names that
** have been filled out. If the schema changes, these column names might
** change and so the views will need to be reset.
*/
/*
** The number of different kinds of things that can be limited
** using the sqlite3_limit() interface.
*/
/*
** Lookaside malloc is a set of fixed-size buffers that can be used
** to satisfy small transient memory allocation requests for objects
** associated with a particular database connection. The use of
** lookaside malloc provides a significant performance enhancement
** (approx 10%) by avoiding numerous malloc/free requests while parsing
** SQL statements.
**
** The Lookaside structure holds configuration information about the
** lookaside malloc subsystem. Each available memory allocation in
** the lookaside subsystem is stored on a linked list of LookasideSlot
** objects.
**
** Lookaside allocations are only allowed for objects that are associated
** with a particular database connection. Hence, schema information cannot
** be stored in lookaside because in shared cache mode the schema information
** is shared by multiple database connections. Therefore, while parsing
** schema information, the Lookaside.bEnabled flag is cleared so that
** lookaside allocations are not used to construct the schema objects.
**
** New lookaside allocations are only allowed if bDisable==0. When
** bDisable is greater than zero, sz is set to zero which effectively
** disables lookaside without adding a new test for the bDisable flag
** in a performance-critical path. sz should be set back to szTrue whenever
** bDisable changes back to zero.
**
** Lookaside buffers are initially held on the pInit list. As they are
** used and freed, they are added back to the pFree list. New allocations
** come off of pFree first, then pInit as a fallback. This dual-list
** allows use to compute a high-water mark - the maximum number of allocations
** outstanding at any point in the past - by subtracting the number of
** allocations on the pInit list from the total number of allocations.
**
** Enhancement on 2019-12-12: Two-size-lookaside
** The default lookaside configuration is 100 slots of 1200 bytes each.
** The larger slot sizes are important for performance, but they waste
** a lot of space, as most lookaside allocations are less than 128 bytes.
** The two-size-lookaside enhancement breaks up the lookaside allocation
** into two pools: One of 128-byte slots and the other of the default size
** (1200-byte) slots. Allocations are filled from the small-pool first,
** failing over to the full-size pool if that does not work. Thus more
** lookaside slots are available while also using less memory.
** This enhancement can be omitted by compiling with
** SQLITE_OMIT_TWOSIZE_LOOKASIDE.
*/
type TLookaside1 = struct {
FbDisable Tu32
Fsz Tu16
FszTrue Tu16
FbMalloced Tu8
FnSlot Tu32
FanStat [3]Tu32
FpInit uintptr
FpFree uintptr
FpSmallInit uintptr
FpSmallFree uintptr
FpMiddle uintptr
FpStart uintptr
FpEnd uintptr
FpTrueEnd uintptr
}
type Lookaside1 = TLookaside1
type TLookasideSlot1 = struct {
FpNext uintptr
}
type LookasideSlot1 = TLookasideSlot1
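// Illustrative sketch only, using invented types: a two-pool free-list
// allocator in the spirit of the lookaside scheme described above. Allocation
// prefers the small pool, and within each pool freed slots are reused before
// never-used ones; the 128-byte small-slot threshold is the documented default.
type exampleLookasideSlotT struct{ next *exampleLookasideSlotT }

type exampleLookasideT struct {
	smallFree *exampleLookasideSlotT // freed small slots
	smallInit *exampleLookasideSlotT // never-used small slots
	fullFree  *exampleLookasideSlotT // freed full-size slots
	fullInit  *exampleLookasideSlotT // never-used full-size slots
}

// examplePopSlot removes and returns the head of a slot list, or nil if empty.
func examplePopSlot(list **exampleLookasideSlotT) *exampleLookasideSlotT {
	p := *list
	if p != nil {
		*list = p.next
	}
	return p
}

func (l *exampleLookasideT) exampleAlloc(n int) *exampleLookasideSlotT {
	if n <= 128 { // small requests try the small pool first
		if p := examplePopSlot(&l.smallFree); p != nil {
			return p
		}
		if p := examplePopSlot(&l.smallInit); p != nil {
			return p
		}
	}
	if p := examplePopSlot(&l.fullFree); p != nil { // fail over to the full-size pool
		return p
	}
	return examplePopSlot(&l.fullInit) // nil here means fall back to the heap
}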
/* Size of the smaller allocations in two-size lookaside */
/*
** A hash table for built-in function definitions. (Application-defined
** functions use a regular hash table from hash.h.)
**
** Hash each FuncDef structure into one of the FuncDefHash.a[] slots.
** Collisions are on the FuncDef.u.pHash chain. Use the SQLITE_FUNC_HASH()
** macro to compute a hash on the function name.
*/
type TFuncDefHash1 = struct {
Fa [23]uintptr
}
type FuncDefHash1 = TFuncDefHash1
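// Illustrative sketch only: built-in functions are hashed into one of the 23
// FuncDefHash.a[] buckets, with collisions chained through FuncDef.u.pHash. The
// bucket formula below (lower-cased first byte plus name length, modulo 23) is
// an assumption made for this example; the real SQLITE_FUNC_HASH() may differ.
func exampleFuncHashBucket(zName string) int {
	if len(zName) == 0 {
		return 0
	}
	c := zName[0] | 0x20 // crude lower-casing of the first byte
	return (int(c) + len(zName)) % 23
}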
// C documentation
//
// /*
// ** typedef for the authorization callback function.
// */
type Tsqlite3_xauth = uintptr
type sqlite3_xauth = Tsqlite3_xauth
/* This is an extra SQLITE_TRACE macro that indicates "legacy" tracing
** in the style of sqlite3_trace()
*/
/*
** Maximum number of sqlite3.aDb[] entries. This is the number of attached
** databases plus 2 for "main" and "temp".
*/
/*
** Each database connection is an instance of the following structure.
*/
type Tsqlite31 = struct {
FpVfs uintptr
FpVdbe uintptr
FpDfltColl uintptr
Fmutex uintptr
FaDb uintptr
FnDb int32
FmDbFlags Tu32
Fflags Tu64
FlastRowid Ti64
FszMmap Ti64
FnSchemaLock Tu32
FopenFlags uint32
FerrCode int32
FerrByteOffset int32
FerrMask int32
FiSysErrno int32
FdbOptFlags Tu32
Fenc Tu8
FautoCommit Tu8
Ftemp_store Tu8
FmallocFailed Tu8
FbBenignMalloc Tu8
FdfltLockMode Tu8
FnextAutovac int8
FsuppressErr Tu8
FvtabOnConflict Tu8
FisTransactionSavepoint Tu8
FmTrace Tu8
FnoSharedCache Tu8
FnSqlExec Tu8
FeOpenState Tu8
FnextPagesize int32
FnChange Ti64
FnTotalChange Ti64
FaLimit [12]int32
FnMaxSorterMmap int32
Finit1 Tsqlite3InitInfo
FnVdbeActive int32
FnVdbeRead int32
FnVdbeWrite int32
FnVdbeExec int32
FnVDestroy int32
FnExtension int32
FaExtension uintptr
Ftrace struct {
FxV2 [0]uintptr
FxLegacy uintptr
}
FpTraceArg uintptr
FxProfile uintptr
FpProfileArg uintptr
FpCommitArg uintptr
FxCommitCallback uintptr
FpRollbackArg uintptr
FxRollbackCallback uintptr
FpUpdateArg uintptr
FxUpdateCallback uintptr
FpAutovacPagesArg uintptr
FxAutovacDestr uintptr
FxAutovacPages uintptr
FpParse uintptr
FpPreUpdateArg uintptr
FxPreUpdateCallback uintptr
FpPreUpdate uintptr
FxWalCallback uintptr
FpWalArg uintptr
FxCollNeeded uintptr
FxCollNeeded16 uintptr
FpCollNeededArg uintptr
FpErr uintptr
Fu1 struct {
FnotUsed1 [0]float64
FisInterrupted int32
F__ccgo_pad2 [4]byte
}
Flookaside TLookaside
FxAuth Tsqlite3_xauth
FpAuthArg uintptr
FxProgress uintptr
FpProgressArg uintptr
FnProgressOps uint32
FnVTrans int32
FaModule THash
FpVtabCtx uintptr
FaVTrans uintptr
FpDisconnect uintptr
FaFunc THash
FaCollSeq THash
FbusyHandler TBusyHandler
FaDbStatic [2]TDb
FpSavepoint uintptr
FnAnalysisLimit int32
FbusyTimeout int32
FnSavepoint int32
FnStatement int32
FnDeferredCons Ti64
FnDeferredImmCons Ti64
FpnBytesFreed uintptr
FpDbData uintptr
FpBlockingConnection uintptr
FpUnlockConnection uintptr
FpUnlockArg uintptr
FxUnlockNotify uintptr
FpNextBlocked uintptr
}
type sqlite31 = Tsqlite31
/*
** A macro to discover the encoding of a database.
*/
/*
** A u64 constant where the lower 32 bits are all zeros. Only the
** upper 32 bits are included in the argument. Necessary because some
** C-compilers still do not accept LL integer literals.
*/
/*
** Possible values for the sqlite3.flags.
**
** Value constraints (enforced via assert()):
** SQLITE_FullFSync == PAGER_FULLFSYNC
** SQLITE_CkptFullFSync == PAGER_CKPT_FULLFSYNC
** SQLITE_CacheSpill == PAGER_CACHE_SPILL
*/
/* result set is empty */
/* DELETE, or UPDATE and return */
/* the count using a callback. */
/* Flags used only if debugging */
/*
** Allowed values for sqlite3.mDbFlags
*/
/*
** Bits of the sqlite3.dbOptFlags field that are used by the
** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to
** selectively disable various optimizations.
*/
/* TH3 expects this value ^^^^^^^^^^ to be 0x0000800. Don't change it */
/* TH3 expects this value ^^^^^^^^^^ to be 0x40000. Coordinate any change */
/* TH3 expects this value ^^^^^^^^^^ See flatten04.test */
/*
** Macros for testing whether or not optimizations are enabled or disabled.
*/
/*
** Return true if it OK to factor constant expressions into the initialization
** code. The argument is a Parse object for the code generator.
*/
/* Possible values for the sqlite3.eOpenState field.
** The numbers are randomly selected such that a minimum of three bits must
** change to convert any number to another or to zero
*/
/*
** Each SQL function is defined by an instance of the following
** structure. For global built-in functions (ex: substr(), max(), count())
** a pointer to this structure is held in the sqlite3BuiltinFunctions object.
** For per-connection application-defined functions, a pointer to this
** structure is held in the db->aHash hash table.
**
** The u.pHash field is used by the global built-ins. The u.pDestructor
** field is used by per-connection app-def functions.
*/
type TFuncDef1 = struct {
FnArg Ti8
FfuncFlags Tu32
FpUserData uintptr
FpNext uintptr
FxSFunc uintptr
FxFinalize uintptr
FxValue uintptr
FxInverse uintptr
FzName uintptr
Fu struct {
FpDestructor [0]uintptr
FpHash uintptr
}
}
type FuncDef1 = TFuncDef1
/*
** This structure encapsulates a user-function destructor callback (as
** configured using create_function_v2()) and a reference counter. When
** create_function_v2() is called to create a function with a destructor,
** a single object of this type is allocated. FuncDestructor.nRef is set to
** the number of FuncDef objects created (either 1 or 3, depending on whether
** or not the specified encoding is SQLITE_ANY). The FuncDef.pDestructor
** member of each of the new FuncDef objects is set to point to the allocated
** FuncDestructor.
**
** Thereafter, when one of the FuncDef objects is deleted, the reference
** count on this object is decremented. When it reaches 0, the destructor
** is invoked and the FuncDestructor structure freed.
*/
type TFuncDestructor1 = struct {
FnRef int32
FxDestroy uintptr
FpUserData uintptr
}
type FuncDestructor1 = TFuncDestructor1
/*
** Possible values for FuncDef.flags. Note that the _LENGTH and _TYPEOF
** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. And
** SQLITE_FUNC_CONSTANT must be the same as SQLITE_DETERMINISTIC. There
** are assert() statements in the code to verify this.
**
** Value constraints (enforced via assert()):
** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg
** SQLITE_FUNC_ANYORDER == NC_OrderAgg == SF_OrderByReqd
** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG
** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG
** SQLITE_FUNC_BYTELEN == OPFLAG_BYTELENARG
** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API
** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API
** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS -- opposite meanings!!!
** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API
**
** Note that even though SQLITE_FUNC_UNSAFE and SQLITE_INNOCUOUS have the
** same bit value, their meanings are inverted. SQLITE_FUNC_UNSAFE is
** used internally and if set means that the function has side effects.
** SQLITE_INNOCUOUS is used by application code and means "not unsafe".
** See multiple instances of tag-20230109-1.
*/
/* 0x0200 -- available for reuse */
/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */
/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */
/* Identifier numbers for each in-line function */
/*
** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
** used to create the initializers for the FuncDef structures.
**
** FUNCTION(zName, nArg, iArg, bNC, xFunc)
** Used to create a scalar function definition of a function zName
** implemented by C function xFunc that accepts nArg arguments. The
** value passed as iArg is cast to a (void*) and made available
** as the user-data (sqlite3_user_data()) for the function. If
** argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set.
**
** VFUNCTION(zName, nArg, iArg, bNC, xFunc)
** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag.
**
** SFUNCTION(zName, nArg, iArg, bNC, xFunc)
** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag and
** adds the SQLITE_DIRECTONLY flag.
**
** INLINE_FUNC(zName, nArg, iFuncId, mFlags)
** zName is the name of a function that is implemented by in-line
** byte code rather than by the usual callbacks. The iFuncId
** parameter determines the function id. The mFlags parameter is
** optional SQLITE_FUNC_ flags for this function.
**
** TEST_FUNC(zName, nArg, iFuncId, mFlags)
** zName is the name of a test-only function implemented by in-line
** byte code rather than by the usual callbacks. The iFuncId
** parameter determines the function id. The mFlags parameter is
** optional SQLITE_FUNC_ flags for this function.
**
** DFUNCTION(zName, nArg, iArg, bNC, xFunc)
** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag and
** adds the SQLITE_FUNC_SLOCHNG flag. Used for date & time functions
** and functions like sqlite_version() that can change, but not during
** a single query. The iArg is ignored. The user-data is always set
** to a NULL pointer. The bNC parameter is not used.
**
** MFUNCTION(zName, nArg, xPtr, xFunc)
** For math-library functions. xPtr is an arbitrary pointer.
**
** PURE_DATE(zName, nArg, iArg, bNC, xFunc)
** Used for "pure" date/time functions, this macro is like DFUNCTION
** except that it does set the SQLITE_FUNC_CONSTANT flags. iArg is
** ignored and the user-data for these functions is set to an
** arbitrary non-NULL pointer. The bNC parameter is not used.
**
** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal)
** Used to create an aggregate function definition implemented by
** the C functions xStep and xFinal. The first four parameters
** are interpreted in the same way as the first 4 parameters to
** FUNCTION().
**
** WAGGREGATE(zName, nArg, iArg, xStep, xFinal, xValue, xInverse)
** Used to create an aggregate function definition implemented by
** the C functions xStep and xFinal. The first four parameters
** are interpreted in the same way as the first 4 parameters to
** FUNCTION().
**
** LIKEFUNC(zName, nArg, pArg, flags)
** Used to create a scalar function definition of a function zName
** that accepts nArg arguments and is implemented by a call to C
** function likeFunc. Argument pArg is cast to a (void *) and made
** available as the function user-data (sqlite3_user_data()). The
** FuncDef.flags variable is set to the value passed as the flags
** parameter.
*/
/*
** All current savepoints are stored in a linked list starting at
** sqlite3.pSavepoint. The first element in the list is the most recently
** opened savepoint. Savepoints are added to the list by the vdbe
** OP_Savepoint instruction.
*/
type TSavepoint1 = struct {
FzName uintptr
FnDeferredCons Ti64
FnDeferredImmCons Ti64
FpNext uintptr
}
type Savepoint1 = TSavepoint1
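// Illustrative sketch only, with invented names: savepoints form a singly
// linked list whose head is the most recently opened savepoint, mirroring the
// sqlite3.pSavepoint list described above.
type exampleSavepointT struct {
	name string
	next *exampleSavepointT
}

// exampleOpenSavepoint pushes a new savepoint onto the front of the list and
// returns the new head, which is conceptually what OP_Savepoint does.
func exampleOpenSavepoint(head *exampleSavepointT, name string) *exampleSavepointT {
	return &exampleSavepointT{name: name, next: head}
}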
/*
** The following are used as the second parameter to sqlite3Savepoint(),
** and as the P1 argument to the OP_Savepoint instruction.
*/
/*
** Each SQLite module (virtual table definition) is defined by an
** instance of the following structure, stored in the sqlite3.aModule
** hash table.
*/
type TModule1 = struct {
FpModule uintptr
FzName uintptr
FnRefModule int32
FpAux uintptr
FxDestroy uintptr
FpEpoTab uintptr
}
type Module1 = TModule1
/*
** Information about each column of an SQL table is held in an instance
** of the Column structure, in the Table.aCol[] array.
**
** Definitions:
**
** "table column index" This is the index of the column in the
** Table.aCol[] array, and also the index of
** the column in the original CREATE TABLE stmt.
**
** "storage column index" This is the index of the column in the
** record BLOB generated by the OP_MakeRecord
** opcode. The storage column index is less than
** or equal to the table column index. It is
** equal if and only if there are no VIRTUAL
** columns to the left.
**
** Notes on zCnName:
** The zCnName field stores the name of the column, the datatype of the
** column, and the collating sequence for the column, in that order, all in
** a single allocation. Each string is 0x00 terminated. The datatype
** is only included if the COLFLAG_HASTYPE bit of colFlags is set and the
** collating sequence name is only included if the COLFLAG_HASCOLL bit is
** set.
*/
type TColumn1 = struct {
FzCnName uintptr
F__ccgo8 uint8
Faffinity int8
FszEst Tu8
FhName Tu8
FiDflt Tu16
FcolFlags Tu16
}
type Column1 = TColumn1
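// Illustrative sketch only: zCnName packs the column name and, optionally, the
// declared type and collating-sequence name into one allocation, each part
// 0x00-terminated. Non-empty arguments stand in for the COLFLAG_HASTYPE and
// COLFLAG_HASCOLL flags that decide which parts are present.
func examplePackColumnName(name, declType, coll string) []byte {
	buf := make([]byte, 0, len(name)+len(declType)+len(coll)+3)
	buf = append(buf, name...)
	buf = append(buf, 0)
	if declType != "" { // present only when COLFLAG_HASTYPE is set
		buf = append(buf, declType...)
		buf = append(buf, 0)
	}
	if coll != "" { // present only when COLFLAG_HASCOLL is set
		buf = append(buf, coll...)
		buf = append(buf, 0)
	}
	return buf
}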
/* Allowed values for Column.eCType.
**
** Values must match entries in the global constant arrays
** sqlite3StdTypeLen[] and sqlite3StdType[]. Each value is one more
** than the offset into these arrays for the corresponding name.
** Adjust the SQLITE_N_STDTYPE value if adding or removing entries.
*/
/* Allowed values for Column.colFlags.
**
** Constraints:
** TF_HasVirtual == COLFLAG_VIRTUAL
** TF_HasStored == COLFLAG_STORED
** TF_HasHidden == COLFLAG_HIDDEN
*/
/*
** A "Collating Sequence" is defined by an instance of the following
** structure. Conceptually, a collating sequence consists of a name and
** a comparison routine that defines the order of that sequence.
**
** If CollSeq.xCmp is NULL, it means that the
** collating sequence is undefined. Indices built on an undefined
** collating sequence may not be read or written.
*/
type TCollSeq1 = struct {
FzName uintptr
Fenc Tu8
FpUser uintptr
FxCmp uintptr
FxDel uintptr
}
type CollSeq1 = TCollSeq1
/*
** A sort order can be either ASC or DESC.
*/
/*
** Column affinity types.
**
** These used to have mnemonic name like 'i' for SQLITE_AFF_INTEGER and
** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve
** the speed a little by numbering the values consecutively.
**
** But rather than start with 0 or 1, we begin with 'A'. That way,
** when multiple affinity types are concatenated into a string and
** used as the P4 operand, they will be more readable.
**
** Note also that the numeric types are grouped together so that testing
** for a numeric type is a single comparison. And the BLOB type is first.
*/
/*
** The SQLITE_AFF_MASK values masks off the significant bits of an
** affinity value.
*/
/*
** Additional bit values that can be ORed with an affinity without
** changing the affinity.
**
** The SQLITE_NOTNULL flag is a combination of NULLEQ and JUMPIFNULL.
** It causes an assert() to fire if either operand to a comparison
** operator is NULL. It is added to certain comparison operators to
** prove that the operands are always NOT NULL.
*/
/*
** An object of this type is created for each virtual table present in
** the database schema.
**
** If the database schema is shared, then there is one instance of this
** structure for each database connection (sqlite3*) that uses the shared
** schema. This is because each database connection requires its own unique
** instance of the sqlite3_vtab* handle used to access the virtual table
** implementation. sqlite3_vtab* handles can not be shared between
** database connections, even when the rest of the in-memory database
** schema is shared, as the implementation often stores the database
** connection handle passed to it via the xConnect() or xCreate() method
** during initialization internally. This database connection handle may
** then be used by the virtual table implementation to access real tables
** within the database. So that they appear as part of the caller's
** transaction, these accesses need to be made via the same database
** connection as that used to execute SQL operations on the virtual table.
**
** All VTable objects that correspond to a single table in a shared
** database schema are initially stored in a linked-list pointed to by
** the Table.pVTable member variable of the corresponding Table object.
** When an sqlite3_prepare() operation is required to access the virtual
** table, it searches the list for the VTable that corresponds to the
** database connection doing the preparing so as to use the correct
** sqlite3_vtab* handle in the compiled query.
**
** When an in-memory Table object is deleted (for example when the
** schema is being reloaded for some reason), the VTable objects are not
** deleted and the sqlite3_vtab* handles are not xDisconnect()ed
** immediately. Instead, they are moved from the Table.pVTable list to
** another linked list headed by the sqlite3.pDisconnect member of the
** corresponding sqlite3 structure. They are then deleted/xDisconnected
** next time a statement is prepared using said sqlite3*. This is done
** to avoid deadlock issues involving multiple sqlite3.mutex mutexes.
** Refer to comments above function sqlite3VtabUnlockList() for an
** explanation as to why it is safe to add an entry to an sqlite3.pDisconnect
** list without holding the corresponding sqlite3.mutex mutex.
**
** The memory for objects of this type is always allocated by
** sqlite3DbMalloc(), using the connection handle stored in VTable.db as
** the first argument.
*/
type TVTable1 = struct {
Fdb uintptr
FpMod uintptr
FpVtab uintptr
FnRef int32
FbConstraint Tu8
FbAllSchemas Tu8
FeVtabRisk Tu8
FiSavepoint int32
FpNext uintptr
}
type VTable1 = TVTable1
/* Allowed values for VTable.eVtabRisk
*/
/*
** The schema for each SQL table, virtual table, and view is represented
** in memory by an instance of the following structure.
*/
type TTable1 = struct {
FzName uintptr
FaCol uintptr
FpIndex uintptr
FzColAff uintptr
FpCheck uintptr
Ftnum TPgno
FnTabRef Tu32
FtabFlags Tu32
FiPKey Ti16
FnCol Ti16
FnNVCol Ti16
FnRowLogEst TLogEst
FszTabRow TLogEst
FkeyConf Tu8
FeTabType Tu8
Fu struct {
Fview [0]struct {
FpSelect uintptr
}
Fvtab [0]struct {
FnArg int32
FazArg uintptr
Fp uintptr
}
Ftab struct {
FaddColOffset int32
FpFKey uintptr
FpDfltList uintptr
}
}
FpTrigger uintptr
FpSchema uintptr
}
type Table1 = TTable1
/*
** Allowed values for Table.tabFlags.
**
** TF_OOOHidden applies to tables or views that have hidden columns that are
** followed by non-hidden columns. Example: "CREATE VIRTUAL TABLE x USING
** vtab1(a HIDDEN, b);". Since "b" is a non-hidden column but "a" is hidden,
** the TF_OOOHidden attribute would apply in this case. Such tables require
** special handling during INSERT processing. The "OOO" means "Out Of Order".
**
** Constraints:
**
** TF_HasVirtual == COLFLAG_VIRTUAL
** TF_HasStored == COLFLAG_STORED
** TF_HasHidden == COLFLAG_HIDDEN
*/
/*
** Allowed values for Table.eTabType
*/
/*
** Test to see whether or not a table is a virtual table. This is
** done as a macro so that it will be optimized out when virtual
** table support is omitted from the build.
*/
/*
** Macros to determine if a column is hidden. IsOrdinaryHiddenColumn()
** only works for non-virtual tables (ordinary tables and views) and is
** always false unless SQLITE_ENABLE_HIDDEN_COLUMNS is defined. The
** IsHiddenColumn() macro is general purpose.
*/
/* Does the table have a rowid */
/* Macro is true if the SQLITE_ALLOW_ROWID_IN_VIEW (mis-)feature is
** available. By default, this macro is false
*/
/*
** Each foreign key constraint is an instance of the following structure.
**
** A foreign key is associated with two tables. The "from" table is
** the table that contains the REFERENCES clause that creates the foreign
** key. The "to" table is the table that is named in the REFERENCES clause.
** Consider this example:
**
** CREATE TABLE ex1(
** a INTEGER PRIMARY KEY,
** b INTEGER CONSTRAINT fk1 REFERENCES ex2(x)
** );
**
** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2".
** Equivalent names:
**
** from-table == child-table
** to-table == parent-table
**
** Each REFERENCES clause generates an instance of the following structure
** which is attached to the from-table. The to-table need not exist when
** the from-table is created. The existence of the to-table is not checked.
**
** The list of all parents for child Table X is held at X.pFKey.
**
** A list of all children for a table named Z (which might not even exist)
** is held in Schema.fkeyHash with a hash key of Z.
*/
type TFKey1 = struct {
FpFrom uintptr
FpNextFrom uintptr
FzTo uintptr
FpNextTo uintptr
FpPrevTo uintptr
FnCol int32
FisDeferred Tu8
FaAction [2]Tu8
FapTrigger [2]uintptr
FaCol [1]TsColMap
}
type FKey1 = TFKey1
/*
** SQLite supports many different ways to resolve a constraint
** error. ROLLBACK processing means that a constraint violation
** causes the operation in process to fail and for the current transaction
** to be rolled back. ABORT processing means the operation in process
** fails and any prior changes from that one operation are backed out,
** but the transaction is not rolled back. FAIL processing means that
** the operation in progress stops and returns an error code. But prior
** changes due to the same operation are not backed out and no rollback
** occurs. IGNORE means that the particular row that caused the constraint
** error is not inserted or updated. Processing continues and no error
** is returned. REPLACE means that preexisting database rows that caused
** a UNIQUE constraint violation are removed so that the new insert or
** update can proceed. Processing continues and no error is reported.
** UPDATE applies to insert operations only and means that the insert
** is omitted and the DO UPDATE clause of an upsert is run instead.
**
** RESTRICT, SETNULL, SETDFLT, and CASCADE actions apply only to foreign keys.
** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the
** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign
** key is set to NULL. SETDFLT means that the foreign key is set
** to its default value. CASCADE means that a DELETE or UPDATE of the
** referenced table row is propagated into the row that holds the
** foreign key.
**
** The OE_Default value is a place holder that means to use whatever
** conflict resolution algorithm is required from context.
**
** The following symbolic values are used to record which type
** of conflict resolution action to take.
*/
/*
** An instance of the following structure is passed as the first
** argument to sqlite3VdbeKeyCompare and is used to control the
** comparison of the two index keys.
**
** Note that aSortOrder[] and aColl[] have nField+1 slots. There
** are nField slots for the columns of an index then one extra slot
** for the rowid at the end.
*/
type TKeyInfo1 = struct {
FnRef Tu32
Fenc Tu8
FnKeyField Tu16
FnAllField Tu16
Fdb uintptr
FaSortFlags uintptr
FaColl [1]uintptr
}
type KeyInfo1 = TKeyInfo1
/*
** Allowed bit values for entries in the KeyInfo.aSortFlags[] array.
*/
/*
** This object holds a record which has been parsed out into individual
** fields, for the purposes of doing a comparison.
**
** A record is an object that contains one or more fields of data.
** Records are used to store the content of a table row and to store
** the key of an index. A blob encoding of a record is created by
** the OP_MakeRecord opcode of the VDBE and is disassembled by the
** OP_Column opcode.
**
** An instance of this object serves as a "key" for doing a search on
** an index b+tree. The goal of the search is to find the entry that
** is closest to the key described by this object. This object might hold
** just a prefix of the key. The number of fields is given by
** pKeyInfo->nField.
**
** The r1 and r2 fields are the values to return if this key is less than
** or greater than a key in the btree, respectively. These are normally
** -1 and +1 respectively, but might be inverted to +1 and -1 if the b-tree
** is in DESC order.
**
** The key comparison functions actually return default_rc when they find
** an equals comparison. default_rc can be -1, 0, or +1. If there are
** multiple entries in the b-tree with the same key (when only looking
** at the first pKeyInfo->nFields,) then default_rc can be set to -1 to
** cause the search to find the last match, or +1 to cause the search to
** find the first match.
**
** The key comparison functions will set eqSeen to true if they ever
** get an equal result when comparing this structure to a b-tree record.
** When default_rc!=0, the search might end up on the record immediately
** before the first match or immediately after the last match. The
** eqSeen field will indicate whether or not an exact match exists in the
** b-tree.
*/
type TUnpackedRecord1 = struct {
FpKeyInfo uintptr
FaMem uintptr
Fu struct {
Fi [0]Ti64
Fz uintptr
}
Fn int32
FnField Tu16
Fdefault_rc Ti8
FerrCode Tu8
Fr1 Ti8
Fr2 Ti8
FeqSeen Tu8
}
type UnpackedRecord1 = TUnpackedRecord1
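// Illustrative sketch only: a prefix-key comparison in the spirit described
// above, with []int64 fields standing in for the real Mem values. r1 is
// returned when the key sorts before the record, r2 when it sorts after, and
// defaultRC when every compared field is equal.
func exampleCompareKey(key, record []int64, r1, r2, defaultRC int8) int8 {
	n := len(key)
	if len(record) < n {
		n = len(record)
	}
	for i := 0; i < n; i++ {
		if key[i] < record[i] {
			return r1
		}
		if key[i] > record[i] {
			return r2
		}
	}
	return defaultRC // an equal prefix; eqSeen would be set in the real code
}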
/*
** Each SQL index is represented in memory by an
** instance of the following structure.
**
** The columns of the table that are to be indexed are described
** by the aiColumn[] field of this structure. For example, suppose
** we have the following table and index:
**
** CREATE TABLE Ex1(c1 int, c2 int, c3 text);
** CREATE INDEX Ex2 ON Ex1(c3,c1);
**
** In the Table structure describing Ex1, nCol==3 because there are
** three columns in the table. In the Index structure describing
** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed.
** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the
** first column to be indexed (c3) has an index of 2 in Ex1.aCol[].
** The second column to be indexed (c1) has an index of 0 in
** Ex1.aCol[], hence Ex2.aiColumn[1]==0.
**
** The Index.onError field determines whether or not the indexed columns
** must be unique and what to do if they are not. When Index.onError=OE_None,
** it means this is not a unique index. Otherwise it is a unique index
** and the value of Index.onError indicates which conflict resolution
** algorithm to employ when an attempt is made to insert a non-unique
** element.
**
** The colNotIdxed bitmask is used in combination with SrcItem.colUsed
** for a fast test to see if an index can serve as a covering index.
** colNotIdxed has a 1 bit for every column of the original table that
** is *not* available in the index. Thus the expression
** "colUsed & colNotIdxed" will be non-zero if the index is not a
** covering index. The most significant bit of colNotIdxed will always
** be true (note-20221022-a). If a column beyond the 63rd column of the
** table is used, the "colUsed & colNotIdxed" test will always be non-zero
** and we have to assume either that the index is not covering, or use
** an alternative (slower) algorithm to determine whether or not
** the index is covering.
**
** While parsing a CREATE TABLE or CREATE INDEX statement in order to
** generate VDBE code (as opposed to parsing one read from an sqlite_schema
** table as part of parsing an existing database schema), transient instances
** of this structure may be created. In this case the Index.tnum variable is
** used to store the address of a VDBE instruction, not a database page
** number (it cannot - the database page is not allocated until the VDBE
** program is executed). See convertToWithoutRowidTable() for details.
*/
type TIndex1 = struct {
FzName uintptr
FaiColumn uintptr
FaiRowLogEst uintptr
FpTable uintptr
FzColAff uintptr
FpNext uintptr
FpSchema uintptr
FaSortOrder uintptr
FazColl uintptr
FpPartIdxWhere uintptr
FaColExpr uintptr
Ftnum TPgno
FszIdxRow TLogEst
FnKeyCol Tu16
FnColumn Tu16
FonError Tu8
F__ccgo100 uint16
FnSample int32
FmxSample int32
FnSampleCol int32
FaAvgEq uintptr
FaSample uintptr
FaiRowEst uintptr
FnRowEst0 TtRowcnt
FcolNotIdxed TBitmask
}
type Index1 = TIndex1
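// Illustrative sketch only: the covering-index test described above is a single
// bitwise AND. colUsed has a bit for each table column the query references and
// colNotIdxed has a bit for each column the index cannot supply, so a zero
// result means the index covers the query. Because the most significant bit of
// colNotIdxed is always set, the test is conservative for tables with more than
// 63 columns.
func exampleIndexIsCovering(colUsed, colNotIdxed TBitmask) bool {
	return colUsed&colNotIdxed == 0
}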
/*
** Allowed values for Index.idxType
*/
/* Return true if index X is a PRIMARY KEY index */
/* Return true if index X is a UNIQUE index */
/* The Index.aiColumn[] values are normally positive integers. But
** there are some negative values that have special meaning:
*/
/*
** Each sample stored in the sqlite_stat4 table is represented in memory
** using a structure of this type. See documentation at the top of the
** analyze.c source file for additional information.
*/
type TIndexSample1 = struct {
Fp uintptr
Fn int32
FanEq uintptr
FanLt uintptr
FanDLt uintptr
}
type IndexSample1 = TIndexSample1
/*
** Possible values to use within the flags argument to sqlite3GetToken().
*/
/*
** Each token coming out of the lexer is an instance of
** this structure. Tokens are also used as part of an expression.
**
** The memory that "z" points to is owned by other objects. Take care
** that the owner of the "z" string does not deallocate the string before
** the Token goes out of scope! Very often, the "z" points to some place
** in the middle of the Parse.zSql text. But it might also point to a
** static string.
*/
type TToken1 = struct {
Fz uintptr
Fn uint32
}
type Token1 = TToken1
/*
** An instance of this structure contains information needed to generate
** code for a SELECT that contains aggregate functions.
**
** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a
** pointer to this structure. The Expr.iAgg field is the index in
** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate
** code for that node.
**
** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the
** original Select structure that describes the SELECT statement. These
** fields do not need to be freed when deallocating the AggInfo structure.
*/
type TAggInfo1 = struct {
FdirectMode Tu8
FuseSortingIdx Tu8
FnSortingColumn Tu16
FsortingIdx int32
FsortingIdxPTab int32
FiFirstReg int32
FpGroupBy uintptr
FaCol uintptr
FnColumn int32
FnAccumulator int32
FaFunc uintptr
FnFunc int32
FselId Tu32
}
type AggInfo1 = TAggInfo1
/*
** Macros to compute aCol[] and aFunc[] register numbers.
**
** These macros should not be used prior to the call to
** assignAggregateRegisters() that computes the value of pAggInfo->iFirstReg.
** The assert()s that are part of this macro verify that constraint.
*/
// C documentation
//
// /*
// ** The datatype ynVar is a signed integer, either 16-bit or 32-bit.
// ** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater
// ** than 32767 we have to make it 32-bit. 16-bit is preferred because
// ** it uses less memory in the Expr object, which is a big memory user
// ** in systems with lots of prepared statements. And few applications
// ** need more than about 10 or 20 variables. But some extreme users want
// ** to have prepared statements with over 32766 variables, and for them
// ** the option is available (at compile-time).
// */
type TynVar = int16
type ynVar = TynVar
/*
** Each node of an expression in the parse tree is an instance
** of this structure.
**
** Expr.op is the opcode. The integer parser token codes are reused
** as opcodes here. For example, the parser defines TK_GE to be an integer
** code representing the ">=" operator. This same integer code is reused
** to represent the greater-than-or-equal-to operator in the expression
** tree.
**
** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB,
** or TK_STRING), then Expr.u.zToken contains the text of the SQL literal. If
** the expression is a variable (TK_VARIABLE), then Expr.u.zToken contains the
** variable name. Finally, if the expression is an SQL function (TK_FUNCTION),
** then Expr.u.zToken contains the name of the function.
**
** Expr.pRight and Expr.pLeft are the left and right subexpressions of a
** binary operator. Either or both may be NULL.
**
** Expr.x.pList is a list of arguments if the expression is an SQL function,
** a CASE expression or an IN expression of the form "<expr> IN (<expr-list>)".
** Expr.x.pSelect is used if the expression is a sub-select or an expression of
** the form " IN (SELECT ...)". If the EP_xIsSelect bit is set in the
** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is
** valid.
**
** An expression of the form ID or ID.ID refers to a column in a table.
** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is
** the integer cursor number of a VDBE cursor pointing to that table and
** Expr.iColumn is the column number for the specific column. If the
** expression is used as a result in an aggregate SELECT, then the
** value is also stored in the Expr.iAgg column in the aggregate so that
** it can be accessed after all aggregates are computed.
**
** If the expression is an unbound variable marker (a question mark
** character '?' in the original SQL) then the Expr.iTable holds the index
** number for that variable.
**
** If the expression is a subquery then Expr.iColumn holds an integer
** register number containing the result of the subquery. If the
** subquery gives a constant result, then iTable is -1. If the subquery
** gives a different answer at different times during statement processing
** then iTable is the address of a subroutine that computes the subquery.
**
** If the Expr is of type OP_Column, and the table it is selecting from
** is a disk table or the "old.*" pseudo-table, then pTab points to the
** corresponding table definition.
**
** ALLOCATION NOTES:
**
** Expr objects can use a lot of memory space in database schema. To
** help reduce memory requirements, sometimes an Expr object will be
** truncated. And to reduce the number of memory allocations, sometimes
** two or more Expr objects will be stored in a single memory allocation,
** together with Expr.u.zToken strings.
**
** The EP_Reduced and EP_TokenOnly flags are set when
** an Expr object is truncated. When EP_Reduced is set, then all
** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees
** are contained within the same memory allocation. Note, however, that
** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately
** allocated, regardless of whether or not EP_Reduced is set.
*/
type TExpr1 = struct {
Fop Tu8
FaffExpr int8
Fop2 Tu8
Fflags Tu32
Fu struct {
FiValue [0]int32
FzToken uintptr
}
FpLeft uintptr
FpRight uintptr
Fx struct {
FpSelect [0]uintptr
FpList uintptr
}
FnHeight int32
FiTable int32
FiColumn TynVar
FiAgg Ti16
Fw struct {
FiOfst [0]int32
FiJoin int32
}
FpAggInfo uintptr
Fy struct {
FpWin [0]uintptr
Fsub [0]struct {
FiAddr int32
FregReturn int32
}
FpTab uintptr
}
}
type Expr1 = TExpr1
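// A hypothetical, simplified model (not the generated TExpr1 layout) of the
// binary-operator shape described above, used only to illustrate how the
// pLeft/pRight subexpressions form a tree; either child may be nil.
type exampleExprNode struct {
	op          uint8
	left, right *exampleExprNode
}

// exampleExprHeight sketches how a value such as Expr.nHeight could be
// derived by walking the left and right subtrees.
func exampleExprHeight(e *exampleExprNode) int {
	if e == nil {
		return 0
	}
	l, r := exampleExprHeight(e.left), exampleExprHeight(e.right)
	if l > r {
		return l + 1
	}
	return r + 1
}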
/* The following are the meanings of bits in the Expr.flags field.
** Value restrictions:
**
** EP_Agg == NC_HasAgg == SF_HasAgg
** EP_Win == NC_HasWin
*/
/* 0x80000000 // Available */
/* The EP_Propagate mask is a set of properties that automatically propagate
** upwards into parent nodes.
*/
/* Macros can be used to test, set, or clear bits in the
** Expr.flags field.
*/
/* Macros used to ensure that the correct members of unions are accessed
** in Expr.
*/
/* Flags for use with Expr.vvaFlags
*/
/* The ExprSetVVAProperty() macro is used for Verification, Validation,
** and Accreditation only. It works like ExprSetProperty() during VVA
** processes but is a no-op for delivery.
*/
/*
** Macros to determine the number of bytes required by a normal Expr
** struct, an Expr struct with the EP_Reduced flag set in Expr.flags
** and an Expr struct with the EP_TokenOnly flag set.
*/
/*
** Flags passed to the sqlite3ExprDup() function. See the header comment
** above sqlite3ExprDup() for details.
*/
/*
** True if the expression passed as an argument was a function with
** an OVER() clause (a window function).
*/
/*
** A list of expressions. Each expression may optionally have a
** name. An expr/name combination can be used in several ways, such
** as the list of "expr AS ID" fields following a "SELECT" or in the
** list of "ID = expr" items in an UPDATE. A list of expressions can
** also be used as the argument to a function, in which case the a.zName
** field is not used.
**
** In order to try to keep memory usage down, the Expr.a.zEName field
** is used for multiple purposes:
**
** eEName Usage
** ---------- -------------------------
** ENAME_NAME (1) the AS of result set column
** (2) COLUMN= of an UPDATE
**
** ENAME_TAB DB.TABLE.NAME used to resolve names
** of subqueries
**
** ENAME_SPAN Text of the original result set
** expression.
*/
type TExprList1 = struct {
FnExpr int32
FnAlloc int32
Fa [1]TExprList_item
}
type ExprList1 = TExprList1
/*
** Allowed values for Expr.a.eEName
*/
/*
** An instance of this structure can hold a simple list of identifiers,
** such as the list "a,b,c" in the following statements:
**
** INSERT INTO t(a,b,c) VALUES ...;
** CREATE INDEX idx ON t(a,b,c);
** CREATE TRIGGER trig BEFORE UPDATE ON t(a,b,c) ...;
**
** The IdList.a.idx field is used when the IdList represents the list of
** column names after a table name in an INSERT statement. In the statement
**
** INSERT INTO t(a,b,c) ...
**
** If "a" is the k-th column of table "t", then IdList.a[0].idx==k.
*/
type TIdList1 = struct {
FnId int32
FeU4 Tu8
Fa [1]TIdList_item
}
type IdList1 = TIdList1
/*
** Allowed values for IdList.eType, which determines which value of the a.u4
** is valid.
*/
/*
** The SrcItem object represents a single term in the FROM clause of a query.
** The SrcList object is mostly an array of SrcItems.
**
** The jointype starts out showing the join type between the current table
** and the next table on the list. The parser builds the list this way.
** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each
** jointype expresses the join between the table and the previous table.
**
** In the colUsed field, the high-order bit (bit 63) is set if the table
** contains more than 63 columns and the 64-th or later column is used.
**
** Union member validity:
**
** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc
** u1.pFuncArg fg.isTabFunc && !fg.isIndexedBy
** u2.pIBIndex fg.isIndexedBy && !fg.isCte
** u2.pCteUse fg.isCte && !fg.isIndexedBy
*/
type TSrcItem1 = struct {
FpSchema uintptr
FzDatabase uintptr
FzName uintptr
FzAlias uintptr
FpTab uintptr
FpSelect uintptr
FaddrFillSub int32
FregReturn int32
FregResult int32
Ffg struct {
F__ccgo_align [0]uint32
Fjointype Tu8
F__ccgo_align1 [2]byte
F__ccgo4 uint16
}
FiCursor int32
Fu3 struct {
FpUsing [0]uintptr
FpOn uintptr
}
FcolUsed TBitmask
Fu1 struct {
FpFuncArg [0]uintptr
FzIndexedBy uintptr
}
Fu2 struct {
FpCteUse [0]uintptr
FpIBIndex uintptr
}
}
type SrcItem1 = TSrcItem1
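// A hypothetical sketch of the jointype shift described above (names are
// illustrative; the real work is done by sqlite3SrcListShiftJoinType): the
// parser records each join operator on the left-hand term, and the shift
// moves every jointype one slot to the right so that entry i describes the
// join between term i and term i-1, leaving entry 0 as zero.
func exampleShiftJoinTypes(jointype []uint8) {
	for i := len(jointype) - 1; i > 0; i-- {
		jointype[i] = jointype[i-1]
	}
	if len(jointype) > 0 {
		jointype[0] = 0
	}
}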
/*
** The OnOrUsing object represents either an ON clause or a USING clause.
** It can never be both at the same time, but it can be neither.
*/
type TOnOrUsing1 = struct {
FpOn uintptr
FpUsing uintptr
}
type OnOrUsing1 = TOnOrUsing1
/*
** This object represents one or more tables that are the source of
** content for an SQL statement. For example, a single SrcList object
** is used to hold the FROM clause of a SELECT statement. SrcList also
** represents the target tables for DELETE, INSERT, and UPDATE statements.
**
*/
type TSrcList1 = struct {
FnSrc int32
FnAlloc Tu32
Fa [1]TSrcItem
}
type SrcList1 = TSrcList1
/*
** Permitted values of the SrcList.a.jointype field
*/
/*
** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin()
** and the WhereInfo.wctrlFlags member.
**
** Value constraints (enforced via assert()):
** WHERE_USE_LIMIT == SF_FixedLimit
*/
/* 0x2000 not currently used */
/* 0x8000 not currently used */
/* Allowed return values from sqlite3WhereIsDistinct()
*/
/*
** A NameContext defines a context in which to resolve table and column
** names. The context consists of a list of tables (the pSrcList field) and
** a list of named expressions (pEList). The named expression list may
** be NULL. The pSrc corresponds to the FROM clause of a SELECT or
** to the table being operated on by INSERT, UPDATE, or DELETE. The
** pEList corresponds to the result set of a SELECT and is NULL for
** other statements.
**
** NameContexts can be nested. When resolving names, the inner-most
** context is searched first. If no match is found, the next outer
** context is checked. If there is still no match, the next context
** is checked. This process continues until either a match is found
** or all contexts are checked. When a match is found, the nRef member of
** the context containing the match is incremented.
**
** Each subquery gets a new NameContext. The pNext field points to the
** NameContext in the parent query. Thus the process of scanning the
** NameContext list corresponds to searching through successively outer
** subqueries looking for a match.
*/
type TNameContext1 = struct {
FpParse uintptr
FpSrcList uintptr
FuNC struct {
FpAggInfo [0]uintptr
FpUpsert [0]uintptr
FiBaseReg [0]int32
FpEList uintptr
}
FpNext uintptr
FnRef int32
FnNcErr int32
FncFlags int32
FnNestedSelect Tu32
FpWinSelect uintptr
}
type NameContext1 = TNameContext1
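// A hypothetical, simplified model of the nested NameContext search described
// above: resolution starts in the innermost context and follows pNext toward
// the outer query until a match is found, bumping nRef on the matching
// context. The map-based lookup here is illustrative only.
type exampleNameScope struct {
	names map[string]bool
	next  *exampleNameScope
	nRef  int
}

func exampleResolveName(nc *exampleNameScope, name string) *exampleNameScope {
	for ; nc != nil; nc = nc.next {
		if nc.names[name] {
			nc.nRef++ // record that this context supplied the match
			return nc
		}
	}
	return nil // the name is not defined in any enclosing context
}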
/*
** Allowed values for the NameContext, ncFlags field.
**
** Value constraints (all checked via assert()):
** NC_HasAgg == SF_HasAgg == EP_Agg
** NC_MinMaxAgg == SF_MinMaxAgg == SQLITE_FUNC_MINMAX
** NC_OrderAgg == SF_OrderByReqd == SQLITE_FUNC_ANYORDER
** NC_HasWin == EP_Win
**
*/
/*
** An instance of the following object describes a single ON CONFLICT
** clause in an upsert.
**
** The pUpsertTarget field is only set if the ON CONFLICT clause includes a
** conflict-target clause. (In "ON CONFLICT(a,b)" the "(a,b)" is the
** conflict-target clause.) The pUpsertTargetWhere is the optional
** WHERE clause used to identify partial unique indexes.
**
** pUpsertSet is the list of column=expr terms of the UPDATE statement.
** The pUpsertSet field is NULL for an ON CONFLICT DO NOTHING. The
** pUpsertWhere is the WHERE clause for the UPDATE and is NULL if the
** WHERE clause is omitted.
*/
type TUpsert1 = struct {
FpUpsertTarget uintptr
FpUpsertTargetWhere uintptr
FpUpsertSet uintptr
FpUpsertWhere uintptr
FpNextUpsert uintptr
FisDoUpdate Tu8
FisDup Tu8
FpToFree uintptr
FpUpsertIdx uintptr
FpUpsertSrc uintptr
FregData int32
FiDataCur int32
FiIdxCur int32
}
type Upsert1 = TUpsert1
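/*
** A hedged illustration of how the fields above map onto an upsert, assuming
** ordinary SQLite upsert syntax (the statement is an example only):
**
**     INSERT INTO t(a,b) VALUES(1,2)
**       ON CONFLICT(a) WHERE a>0 DO UPDATE SET b=excluded.b WHERE t.b<5;
**
** Here pUpsertTarget describes "(a)", pUpsertTargetWhere holds "a>0",
** pUpsertSet holds "b=excluded.b", and pUpsertWhere holds "t.b<5". For a
** DO NOTHING clause, pUpsertSet and pUpsertWhere are both NULL.
*/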
/*
** An instance of the following structure contains all information
** needed to generate code for a single SELECT statement.
**
** See the header comment on the computeLimitRegisters() routine for a
** detailed description of the meaning of the iLimit and iOffset fields.
**
** addrOpenEphm[] entries contain the address of OP_OpenEphemeral opcodes.
** These addresses must be stored so that we can go back and fill in
** the P4_KEYINFO and P2 parameters later. Neither the KeyInfo nor
** the number of columns in P2 can be computed at the same time
** as the OP_OpenEphm instruction is coded because not
** enough information about the compound query is known at that point.
** The KeyInfo for addrOpenEphm[0] and [1] contains collating sequences
** for the result set. The KeyInfo for addrOpenEphm[2] contains collating
** sequences for the ORDER BY clause.
*/
type TSelect1 = struct {
Fop Tu8
FnSelectRow TLogEst
FselFlags Tu32
FiLimit int32
FiOffset int32
FselId Tu32
FaddrOpenEphm [2]int32
FpEList uintptr
FpSrc uintptr
FpWhere uintptr
FpGroupBy uintptr
FpHaving uintptr
FpOrderBy uintptr
FpPrior uintptr
FpNext uintptr
FpLimit uintptr
FpWith uintptr
FpWin uintptr
FpWinDefn uintptr
}
type Select1 = TSelect1
/*
** Allowed values for Select.selFlags. The "SF" prefix stands for
** "Select Flag".
**
** Value constraints (all checked via assert())
** SF_HasAgg == NC_HasAgg
** SF_MinMaxAgg == NC_MinMaxAgg == SQLITE_FUNC_MINMAX
** SF_OrderByReqd == NC_OrderAgg == SQLITE_FUNC_ANYORDER
** SF_FixedLimit == WHERE_USE_LIMIT
*/
/* True if S exists and has SF_NestedFrom */
/*
** The results of a SELECT can be distributed in several ways, as defined
** by one of the following macros. The "SRT" prefix means "SELECT Result
** Type".
**
** SRT_Union Store results as a key in a temporary index
** identified by pDest->iSDParm.
**
** SRT_Except Remove results from the temporary index pDest->iSDParm.
**
** SRT_Exists Store a 1 in memory cell pDest->iSDParm if the result
** set is not empty.
**
** SRT_Discard Throw the results away. This is used by SELECT
** statements within triggers whose only purpose is
** the side-effects of functions.
**
** SRT_Output Generate a row of output (using the OP_ResultRow
** opcode) for each row in the result set.
**
** SRT_Mem Only valid if the result is a single column.
** Store the first column of the first result row
** in register pDest->iSDParm then abandon the rest
** of the query. This destination implies "LIMIT 1".
**
** SRT_Set The result must be a single column. Store each
** row of result as the key in table pDest->iSDParm.
** Apply the affinity pDest->affSdst before storing
** results. Used to implement "IN (SELECT ...)".
**
** SRT_EphemTab Create a temporary table pDest->iSDParm and store
** the result there. The cursor is left open after
** returning. This is like SRT_Table except that
** this destination uses OP_OpenEphemeral to create
** the table first.
**
** SRT_Coroutine Generate a co-routine that returns a new row of
** results each time it is invoked. The entry point
** of the co-routine is stored in register pDest->iSDParm
** and the result row is stored in pDest->nDest registers
** starting with pDest->iSdst.
**
** SRT_Table Store results in temporary table pDest->iSDParm.
** SRT_Fifo This is like SRT_EphemTab except that the table
** is assumed to already be open. SRT_Fifo has
** the additional property of being able to ignore
** the ORDER BY clause.
**
** SRT_DistFifo Store results in a temporary table pDest->iSDParm.
** But also use temporary table pDest->iSDParm+1 as
** a record of all prior results and ignore any duplicate
** rows. Name means: "Distinct Fifo".
**
** SRT_Queue Store results in priority queue pDest->iSDParm (really
** an index). Append a sequence number so that all entries
** are distinct.
**
** SRT_DistQueue Store results in priority queue pDest->iSDParm only if
** the same record has never been stored before. The
** index at pDest->iSDParm+1 hold all prior stores.
**
** SRT_Upfrom Store results in the temporary table already opened by
** pDest->iSDParm. If (pDest->iSDParm<0), then the temp
** table is an intkey table - in this case the first
** column returned by the SELECT is used as the integer
** key. If (pDest->iSDParm>0), then the table is an index
** table. (pDest->iSDParm) is the number of key columns in
** each index record in this case.
*/
/* The DISTINCT clause is ignored for all of the above. Note that
** IgnorableDistinct() implies IgnorableOrderby() */
/* The ORDER BY clause is ignored for all of the above */
/*
** An instance of this object describes where to put the results of
** a SELECT statement.
*/
type TSelectDest1 = struct {
FeDest Tu8
FiSDParm int32
FiSDParm2 int32
FiSdst int32
FnSdst int32
FzAffSdst uintptr
FpOrderBy uintptr
}
type SelectDest1 = TSelectDest1
/*
** During code generation of statements that do inserts into AUTOINCREMENT
** tables, the following information is attached to the Table.u.autoInc.p
** pointer of each autoincrement table to record some side information that
** the code generator needs. We have to keep per-table autoincrement
** information in case inserts are done within triggers. Triggers do not
** normally coordinate their activities, but we do need to coordinate the
** loading and saving of autoincrement information.
*/
type TAutoincInfo1 = struct {
FpNext uintptr
FpTab uintptr
FiDb int32
FregCtr int32
}
type AutoincInfo1 = TAutoincInfo1
/*
** At least one instance of the following structure is created for each
** trigger that may be fired while parsing an INSERT, UPDATE or DELETE
** statement. All such objects are stored in the linked list headed at
** Parse.pTriggerPrg and deleted once statement compilation has been
** completed.
**
** A Vdbe sub-program that implements the body and WHEN clause of trigger
** TriggerPrg.pTrigger, assuming a default ON CONFLICT clause of
** TriggerPrg.orconf, is stored in the TriggerPrg.pProgram variable.
** The Parse.pTriggerPrg list never contains two entries with the same
** values for both pTrigger and orconf.
**
** The TriggerPrg.aColmask[0] variable is set to a mask of old.* columns
** accessed (or set to 0 for triggers fired as a result of INSERT
** statements). Similarly, the TriggerPrg.aColmask[1] variable is set to
** a mask of new.* columns used by the program.
*/
type TTriggerPrg1 = struct {
FpTrigger uintptr
FpNext uintptr
FpProgram uintptr
Forconf int32
FaColmask [2]Tu32
}
type TriggerPrg1 = TTriggerPrg1
// C documentation
//
// /*
// ** The yDbMask datatype for the bitmask of all attached databases.
// */
type TyDbMask = uint32
type yDbMask = TyDbMask
/*
** For each index X that has as one of its arguments either an expression
** or the name of a virtual generated column, and if X is in scope such that
** the value of the expression can simply be read from the index, then
** there is an instance of this object on the Parse.pIdxExpr list.
**
** During code generation, while generating code to evaluate expressions,
** this list is consulted and if a matching expression is found, the value
** is read from the index rather than being recomputed.
*/
type TIndexedExpr1 = struct {
FpExpr uintptr
FiDataCur int32
FiIdxCur int32
FiIdxCol int32
FbMaybeNullRow Tu8
Faff Tu8
FpIENext uintptr
}
type IndexedExpr1 = TIndexedExpr1
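/*
** An illustrative example (not from the original source) of the reuse
** described above: given
**
**     CREATE INDEX t1ab ON t1(a+b);
**     SELECT a+b FROM t1 WHERE a+b>10;
**
** the value of the expression "a+b" can be read directly from the t1ab index
** whenever that index is in scope, instead of being recomputed from the
** table row.
*/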
/*
** An instance of the ParseCleanup object specifies an operation that
** should be performed after parsing to deallocate resources obtained
** during the parse and which are no longer needed.
*/
type TParseCleanup1 = struct {
FpNext uintptr
FpPtr uintptr
FxCleanup uintptr
}
type ParseCleanup1 = TParseCleanup1
/*
** An SQL parser context. A copy of this structure is passed through
** the parser and down into all the parser action routines in order to
** carry around information that is global to the entire parse.
**
** The structure is divided into two parts. When the parser and code
** generator call themselves recursively, the first part of the structure
** is constant but the second part is reset at the beginning and end of
** each recursion.
**
** The nTableLock and aTableLock variables are only used if the shared-cache
** feature is enabled (if sqlite3Tsd()->useSharedData is true). They are
** used to store the set of table-locks required by the statement being
** compiled. Function sqlite3TableLock() is used to add entries to the
** list.
*/
type TParse1 = struct {
Fdb uintptr
FzErrMsg uintptr
FpVdbe uintptr
Frc int32
FcolNamesSet Tu8
FcheckSchema Tu8
Fnested Tu8
FnTempReg Tu8
FisMultiWrite Tu8
FmayAbort Tu8
FhasCompound Tu8
FokConstFactor Tu8
FdisableLookaside Tu8
FprepFlags Tu8
FwithinRJSubrtn Tu8
FnRangeReg int32
FiRangeReg int32
FnErr int32
FnTab int32
FnMem int32
FszOpAlloc int32
FiSelfTab int32
FnLabel int32
FnLabelAlloc int32
FaLabel uintptr
FpConstExpr uintptr
FpIdxEpr uintptr
FpIdxPartExpr uintptr
FconstraintName TToken
FwriteMask TyDbMask
FcookieMask TyDbMask
FregRowid int32
FregRoot int32
FnMaxArg int32
FnSelect int32
FnProgressSteps Tu32
FnTableLock int32
FaTableLock uintptr
FpAinc uintptr
FpToplevel uintptr
FpTriggerTab uintptr
FpTriggerPrg uintptr
FpCleanup uintptr
Fu1 struct {
FpReturning [0]uintptr
FaddrCrTab int32
F__ccgo_pad2 [4]byte
}
Foldmask Tu32
Fnewmask Tu32
FnQueryLoop TLogEst
FeTriggerOp Tu8
FbReturning Tu8
FeOrconf Tu8
FdisableTriggers Tu8
FaTempReg [8]int32
FpOuterParse uintptr
FsNameToken TToken
FsLastToken TToken
FnVar TynVar
FiPkSortOrder Tu8
Fexplain Tu8
FeParseMode Tu8
FnVtabLock int32
FnHeight int32
FaddrExplain int32
FpVList uintptr
FpReprepare uintptr
FzTail uintptr
FpNewTable uintptr
FpNewIndex uintptr
FpNewTrigger uintptr
FzAuthContext uintptr
FsArg TToken
FapVtabLock uintptr
FpWith uintptr
FpRename uintptr
}
type Parse1 = TParse1
/* Allowed values for Parse.eParseMode
*/
/*
** Sizes and pointers of various parts of the Parse object.
*/
/*
** Return true if currently inside an sqlite3_declare_vtab() call.
*/
/*
** An instance of the following structure can be declared on a stack and used
** to save the Parse.zAuthContext value so that it can be restored later.
*/
type TAuthContext1 = struct {
FzAuthContext uintptr
FpParse uintptr
}
type AuthContext1 = TAuthContext1
/*
** Bitfield flags for P5 value in various opcodes.
**
** Value constraints (enforced via assert()):
** OPFLAG_LENGTHARG == SQLITE_FUNC_LENGTH
** OPFLAG_TYPEOFARG == SQLITE_FUNC_TYPEOF
** OPFLAG_BULKCSR == BTREE_BULKLOAD
** OPFLAG_SEEKEQ == BTREE_SEEK_EQ
** OPFLAG_FORDELETE == BTREE_FORDELETE
** OPFLAG_SAVEPOSITION == BTREE_SAVEPOSITION
** OPFLAG_AUXDELETE == BTREE_AUXDELETE
*/
/* Also used in P2 (not P5) of OP_Delete */
/*
** Each trigger present in the database schema is stored as an instance of
** struct Trigger.
**
** Pointers to instances of struct Trigger are stored in two ways.
** 1. In the "trigHash" hash table (part of the sqlite3* that represents the
** database). This allows Trigger structures to be retrieved by name.
** 2. All triggers associated with a single table form a linked list, using the
** pNext member of struct Trigger. A pointer to the first element of the
** linked list is stored as the "pTrigger" member of the associated
** struct Table.
**
** The "step_list" member points to the first element of a linked list
** containing the SQL statements specified as the trigger program.
*/
type TTrigger1 = struct {
FzName uintptr
Ftable uintptr
Fop Tu8
Ftr_tm Tu8
FbReturning Tu8
FpWhen uintptr
FpColumns uintptr
FpSchema uintptr
FpTabSchema uintptr
Fstep_list uintptr
FpNext uintptr
}
type Trigger1 = TTrigger1
/*
** A trigger is either a BEFORE or an AFTER trigger. The following constants
** determine which.
**
** If there are multiple triggers, you might have some BEFORE and some AFTER.
** In that case, the constants below can be ORed together.
*/
/*
** An instance of struct TriggerStep is used to store a single SQL statement
** that is a part of a trigger-program.
**
** Instances of struct TriggerStep are stored in a singly linked list (linked
** using the "pNext" member) referenced by the "step_list" member of the
** associated struct Trigger instance. The first element of the linked list is
** the first step of the trigger-program.
**
** The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or
** "SELECT" statement. The meanings of the other members is determined by the
** value of "op" as follows:
**
** (op == TK_INSERT)
** orconf -> stores the ON CONFLICT algorithm
** pSelect -> The content to be inserted - either a SELECT statement or
** a VALUES clause.
** zTarget -> Dequoted name of the table to insert into.
** pIdList -> If this is an INSERT INTO ... () VALUES ...
** statement, then this stores the column-names to be
** inserted into.
** pUpsert -> The ON CONFLICT clauses for an Upsert
**
** (op == TK_DELETE)
** zTarget -> Dequoted name of the table to delete from.
** pWhere -> The WHERE clause of the DELETE statement if one is specified.
** Otherwise NULL.
**
** (op == TK_UPDATE)
** zTarget -> Dequoted name of the table to update.
** pWhere -> The WHERE clause of the UPDATE statement if one is specified.
** Otherwise NULL.
** pExprList -> A list of the columns to update and the expressions to update
** them to. See sqlite3Update() documentation of "pChanges"
** argument.
**
** (op == TK_SELECT)
** pSelect -> The SELECT statement
**
** (op == TK_RETURNING)
** pExprList -> The list of expressions that follow the RETURNING keyword.
**
*/
type TTriggerStep1 = struct {
Fop Tu8
Forconf Tu8
FpTrig uintptr
FpSelect uintptr
FzTarget uintptr
FpFrom uintptr
FpWhere uintptr
FpExprList uintptr
FpIdList uintptr
FpUpsert uintptr
FzSpan uintptr
FpNext uintptr
FpLast uintptr
}
type TriggerStep1 = TTriggerStep1
/*
** Information about a RETURNING clause
*/
type TReturning1 = struct {
FpParse uintptr
FpReturnEL uintptr
FretTrig TTrigger
FretTStep TTriggerStep
FiRetCur int32
FnRetCol int32
FiRetReg int32
FzName [40]int8
}
type Returning1 = TReturning1
/*
** The following object is the header for an "RCStr" or "reference-counted
** string". An RCStr is passed around and used like any other char*
** that has been dynamically allocated. The important interface
** differences:
**
** 1. RCStr strings are reference counted. They are deallocated
** when the reference count reaches zero.
**
** 2. Use sqlite3RCStrUnref() to free an RCStr string rather than
** sqlite3_free()
**
** 3. Make a (read-only) copy of a read-only RCStr string using
** sqlite3RCStrRef().
**
** "String" is in the name, but an RCStr object can also be used to hold
** binary data.
*/
type TRCStr1 = struct {
FnRCRef Tu64
}
type RCStr1 = TRCStr1
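// A hypothetical Go sketch (the names are illustrative, not the library API)
// of the reference-counting discipline described above: the count lives in a
// header that travels with the buffer, and the storage is released only when
// the final reference is dropped.
type exampleRCBuf struct {
	nRef uint64
	buf  []byte
}

func exampleRCBufRef(p *exampleRCBuf) *exampleRCBuf {
	p.nRef++
	return p
}

func exampleRCBufUnref(p *exampleRCBuf) {
	p.nRef--
	if p.nRef == 0 {
		p.buf = nil // last reference gone: drop the underlying storage
	}
}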
// C documentation
//
// /*
// ** A pointer to this structure is used to communicate information
// ** from sqlite3Init and OP_ParseSchema into the sqlite3InitCallback.
// */
type TInitData = struct {
Fdb uintptr
FpzErrMsg uintptr
FiDb int32
Frc int32
FmInitFlags Tu32
FnInitRow Tu32
FmxPage TPgno
}
type InitData = TInitData
/*
** Allowed values for mInitFlags
*/
/* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled
** on debug-builds of the CLI using ".testctrl tune ID VALUE". Tuning
** parameters are for temporary use during development, to help find
** optimal values for parameters in the query planner. They should not
** be used on trunk check-ins. They are a temporary mechanism available
** for transient development builds only.
**
** Tuning parameters are numbered starting with 1.
*/
/*
** Structure containing global configuration data for the SQLite library.
**
** This structure also contains some state information.
*/
type TSqlite3Config = struct {
FbMemstat int32
FbCoreMutex Tu8
FbFullMutex Tu8
FbOpenUri Tu8
FbUseCis Tu8
FbSmallMalloc Tu8
FbExtraSchemaChecks Tu8
FbUseLongDouble Tu8
FmxStrlen int32
FneverCorrupt int32
FszLookaside int32
FnLookaside int32
FnStmtSpill int32
Fm Tsqlite3_mem_methods
Fmutex Tsqlite3_mutex_methods
Fpcache2 Tsqlite3_pcache_methods2
FpHeap uintptr
FnHeap int32
FmnReq int32
FmxReq int32
FszMmap Tsqlite3_int64
FmxMmap Tsqlite3_int64
FpPage uintptr
FszPage int32
FnPage int32
FmxParserStack int32
FsharedCacheEnabled int32
FszPma Tu32
FisInit int32
FinProgress int32
FisMutexInit int32
FisMallocInit int32
FisPCacheInit int32
FnRefInitMutex int32
FpInitMutex uintptr
FxLog uintptr
FpLogArg uintptr
FmxMemdbSize Tsqlite3_int64
FxTestCallback uintptr
FbLocaltimeFault int32
FxAltLocaltime uintptr
FiOnceResetThreshold int32
FszSorterRef Tu32
FiPrngSeed uint32
}
type Sqlite3Config = TSqlite3Config
/*
** This macro is used inside of assert() statements to indicate that
** the assert is only valid on a well-formed database. Instead of:
**
** assert( X );
**
** One writes:
**
** assert( X || CORRUPT_DB );
**
** CORRUPT_DB is true during normal operation. CORRUPT_DB does not indicate
** that the database is definitely corrupt, only that it might be corrupt.
** For most test cases, CORRUPT_DB is set to false using a special
** sqlite3_test_control(). This enables assert() statements to prove
** things that are always true for well-formed databases.
*/
/*
** Context pointer passed down through the tree-walk.
*/
type TWalker1 = struct {
FpParse uintptr
FxExprCallback uintptr
FxSelectCallback uintptr
FxSelectCallback2 uintptr
FwalkerDepth int32
FeCode Tu16
FmWFlags Tu16
Fu struct {
Fn [0]int32
FiCur [0]int32
FpSrcList [0]uintptr
FpCCurHint [0]uintptr
FpRefSrcList [0]uintptr
FaiCol [0]uintptr
FpIdxCover [0]uintptr
FpGroupBy [0]uintptr
FpSelect [0]uintptr
FpRewrite [0]uintptr
FpConst [0]uintptr
FpRename [0]uintptr
FpTab [0]uintptr
FpCovIdxCk [0]uintptr
FpSrcItem [0]uintptr
FpFix [0]uintptr
FaMem [0]uintptr
FpNC uintptr
}
}
type Walker1 = TWalker1
/*
** The following structure contains information used by the sqliteFix...
** routines as they walk the parse tree to make database references
** explicit.
*/
type TDbFixer1 = struct {
FpParse uintptr
Fw TWalker
FpSchema uintptr
FbTemp Tu8
FzDb uintptr
FzType uintptr
FpName uintptr
}
type DbFixer1 = TDbFixer1
/*
** Return code from the parse-tree walking primitives and their
** callbacks.
*/
/*
** A single common table expression
*/
type TCte1 = struct {
FzName uintptr
FpCols uintptr
FpSelect uintptr
FzCteErr uintptr
FpUse uintptr
FeM10d Tu8
}
type Cte1 = TCte1
/*
** Allowed values for the materialized flag (eM10d):
*/
/*
** An instance of the With object represents a WITH clause containing
** one or more CTEs (common table expressions).
*/
type TWith1 = struct {
FnCte int32
FbView int32
FpOuter uintptr
Fa [1]TCte
}
type With1 = TWith1
/*
** The Cte object is not guaranteed to persist for the entire duration
** of code generation. (The query flattener or other parser tree
** edits might delete it.) The following object records information
** about each Common Table Expression that must be preserved for the
** duration of the parse.
**
** The CteUse objects are freed using sqlite3ParserAddCleanup() rather
** than sqlite3SelectDelete(), which is what enables them to persist
** until the end of code generation.
*/
type TCteUse1 = struct {
FnUse int32
FaddrM9e int32
FregRtn int32
FiCur int32
FnRowEst TLogEst
FeM10d Tu8
}
type CteUse1 = TCteUse1
/* Client data associated with sqlite3_set_clientdata() and
** sqlite3_get_clientdata().
*/
type TDbClientData1 = struct {
FpNext uintptr
FpData uintptr
FxDestructor uintptr
FzName [1]int8
}
type DbClientData1 = TDbClientData1
/*
** This object is used in various ways, most (but not all) related to window
** functions.
**
** (1) A single instance of this structure is attached to
** the Expr.y.pWin field for each window function in an expression tree.
** This object holds the information contained in the OVER clause,
** plus additional fields used during code generation.
**
** (2) All window functions in a single SELECT form a linked-list
** attached to Select.pWin. The Window.pFunc and Window.pExpr
** fields point back to the expression that is the window function.
**
** (3) The terms of the WINDOW clause of a SELECT are instances of this
** object on a linked list attached to Select.pWinDefn.
**
** (4) For an aggregate function with a FILTER clause, an instance
** of this object is stored in Expr.y.pWin with eFrmType set to
** TK_FILTER. In this case the only field used is Window.pFilter.
**
** The uses (1) and (2) are really the same Window object that just happens
** to be accessible in two different ways. Use case (3) uses separate objects.
*/
type TWindow1 = struct {
FzName uintptr
FzBase uintptr
FpPartition uintptr
FpOrderBy uintptr
FeFrmType Tu8
FeStart Tu8
FeEnd Tu8
FbImplicitFrame Tu8
FeExclude Tu8
FpStart uintptr
FpEnd uintptr
FppThis uintptr
FpNextWin uintptr
FpFilter uintptr
FpWFunc uintptr
FiEphCsr int32
FregAccum int32
FregResult int32
FcsrApp int32
FregApp int32
FregPart int32
FpOwner uintptr
FnBufferCol int32
FiArgCol int32
FregOne int32
FregStartRowid int32
FregEndRowid int32
FbExprArgs Tu8
}
type Window1 = TWindow1
/*
** An instance of the following structure holds information about SQL
** functions arguments that are the parameters to the printf() function.
*/
type TPrintfArguments1 = struct {
FnArg int32
FnUsed int32
FapArg uintptr
}
type PrintfArguments1 = TPrintfArguments1
/*
** An instance of this object receives the decoding of a floating point
** value into an approximate decimal representation.
*/
type TFpDecode1 = struct {
Fsign int8
FisSpecial int8
Fn int32
FiDP int32
Fz uintptr
FzBuf [24]int8
}
type FpDecode1 = TFpDecode1
/************** End of sqliteInt.h *******************************************/
/************** Begin file os_common.h ***************************************/
/*
** 2004 May 22
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
** This file contains macros and a little bit of code that is common to
** all of the platform-specific files (os_*.c) and is #included into those
** files.
**
** This file should be #included by the os_*.c files only. It is not a
** general purpose header file.
*/
/*
** At least two bugs have slipped in because we changed the MEMORY_DEBUG
** macro to SQLITE_DEBUG and some older makefiles have not yet made the
** switch. The following code should catch this problem at compile-time.
*/
/*
** Macros for performance tracing. Normally turned off. Only works
** on i486 hardware.
*/
/*
** If we compile with the SQLITE_TEST macro set, then the following block
** of code will give us the ability to simulate a disk I/O error. This
** is used for testing the I/O recovery logic.
*/
/*
** When testing, keep a count of the number of open files.
*/
/************** End of os_common.h *******************************************/
/************** Begin file ctime.c *******************************************/
/* DO NOT EDIT!
** This file is automatically generated by the script in the canonical
** SQLite source tree at tool/mkctimec.tcl.
**
** To modify this header, edit any of the various lists in that script
** which specify categories of generated conditionals in this file.
*/
/*
** 2010 February 23
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file implements routines used to report what compile-time options
** SQLite was built with.
*/
/*
** Include the configuration header output by 'configure' if we're using the
** autoconf-based build
*/
/* These macros are provided to "stringify" the value of the define
** for those options in which the value is meaningful. */
/* Like CTIMEOPT_VAL, but especially for SQLITE_DEFAULT_LOOKASIDE. This
** option requires a separate macro because legal values contain a single
** comma. e.g. (-DSQLITE_DEFAULT_LOOKASIDE="100,100") */
/* #include "sqliteInt.h" */
// C documentation
//
// /*
// ** An array of names of all compile-time options. This array should
// ** be sorted A-Z.
// **
// ** This array looks large, but a typical installation actually uses
// ** only a handful of compile-time options, so this array is usually
// ** rather short and uses little memory space.
// */
var _sqlite3azCompileOpt = [54]uintptr{
0: __ccgo_ts,
1: __ccgo_ts + 20,
2: __ccgo_ts + 42,
3: __ccgo_ts + 61,
4: __ccgo_ts + 86,
5: __ccgo_ts + 108,
6: __ccgo_ts + 138,
7: __ccgo_ts + 158,
8: __ccgo_ts + 178,
9: __ccgo_ts + 201,
10: __ccgo_ts + 226,
11: __ccgo_ts + 253,
12: __ccgo_ts + 278,
13: __ccgo_ts + 300,
14: __ccgo_ts + 332,
15: __ccgo_ts + 358,
16: __ccgo_ts + 383,
17: __ccgo_ts + 404,
18: __ccgo_ts + 427,
19: __ccgo_ts + 446,
20: __ccgo_ts + 458,
21: __ccgo_ts + 473,
22: __ccgo_ts + 495,
23: __ccgo_ts + 520,
24: __ccgo_ts + 543,
25: __ccgo_ts + 565,
26: __ccgo_ts + 576,
27: __ccgo_ts + 589,
28: __ccgo_ts + 604,
29: __ccgo_ts + 620,
30: __ccgo_ts + 633,
31: __ccgo_ts + 654,
32: __ccgo_ts + 678,
33: __ccgo_ts + 701,
34: __ccgo_ts + 717,
35: __ccgo_ts + 733,
36: __ccgo_ts + 757,
37: __ccgo_ts + 784,
38: __ccgo_ts + 804,
39: __ccgo_ts + 825,
40: __ccgo_ts + 847,
41: __ccgo_ts + 877,
42: __ccgo_ts + 902,
43: __ccgo_ts + 928,
44: __ccgo_ts + 948,
45: __ccgo_ts + 974,
46: __ccgo_ts + 997,
47: __ccgo_ts + 1023,
48: __ccgo_ts + 1045,
49: __ccgo_ts + 1066,
50: __ccgo_ts + 1077,
51: __ccgo_ts + 1085,
52: __ccgo_ts + 1099,
53: __ccgo_ts + 1112,
}
func _sqlite3CompileOptions(tls *libc.TLS, pnOpt uintptr) (r uintptr) {
*(*int32)(unsafe.Pointer(pnOpt)) = int32(libc.Uint64FromInt64(432) / libc.Uint64FromInt64(8))
return uintptr(unsafe.Pointer(&_sqlite3azCompileOpt))
}
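// The constant expression above simply derives the number of entries in
// _sqlite3azCompileOpt: the array occupies 432 bytes and each uintptr entry
// is 8 bytes on this amd64 target, so *pnOpt is set to 432/8 = 54.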
/************** End of ctime.c ***********************************************/
/************** Begin file global.c ******************************************/
/*
** 2008 June 13
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains definitions of global variables and constants.
*/
/* #include "sqliteInt.h" */
// C documentation
//
// /* An array to map all upper-case characters into their corresponding
// ** lower-case character.
// **
// ** SQLite only considers US-ASCII (or EBCDIC) characters. We do not
// ** handle case conversions for the UTF character set since the tables
// ** involved are nearly as big or bigger than SQLite itself.
// */
var _sqlite3UpperToLower = [274]uint8{
1: uint8(1),
2: uint8(2),
3: uint8(3),
4: uint8(4),
5: uint8(5),
6: uint8(6),
7: uint8(7),
8: uint8(8),
9: uint8(9),
10: uint8(10),
11: uint8(11),
12: uint8(12),
13: uint8(13),
14: uint8(14),
15: uint8(15),
16: uint8(16),
17: uint8(17),
18: uint8(18),
19: uint8(19),
20: uint8(20),
21: uint8(21),
22: uint8(22),
23: uint8(23),
24: uint8(24),
25: uint8(25),
26: uint8(26),
27: uint8(27),
28: uint8(28),
29: uint8(29),
30: uint8(30),
31: uint8(31),
32: uint8(32),
33: uint8(33),
34: uint8(34),
35: uint8(35),
36: uint8(36),
37: uint8(37),
38: uint8(38),
39: uint8(39),
40: uint8(40),
41: uint8(41),
42: uint8(42),
43: uint8(43),
44: uint8(44),
45: uint8(45),
46: uint8(46),
47: uint8(47),
48: uint8(48),
49: uint8(49),
50: uint8(50),
51: uint8(51),
52: uint8(52),
53: uint8(53),
54: uint8(54),
55: uint8(55),
56: uint8(56),
57: uint8(57),
58: uint8(58),
59: uint8(59),
60: uint8(60),
61: uint8(61),
62: uint8(62),
63: uint8(63),
64: uint8(64),
65: uint8(97),
66: uint8(98),
67: uint8(99),
68: uint8(100),
69: uint8(101),
70: uint8(102),
71: uint8(103),
72: uint8(104),
73: uint8(105),
74: uint8(106),
75: uint8(107),
76: uint8(108),
77: uint8(109),
78: uint8(110),
79: uint8(111),
80: uint8(112),
81: uint8(113),
82: uint8(114),
83: uint8(115),
84: uint8(116),
85: uint8(117),
86: uint8(118),
87: uint8(119),
88: uint8(120),
89: uint8(121),
90: uint8(122),
91: uint8(91),
92: uint8(92),
93: uint8(93),
94: uint8(94),
95: uint8(95),
96: uint8(96),
97: uint8(97),
98: uint8(98),
99: uint8(99),
100: uint8(100),
101: uint8(101),
102: uint8(102),
103: uint8(103),
104: uint8(104),
105: uint8(105),
106: uint8(106),
107: uint8(107),
108: uint8(108),
109: uint8(109),
110: uint8(110),
111: uint8(111),
112: uint8(112),
113: uint8(113),
114: uint8(114),
115: uint8(115),
116: uint8(116),
117: uint8(117),
118: uint8(118),
119: uint8(119),
120: uint8(120),
121: uint8(121),
122: uint8(122),
123: uint8(123),
124: uint8(124),
125: uint8(125),
126: uint8(126),
127: uint8(127),
128: uint8(128),
129: uint8(129),
130: uint8(130),
131: uint8(131),
132: uint8(132),
133: uint8(133),
134: uint8(134),
135: uint8(135),
136: uint8(136),
137: uint8(137),
138: uint8(138),
139: uint8(139),
140: uint8(140),
141: uint8(141),
142: uint8(142),
143: uint8(143),
144: uint8(144),
145: uint8(145),
146: uint8(146),
147: uint8(147),
148: uint8(148),
149: uint8(149),
150: uint8(150),
151: uint8(151),
152: uint8(152),
153: uint8(153),
154: uint8(154),
155: uint8(155),
156: uint8(156),
157: uint8(157),
158: uint8(158),
159: uint8(159),
160: uint8(160),
161: uint8(161),
162: uint8(162),
163: uint8(163),
164: uint8(164),
165: uint8(165),
166: uint8(166),
167: uint8(167),
168: uint8(168),
169: uint8(169),
170: uint8(170),
171: uint8(171),
172: uint8(172),
173: uint8(173),
174: uint8(174),
175: uint8(175),
176: uint8(176),
177: uint8(177),
178: uint8(178),
179: uint8(179),
180: uint8(180),
181: uint8(181),
182: uint8(182),
183: uint8(183),
184: uint8(184),
185: uint8(185),
186: uint8(186),
187: uint8(187),
188: uint8(188),
189: uint8(189),
190: uint8(190),
191: uint8(191),
192: uint8(192),
193: uint8(193),
194: uint8(194),
195: uint8(195),
196: uint8(196),
197: uint8(197),
198: uint8(198),
199: uint8(199),
200: uint8(200),
201: uint8(201),
202: uint8(202),
203: uint8(203),
204: uint8(204),
205: uint8(205),
206: uint8(206),
207: uint8(207),
208: uint8(208),
209: uint8(209),
210: uint8(210),
211: uint8(211),
212: uint8(212),
213: uint8(213),
214: uint8(214),
215: uint8(215),
216: uint8(216),
217: uint8(217),
218: uint8(218),
219: uint8(219),
220: uint8(220),
221: uint8(221),
222: uint8(222),
223: uint8(223),
224: uint8(224),
225: uint8(225),
226: uint8(226),
227: uint8(227),
228: uint8(228),
229: uint8(229),
230: uint8(230),
231: uint8(231),
232: uint8(232),
233: uint8(233),
234: uint8(234),
235: uint8(235),
236: uint8(236),
237: uint8(237),
238: uint8(238),
239: uint8(239),
240: uint8(240),
241: uint8(241),
242: uint8(242),
243: uint8(243),
244: uint8(244),
245: uint8(245),
246: uint8(246),
247: uint8(247),
248: uint8(248),
249: uint8(249),
250: uint8(250),
251: uint8(251),
252: uint8(252),
253: uint8(253),
254: uint8(254),
255: uint8(255),
256: uint8(1),
259: uint8(1),
260: uint8(1),
263: uint8(1),
265: uint8(1),
267: uint8(1),
268: uint8(1),
270: uint8(1),
273: uint8(1),
}
var _sqlite3aLTb = uintptr(unsafe.Pointer(&_sqlite3UpperToLower)) + uintptr(libc.Int32FromInt32(256)-libc.Int32FromInt32(OP_Ne))
var _sqlite3aEQb = uintptr(unsafe.Pointer(&_sqlite3UpperToLower)) + uintptr(libc.Int32FromInt32(256)+libc.Int32FromInt32(6)-libc.Int32FromInt32(OP_Ne))
var _sqlite3aGTb = uintptr(unsafe.Pointer(&_sqlite3UpperToLower)) + uintptr(libc.Int32FromInt32(256)+libc.Int32FromInt32(12)-libc.Int32FromInt32(OP_Ne))
// C documentation
//
// /*
// ** The following 256-byte lookup table is used to support SQLite's built-in
// ** equivalents to the following standard library functions:
// **
// ** isspace() 0x01
// ** isalpha() 0x02
// ** isdigit() 0x04
// ** isalnum() 0x06
// ** isxdigit() 0x08
// ** toupper() 0x20
// ** SQLite identifier character 0x40 $, _, or non-ascii
// ** Quote character 0x80
// **
// ** Bit 0x20 is set if the mapped character requires translation to upper
// ** case. i.e. if the character is a lower-case ASCII character.
// ** If x is a lower-case ASCII character, then its upper-case equivalent
// ** is (x - 0x20). Therefore toupper() can be implemented as:
// **
// ** (x & ~(map[x]&0x20))
// **
// ** The equivalent of tolower() is implemented using the sqlite3UpperToLower[]
// ** array. tolower() is used more often than toupper() by SQLite.
// **
// ** Bit 0x40 is set if the character is non-alphanumeric and can be used in an
// ** SQLite identifier. Identifiers are alphanumerics, "_", "$", and any
// ** non-ASCII UTF character. Hence the test for whether or not a character is
// ** part of an identifier is 0x46.
// */
var _sqlite3CtypeMap = [256]uint8{
9: uint8(0x01),
10: uint8(0x01),
11: uint8(0x01),
12: uint8(0x01),
13: uint8(0x01),
32: uint8(0x01),
34: uint8(0x80),
36: uint8(0x40),
39: uint8(0x80),
48: uint8(0x0c),
49: uint8(0x0c),
50: uint8(0x0c),
51: uint8(0x0c),
52: uint8(0x0c),
53: uint8(0x0c),
54: uint8(0x0c),
55: uint8(0x0c),
56: uint8(0x0c),
57: uint8(0x0c),
65: uint8(0x0a),
66: uint8(0x0a),
67: uint8(0x0a),
68: uint8(0x0a),
69: uint8(0x0a),
70: uint8(0x0a),
71: uint8(0x02),
72: uint8(0x02),
73: uint8(0x02),
74: uint8(0x02),
75: uint8(0x02),
76: uint8(0x02),
77: uint8(0x02),
78: uint8(0x02),
79: uint8(0x02),
80: uint8(0x02),
81: uint8(0x02),
82: uint8(0x02),
83: uint8(0x02),
84: uint8(0x02),
85: uint8(0x02),
86: uint8(0x02),
87: uint8(0x02),
88: uint8(0x02),
89: uint8(0x02),
90: uint8(0x02),
91: uint8(0x80),
95: uint8(0x40),
96: uint8(0x80),
97: uint8(0x2a),
98: uint8(0x2a),
99: uint8(0x2a),
100: uint8(0x2a),
101: uint8(0x2a),
102: uint8(0x2a),
103: uint8(0x22),
104: uint8(0x22),
105: uint8(0x22),
106: uint8(0x22),
107: uint8(0x22),
108: uint8(0x22),
109: uint8(0x22),
110: uint8(0x22),
111: uint8(0x22),
112: uint8(0x22),
113: uint8(0x22),
114: uint8(0x22),
115: uint8(0x22),
116: uint8(0x22),
117: uint8(0x22),
118: uint8(0x22),
119: uint8(0x22),
120: uint8(0x22),
121: uint8(0x22),
122: uint8(0x22),
128: uint8(0x40),
129: uint8(0x40),
130: uint8(0x40),
131: uint8(0x40),
132: uint8(0x40),
133: uint8(0x40),
134: uint8(0x40),
135: uint8(0x40),
136: uint8(0x40),
137: uint8(0x40),
138: uint8(0x40),
139: uint8(0x40),
140: uint8(0x40),
141: uint8(0x40),
142: uint8(0x40),
143: uint8(0x40),
144: uint8(0x40),
145: uint8(0x40),
146: uint8(0x40),
147: uint8(0x40),
148: uint8(0x40),
149: uint8(0x40),
150: uint8(0x40),
151: uint8(0x40),
152: uint8(0x40),
153: uint8(0x40),
154: uint8(0x40),
155: uint8(0x40),
156: uint8(0x40),
157: uint8(0x40),
158: uint8(0x40),
159: uint8(0x40),
160: uint8(0x40),
161: uint8(0x40),
162: uint8(0x40),
163: uint8(0x40),
164: uint8(0x40),
165: uint8(0x40),
166: uint8(0x40),
167: uint8(0x40),
168: uint8(0x40),
169: uint8(0x40),
170: uint8(0x40),
171: uint8(0x40),
172: uint8(0x40),
173: uint8(0x40),
174: uint8(0x40),
175: uint8(0x40),
176: uint8(0x40),
177: uint8(0x40),
178: uint8(0x40),
179: uint8(0x40),
180: uint8(0x40),
181: uint8(0x40),
182: uint8(0x40),
183: uint8(0x40),
184: uint8(0x40),
185: uint8(0x40),
186: uint8(0x40),
187: uint8(0x40),
188: uint8(0x40),
189: uint8(0x40),
190: uint8(0x40),
191: uint8(0x40),
192: uint8(0x40),
193: uint8(0x40),
194: uint8(0x40),
195: uint8(0x40),
196: uint8(0x40),
197: uint8(0x40),
198: uint8(0x40),
199: uint8(0x40),
200: uint8(0x40),
201: uint8(0x40),
202: uint8(0x40),
203: uint8(0x40),
204: uint8(0x40),
205: uint8(0x40),
206: uint8(0x40),
207: uint8(0x40),
208: uint8(0x40),
209: uint8(0x40),
210: uint8(0x40),
211: uint8(0x40),
212: uint8(0x40),
213: uint8(0x40),
214: uint8(0x40),
215: uint8(0x40),
216: uint8(0x40),
217: uint8(0x40),
218: uint8(0x40),
219: uint8(0x40),
220: uint8(0x40),
221: uint8(0x40),
222: uint8(0x40),
223: uint8(0x40),
224: uint8(0x40),
225: uint8(0x40),
226: uint8(0x40),
227: uint8(0x40),
228: uint8(0x40),
229: uint8(0x40),
230: uint8(0x40),
231: uint8(0x40),
232: uint8(0x40),
233: uint8(0x40),
234: uint8(0x40),
235: uint8(0x40),
236: uint8(0x40),
237: uint8(0x40),
238: uint8(0x40),
239: uint8(0x40),
240: uint8(0x40),
241: uint8(0x40),
242: uint8(0x40),
243: uint8(0x40),
244: uint8(0x40),
245: uint8(0x40),
246: uint8(0x40),
247: uint8(0x40),
248: uint8(0x40),
249: uint8(0x40),
250: uint8(0x40),
251: uint8(0x40),
252: uint8(0x40),
253: uint8(0x40),
254: uint8(0x40),
255: uint8(0x40),
}
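// A hypothetical helper (not part of the generated code) showing the
// toupper() trick documented above: for a lower-case ASCII byte the 0x20 bit
// of its _sqlite3CtypeMap entry is set, so clearing that bit from the
// character yields its upper-case form, e.g. 'a' (0x61) becomes 'A' (0x41),
// while every other byte passes through unchanged.
func exampleToUpperASCII(c uint8) uint8 {
	return c &^ (_sqlite3CtypeMap[c] & uint8(0x20))
}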
/* EVIDENCE-OF: R-02982-34736 In order to maintain full backwards
** compatibility for legacy applications, the URI filename capability is
** disabled by default.
**
** EVIDENCE-OF: R-38799-08373 URI filenames can be enabled or disabled
** using the SQLITE_USE_URI=1 or SQLITE_USE_URI=0 compile-time options.
**
** EVIDENCE-OF: R-43642-56306 By default, URI handling is globally
** disabled. The default value may be changed by compiling with the
** SQLITE_USE_URI symbol defined.
*/
/* EVIDENCE-OF: R-38720-18127 The default setting is determined by the
** SQLITE_ALLOW_COVERING_INDEX_SCAN compile-time option, or is "on" if
** that compile-time option is omitted.
*/
/* The minimum PMA size is set to this value multiplied by the database
** page size in bytes.
*/
/* Statement journals spill to disk when their size exceeds the following
** threshold (in bytes). 0 means that statement journals are created and
** written to disk immediately (the default behavior for SQLite versions
** before 3.12.0). -1 means always keep the entire statement journal in
** memory. (The statement journal is also always held entirely in memory
** if journal_mode=MEMORY or if temp_store=MEMORY, regardless of this
** setting.)
*/
/*
** The default lookaside-configuration, in the format "SZ,N". SZ is the
** number of bytes in each lookaside slot (should be a multiple of 8)
** and N is the number of slots. The lookaside-configuration can be
** changed at start-time using sqlite3_config(SQLITE_CONFIG_LOOKASIDE)
** or at run-time for an individual database connection using
** sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE);
**
** With the two-size-lookaside enhancement, less lookaside is required.
** The default configuration of 1200,40 actually provides 30 1200-byte slots
** and 93 128-byte slots, which is more lookaside than is available
** using the older 1200,100 configuration without two-size-lookaside.
*/
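/*
** Illustrative arithmetic for the figures above (the exact split is chosen by
** the library at run-time): the 1200,40 default budgets 1200*40 = 48000 bytes
** of lookaside per connection. Two-size lookaside divides the same budget
** into 30*1200 = 36000 bytes of large slots plus 93*128 = 11904 bytes of
** small slots, 47904 bytes in total, which still fits within the 48000-byte
** allocation.
*/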
/* The default maximum size of an in-memory database created using
** sqlite3_deserialize()
*/
// C documentation
//
// /*
// ** The following singleton contains the global configuration for
// ** the SQLite library.
// */
var _sqlite3Config = TSqlite3Config{
FbCoreMutex: uint8(1),
FbFullMutex: libc.BoolUint8(true),
FbUseCis: uint8(SQLITE_ALLOW_COVERING_INDEX_SCAN),
FbExtraSchemaChecks: uint8(1),
FmxStrlen: int32(0x7ffffffe),
FszLookaside: int32(1200),
FnLookaside: int32(40),
FnStmtSpill: libc.Int32FromInt32(64) * libc.Int32FromInt32(1024),
FmxMmap: int64(SQLITE_MAX_MMAP_SIZE),
FnPage: int32(SQLITE_DEFAULT_PCACHE_INITSZ),
FszPma: uint32(SQLITE_SORTER_PMASZ),
FmxMemdbSize: int64(SQLITE_MEMDB_DEFAULT_MAXSIZE),
FiOnceResetThreshold: int32(0x7ffffffe),
FszSorterRef: uint32(SQLITE_DEFAULT_SORTERREF_SIZE),
}
// C documentation
//
// /*
// ** Hash table for global functions - functions common to all
// ** database connections. After initialization, this table is
// ** read-only.
// */
var _sqlite3BuiltinFunctions TFuncDefHash
// C documentation
//
// /*
// ** The value of the "pending" byte must be 0x40000000 (1 byte past the
// ** 1-gibibyte boundary) in a compatible database. SQLite never uses
// ** the database page that contains the pending byte. It never attempts
// ** to read or write that page. The pending byte page is set aside
// ** for use by the VFS layers as space for managing file locks.
// **
// ** During testing, it is often desirable to move the pending byte to
// ** a different position in the file. This allows code that has to
// ** deal with the pending byte to run on files that are much smaller
// ** than 1 GiB. The sqlite3_test_control() interface can be used to
// ** move the pending byte.
// **
// ** IMPORTANT: Changing the pending byte to any value other than
// ** 0x40000000 results in an incompatible database file format!
// ** Changing the pending byte during operation will result in undefined
// ** and incorrect behavior.
// */
var _sqlite3PendingByte = int32(0x40000000)
// C documentation
//
// /*
// ** Tracing flags set by SQLITE_TESTCTRL_TRACEFLAGS.
// */
var _sqlite3TreeTrace = uint32(0)
var _sqlite3WhereTrace = uint32(0)
// C documentation
//
// /* #include "opcodes.h" */
// /*
// ** Properties of opcodes. The OPFLG_INITIALIZER macro is
// ** created by mkopcodeh.awk during compilation. Data is obtained
// ** from the comments following the "case OP_xxxx:" statements in
// ** the vdbe.c file.
// */
var _sqlite3OpcodeProperty = [190]uint8{
4: uint8(0x10),
6: uint8(0x41),
8: uint8(0x01),
9: uint8(0x01),
10: uint8(0x01),
11: uint8(0x01),
12: uint8(0x03),
13: uint8(0x03),
14: uint8(0x01),
15: uint8(0x01),
16: uint8(0x03),
17: uint8(0x03),
18: uint8(0x01),
19: uint8(0x12),
20: uint8(0x01),
21: uint8(0x49),
22: uint8(0x49),
23: uint8(0x49),
24: uint8(0x49),
25: uint8(0x01),
26: uint8(0x49),
27: uint8(0x49),
28: uint8(0x49),
29: uint8(0x49),
30: uint8(0x49),
31: uint8(0x49),
32: uint8(0x41),
33: uint8(0x01),
34: uint8(0x41),
35: uint8(0x41),
36: uint8(0x41),
37: uint8(0x01),
38: uint8(0x41),
39: uint8(0x41),
40: uint8(0x41),
41: uint8(0x41),
42: uint8(0x41),
43: uint8(0x26),
44: uint8(0x26),
45: uint8(0x41),
46: uint8(0x23),
47: uint8(0x0b),
48: uint8(0x01),
49: uint8(0x01),
50: uint8(0x03),
51: uint8(0x03),
52: uint8(0x0b),
53: uint8(0x0b),
54: uint8(0x0b),
55: uint8(0x0b),
56: uint8(0x0b),
57: uint8(0x0b),
58: uint8(0x01),
59: uint8(0x03),
60: uint8(0x03),
61: uint8(0x03),
62: uint8(0x01),
63: uint8(0x41),
64: uint8(0x01),
67: uint8(0x02),
68: uint8(0x02),
69: uint8(0x08),
71: uint8(0x10),
72: uint8(0x10),
73: uint8(0x10),
75: uint8(0x10),
77: uint8(0x10),
78: uint8(0x10),
81: uint8(0x10),
82: uint8(0x10),
86: uint8(0x02),
87: uint8(0x02),
88: uint8(0x02),
91: uint8(0x12),
92: uint8(0x1e),
93: uint8(0x20),
94: uint8(0x40),
98: uint8(0x10),
99: uint8(0x10),
101: uint8(0x40),
102: uint8(0x26),
103: uint8(0x26),
104: uint8(0x26),
105: uint8(0x26),
106: uint8(0x26),
107: uint8(0x26),
108: uint8(0x26),
109: uint8(0x26),
110: uint8(0x26),
111: uint8(0x26),
112: uint8(0x40),
114: uint8(0x12),
115: uint8(0x40),
116: uint8(0x40),
117: uint8(0x10),
118: uint8(0x40),
122: uint8(0x40),
124: uint8(0x40),
125: uint8(0x40),
126: uint8(0x10),
127: uint8(0x10),
133: uint8(0x40),
135: uint8(0x50),
137: uint8(0x40),
138: uint8(0x04),
139: uint8(0x04),
141: uint8(0x40),
142: uint8(0x50),
143: uint8(0x40),
144: uint8(0x10),
147: uint8(0x10),
153: uint8(0x10),
156: uint8(0x06),
157: uint8(0x10),
159: uint8(0x04),
160: uint8(0x1a),
173: uint8(0x40),
174: uint8(0x10),
175: uint8(0x50),
176: uint8(0x40),
178: uint8(0x10),
179: uint8(0x10),
180: uint8(0x02),
181: uint8(0x12),
182: uint8(0x12),
}
// C documentation
//
// /*
// ** Name of the default collating sequence
// */
var _sqlite3StrBINARY = [7]int8{'B', 'I', 'N', 'A', 'R', 'Y'}
// C documentation
//
// /*
// ** Standard typenames. These names must match the COLTYPE_* definitions.
// ** Adjust the SQLITE_N_STDTYPE value if adding or removing entries.
// **
// ** sqlite3StdType[] The actual names of the datatypes.
// **
// ** sqlite3StdTypeLen[] The length (in bytes) of each entry
// ** in sqlite3StdType[].
// **
// ** sqlite3StdTypeAffinity[] The affinity associated with each entry
// ** in sqlite3StdType[].
// */
var _sqlite3StdTypeLen = [6]uint8{
0: uint8(3),
1: uint8(4),
2: uint8(3),
3: uint8(7),
4: uint8(4),
5: uint8(4),
}
var _sqlite3StdTypeAffinity = [6]int8{
0: int8(SQLITE_AFF_NUMERIC),
1: int8(SQLITE_AFF_BLOB),
2: int8(SQLITE_AFF_INTEGER),
3: int8(SQLITE_AFF_INTEGER),
4: int8(SQLITE_AFF_REAL),
5: int8(SQLITE_AFF_TEXT),
}
var _sqlite3StdType = [6]uintptr{
0: __ccgo_ts + 1125,
1: __ccgo_ts + 1129,
2: __ccgo_ts + 1134,
3: __ccgo_ts + 1138,
4: __ccgo_ts + 1146,
5: __ccgo_ts + 1151,
}
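// Note (added for readability): in the C amalgamation the parallel array
// sqlite3StdType[] holds the names "ANY", "BLOB", "INT", "INTEGER", "REAL"
// and "TEXT", which is consistent with the byte lengths {3, 4, 3, 7, 4, 4}
// and the affinities recorded above; the uintptr values here are offsets
// into the string table __ccgo_ts.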
/************** End of global.c **********************************************/
/************** Begin file status.c ******************************************/
/*
** 2008 June 18
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This module implements the sqlite3_status() interface and related
** functionality.
*/
/* #include "sqliteInt.h" */
/************** Include vdbeInt.h in the middle of status.c ******************/
/************** Begin file vdbeInt.h *****************************************/
/*
** 2003 September 6
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This is the header file for information that is private to the
** VDBE. This information used to all be at the top of the single
** source code file "vdbe.c". When that file became too big (over
** 6000 lines long) it was split up into several smaller files and
** this header information was factored out.
*/
/*
** The maximum number of times that a statement will try to reparse
** itself before giving up and returning SQLITE_SCHEMA.
*/
/*
** VDBE_DISPLAY_P4 is true or false depending on whether or not the
** "explain" P4 display logic is enabled.
*/
// C documentation
//
// /*
// ** SQL is translated into a sequence of instructions to be
// ** executed by a virtual machine. Each instruction is an instance
// ** of the following structure.
// */
type TOp = struct {
Fopcode Tu8
Fp4type int8
Fp5 Tu16
Fp1 int32
Fp2 int32
Fp3 int32
Fp4 Tp4union
}
type Op = TOp
// C documentation
//
// /*
// ** Boolean values
// */
type TBool = uint32
type Bool = TBool
// C documentation
//
// /* Opaque type used by code in vdbesort.c */
type TVdbeSorter = struct {
FmnPmaSize int32
FmxPmaSize int32
FmxKeysize int32
Fpgsz int32
FpReader uintptr
FpMerger uintptr
Fdb uintptr
FpKeyInfo uintptr
FpUnpacked uintptr
Flist TSorterList
FiMemory int32
FnMemory int32
FbUsePMA Tu8
FbUseThreads Tu8
FiPrev Tu8
FnTask Tu8
FtypeMask Tu8
FaTask [1]TSortSubtask
}
type VdbeSorter = TVdbeSorter
// C documentation
//
// /* Elements of the linked list at Vdbe.pAuxData */
type TAuxData = struct {
FiAuxOp int32
FiAuxArg int32
FpAux uintptr
FxDeleteAux uintptr
FpNextAux uintptr
}
type AuxData = TAuxData
// C documentation
//
// /* A cache of large TEXT or BLOB values in a VdbeCursor */
type TVdbeTxtBlbCache = struct {
FpCValue uintptr
FiOffset Ti64
FiCol int32
FcacheStatus Tu32
FcolCacheCtr Tu32
}
type VdbeTxtBlbCache = TVdbeTxtBlbCache
/* Types of VDBE cursors */
// C documentation
//
// /*
// ** A VdbeCursor is a superclass (a wrapper) for various cursor objects:
// **
// ** * A b-tree cursor
// ** - In the main database or in an ephemeral database
// ** - On either an index or a table
// ** * A sorter
// ** * A virtual table
// ** * A one-row "pseudotable" stored in a single register
// */
type TVdbeCursor = struct {
FeCurType Tu8
FiDb Ti8
FnullRow Tu8
FdeferredMoveto Tu8
FisTable Tu8
F__ccgo_align5 [3]byte
F__ccgo8 uint8
FseekHit Tu16
Fub struct {
FaAltMap [0]uintptr
FpBtx uintptr
}
FseqCount Ti64
FcacheStatus Tu32
FseekResult int32
FpAltCursor uintptr
Fuc struct {
FpVCur [0]uintptr
FpSorter [0]uintptr
FpCursor uintptr
}
FpKeyInfo uintptr
FiHdrOffset Tu32
FpgnoRoot TPgno
FnField Ti16
FnHdrParsed Tu16
FmovetoTarget Ti64
FaOffset uintptr
FaRow uintptr
FpayloadSize Tu32
FszRow Tu32
FpCache uintptr
FaType [1]Tu32
}
type VdbeCursor = TVdbeCursor
type TVdbeCursor1 = struct {
FeCurType Tu8
FiDb Ti8
FnullRow Tu8
FdeferredMoveto Tu8
FisTable Tu8
F__ccgo_align5 [3]byte
F__ccgo8 uint8
FseekHit Tu16
Fub struct {
FaAltMap [0]uintptr
FpBtx uintptr
}
FseqCount Ti64
FcacheStatus Tu32
FseekResult int32
FpAltCursor uintptr
Fuc struct {
FpVCur [0]uintptr
FpSorter [0]uintptr
FpCursor uintptr
}
FpKeyInfo uintptr
FiHdrOffset Tu32
FpgnoRoot TPgno
FnField Ti16
FnHdrParsed Tu16
FmovetoTarget Ti64
FaOffset uintptr
FaRow uintptr
FpayloadSize Tu32
FszRow Tu32
FpCache uintptr
FaType [1]Tu32
}
type VdbeCursor1 = TVdbeCursor1
/* Return true if P is a null-only cursor
*/
/*
** A value for VdbeCursor.cacheStatus that means the cache is always invalid.
*/
/*
** Large TEXT or BLOB values can be slow to load, so we want to avoid
** loading them more than once. For that reason, large TEXT and BLOB values
** can be stored in a cache defined by this object, and attached to the
** VdbeCursor using the pCache field.
*/
type TVdbeTxtBlbCache1 = struct {
FpCValue uintptr
FiOffset Ti64
FiCol int32
FcacheStatus Tu32
FcolCacheCtr Tu32
}
type VdbeTxtBlbCache1 = TVdbeTxtBlbCache1
// C documentation
//
// /*
// ** When a sub-program is executed (OP_Program), a structure of this type
// ** is allocated to store the current value of the program counter, as
// ** well as the current memory cell array and various other frame specific
// ** values stored in the Vdbe struct. When the sub-program is finished,
// ** these values are copied back to the Vdbe from the VdbeFrame structure,
// ** restoring the state of the VM to what it was before the sub-program
// ** began executing.
// **
// ** The memory for a VdbeFrame object is allocated and managed by a memory
// ** cell in the parent (calling) frame. When the memory cell is deleted or
// ** overwritten, the VdbeFrame object is not freed immediately. Instead, it
// ** is linked into the Vdbe.pDelFrame list. The contents of the Vdbe.pDelFrame
// ** list are deleted when the VM is reset in VdbeHalt(). The reason for doing
// ** this instead of deleting the VdbeFrame immediately is to avoid recursive
// ** calls to sqlite3VdbeMemRelease() when the memory cells belonging to the
// ** child frame are released.
// **
// ** The currently executing frame is stored in Vdbe.pFrame. Vdbe.pFrame is
// ** set to NULL if the currently executing frame is the main program.
// */
type TVdbeFrame = struct {
Fv uintptr
FpParent uintptr
FaOp uintptr
FaMem uintptr
FapCsr uintptr
FaOnce uintptr
Ftoken uintptr
FlastRowid Ti64
FpAuxData uintptr
FnCursor int32
Fpc int32
FnOp int32
FnMem int32
FnChildMem int32
FnChildCsr int32
FnChange Ti64
FnDbChange Ti64
}
type VdbeFrame = TVdbeFrame
type TVdbeFrame1 = struct {
Fv uintptr
FpParent uintptr
FaOp uintptr
FaMem uintptr
FapCsr uintptr
FaOnce uintptr
Ftoken uintptr
FlastRowid Ti64
FpAuxData uintptr
FnCursor int32
Fpc int32
FnOp int32
FnMem int32
FnChildMem int32
FnChildCsr int32
FnChange Ti64
FnDbChange Ti64
}
type VdbeFrame1 = TVdbeFrame1
/*
** Size of struct Mem not including the Mem.zMalloc member or anything that
** follows.
*/
/* One or more of the following flags are set to indicate the
** representations of the value stored in the Mem struct.
**
** * MEM_Null An SQL NULL value
**
** * MEM_Null|MEM_Zero An SQL NULL with the virtual table
** UPDATE no-change flag set
**
** * MEM_Null|MEM_Term| An SQL NULL, but also contains a
** MEM_Subtype pointer accessible using
** sqlite3_value_pointer().
**
** * MEM_Null|MEM_Cleared Special SQL NULL that compares non-equal
** to other NULLs even using the IS operator.
**
** * MEM_Str A string, stored in Mem.z with
** length Mem.n. Zero-terminated if
** MEM_Term is set. This flag is
** incompatible with MEM_Blob and
** MEM_Null, but can appear with MEM_Int,
** MEM_Real, and MEM_IntReal.
**
** * MEM_Blob A blob, stored in Mem.z length Mem.n.
** Incompatible with MEM_Str, MEM_Null,
** MEM_Int, MEM_Real, and MEM_IntReal.
**
** * MEM_Blob|MEM_Zero A blob in Mem.z of length Mem.n plus
** MEM.u.i extra 0x00 bytes at the end.
**
** * MEM_Int Integer stored in Mem.u.i.
**
** * MEM_Real Real stored in Mem.u.r.
**
** * MEM_IntReal Real stored as an integer in Mem.u.i.
**
** If the MEM_Null flag is set, then the value is an SQL NULL value.
** For a pointer type created using sqlite3_bind_pointer() or
** sqlite3_result_pointer() the MEM_Term and MEM_Subtype flags are also set.
**
** If the MEM_Str flag is set then Mem.z points at a string representation.
** Usually this is encoded in the same unicode encoding as the main
** database (see below for exceptions). If the MEM_Term flag is also
** set, then the string is nul terminated. The MEM_Int and MEM_Real
** flags may coexist with the MEM_Str flag.
*/
/* Extra bits that modify the meanings of the core datatypes above
*/
/* 0x0080 // Available */
/* Bits that determine the storage for Mem.z for a string or blob or
** aggregate accumulator.
*/
/* Return TRUE if Mem X contains dynamically allocated content - anything
** that needs to be deallocated to avoid a leak.
*/
/*
** Clear any existing type flags from a Mem and replace them with f
*/
/*
** True if Mem X is a NULL-nochng type.
*/
/*
** Return true if a memory cell has been initialized and is valid.
** This is for use inside assert() statements only.
**
** A Memory cell is initialized if at least one of the
** MEM_Null, MEM_Str, MEM_Int, MEM_Real, MEM_Blob, or MEM_IntReal bits
** is set. It is "undefined" if all those bits are zero.
*/
/*
** Each auxiliary data pointer stored by a user defined function
** implementation calling sqlite3_set_auxdata() is stored in an instance
** of this structure. All such structures associated with a single VM
** are stored in a linked list headed at Vdbe.pAuxData. All are destroyed
** when the VM is halted (if not before).
*/
type TAuxData1 = struct {
FiAuxOp int32
FiAuxArg int32
FpAux uintptr
FxDeleteAux uintptr
FpNextAux uintptr
}
type AuxData1 = TAuxData1
/*
** The "context" argument for an installable function. A pointer to an
** instance of this structure is the first argument to the routines used to
** implement the SQL functions.
**
** There is a typedef for this structure in sqlite.h. So all routines,
** even the public interface to SQLite, can use a pointer to this structure.
** But this file is the only place where the internal details of this
** structure are known.
**
** This structure is defined inside of vdbeInt.h because it uses substructures
** (Mem) which are only defined there.
*/
type Tsqlite3_context1 = struct {
FpOut uintptr
FpFunc uintptr
FpMem uintptr
FpVdbe uintptr
FiOp int32
FisError int32
Fenc Tu8
FskipFlag Tu8
Fargc Tu8
Fargv [1]uintptr
}
type sqlite3_context1 = Tsqlite3_context1
// C documentation
//
// /* A bitfield type for use inside of structures. Always follow with :N where
// ** N is the number of bits.
// */
type Tbft = uint32
type bft = Tbft
/* Bit Field Type */
// C documentation
//
// /* The ScanStatus object holds a single value for the
// ** sqlite3_stmt_scanstatus() interface.
// **
// ** aAddrRange[]:
// ** This array is used by ScanStatus elements associated with EQP
// ** notes that make an SQLITE_SCANSTAT_NCYCLE value available. It is
// ** an array of up to 3 ranges of VM addresses for which the Vdbe.anCycle[]
// ** values should be summed to calculate the NCYCLE value. Each pair of
// ** integer addresses is a start and end address (both inclusive) for a range
// ** of instructions. A start value of 0 indicates an empty range.
// */
type TScanStatus = struct {
FaddrExplain int32
FaAddrRange [6]int32
FaddrLoop int32
FaddrVisit int32
FiSelectID int32
FnEst TLogEst
FzName uintptr
}
type ScanStatus = TScanStatus
type TScanStatus1 = struct {
FaddrExplain int32
FaAddrRange [6]int32
FaddrLoop int32
FaddrVisit int32
FiSelectID int32
FnEst TLogEst
FzName uintptr
}
type ScanStatus1 = TScanStatus1
// C documentation
//
// /* The DblquoteStr object holds the text of a double-quoted
// ** string for a prepared statement. A linked list of these objects
// ** is constructed during statement parsing and is held on Vdbe.pDblStr.
// ** When computing a normalized SQL statement for an SQL statement, that
// ** list is consulted for each double-quoted identifier to see if the
// ** identifier should really be a string literal.
// */
type TDblquoteStr = struct {
FpNextStr uintptr
Fz [8]int8
}
type DblquoteStr = TDblquoteStr
type TDblquoteStr1 = struct {
FpNextStr uintptr
Fz [8]int8
}
type DblquoteStr1 = TDblquoteStr1
/*
** An instance of the virtual machine. This structure contains the complete
** state of the virtual machine.
**
** The "sqlite3_stmt" structure pointer that is returned by sqlite3_prepare()
** is really a pointer to an instance of this structure.
*/
type TVdbe1 = struct {
Fdb uintptr
FppVPrev uintptr
FpVNext uintptr
FpParse uintptr
FnVar TynVar
FnMem int32
FnCursor int32
FcacheCtr Tu32
Fpc int32
Frc int32
FnChange Ti64
FiStatement int32
FiCurrentTime Ti64
FnFkConstraint Ti64
FnStmtDefCons Ti64
FnStmtDefImmCons Ti64
FaMem uintptr
FapArg uintptr
FapCsr uintptr
FaVar uintptr
FaOp uintptr
FnOp int32
FnOpAlloc int32
FaColName uintptr
FpResultRow uintptr
FzErrMsg uintptr
FpVList uintptr
FstartTime Ti64
FnResColumn Tu16
FnResAlloc Tu16
FerrorAction Tu8
FminWriteFileFormat Tu8
FprepFlags Tu8
FeVdbeState Tu8
F__ccgo200 uint16
FbtreeMask TyDbMask
FlockMask TyDbMask
FaCounter [9]Tu32
FzSql uintptr
FpFree uintptr
FpFrame uintptr
FpDelFrame uintptr
FnFrame int32
Fexpmask Tu32
FpProgram uintptr
FpAuxData uintptr
}
type Vdbe1 = TVdbe1
/*
** The following are allowed values for Vdbe.eVdbeState
*/
/*
** Structure used to store the context required by the
** sqlite3_preupdate_*() API functions.
*/
type TPreUpdate1 = struct {
Fv uintptr
FpCsr uintptr
Fop int32
FaRecord uintptr
Fkeyinfo TKeyInfo
FpUnpacked uintptr
FpNewUnpacked uintptr
FiNewReg int32
FiBlobWrite int32
FiKey1 Ti64
FiKey2 Ti64
FaNew uintptr
FpTab uintptr
FpPk uintptr
}
type PreUpdate1 = TPreUpdate1
// C documentation
//
// /*
// ** An instance of this object is used to pass a vector of values into
// ** OP_VFilter, the xFilter method of a virtual table. The vector is the
// ** set of values on the right-hand side of an IN constraint.
// **
// ** The value as passed into xFilter is an sqlite3_value with a "pointer"
// ** type, such as is generated by sqlite3_result_pointer() and read by
// ** sqlite3_value_pointer(). Such values have MEM_Term|MEM_Subtype|MEM_Null
// ** and a subtype of 'p'. The sqlite3_vtab_in_first() and _next() interfaces
// ** know how to use this object to step through all the values in the
// ** right operand of the IN constraint.
// */
type TValueList = struct {
FpCsr uintptr
FpOut uintptr
}
type ValueList = TValueList
type TValueList1 = struct {
FpCsr uintptr
FpOut uintptr
}
type ValueList1 = TValueList1
/************** End of vdbeInt.h *********************************************/
/************** Continuing where we left off in status.c *********************/
// C documentation
//
// /*
// ** Variables in which to record status information.
// */
type Tsqlite3StatValueType = int64
type sqlite3StatValueType = Tsqlite3StatValueType
type Tsqlite3StatType = struct {
FnowValue [10]Tsqlite3StatValueType
FmxValue [10]Tsqlite3StatValueType
}
type sqlite3StatType = Tsqlite3StatType
type Tsqlite3StatType1 = struct {
FnowValue [10]Tsqlite3StatValueType
FmxValue [10]Tsqlite3StatValueType
}
type sqlite3StatType1 = Tsqlite3StatType1
var _sqlite3Stat = Tsqlite3StatType1{}
// C documentation
//
// /*
// ** Elements of sqlite3Stat[] are protected by either the memory allocator
// ** mutex, or by the pcache1 mutex. The following array determines which.
// */
var _statMutex = [10]int8{
1: int8(1),
2: int8(1),
7: int8(1),
}
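// Note (added for readability): the nonzero entries at indices 1, 2 and 7
// correspond to SQLITE_STATUS_PAGECACHE_USED, SQLITE_STATUS_PAGECACHE_OVERFLOW
// and SQLITE_STATUS_PAGECACHE_SIZE, the counters guarded by the pcache1
// mutex; every other slot is protected by the memory allocator mutex.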
/* The "wsdStat" macro will resolve to the status information
** state vector. If writable static data is unsupported on the target,
** we have to locate the state vector at run-time. In the more common
** case where writable static data is supported, wsdStat can refer directly
** to the "sqlite3Stat" state vector declared above.
*/
// C documentation
//
// /*
// ** Return the current value of a status parameter. The caller must
// ** be holding the appropriate mutex.
// */
func _sqlite3StatusValue(tls *libc.TLS, op int32) (r Tsqlite3_int64) {
return *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + uintptr(op)*8))
}
// C documentation
//
// /*
// ** Add N to the value of a status record. The caller must hold the
// ** appropriate mutex. (Locking is checked by assert()).
// **
// ** The StatusUp() routine can accept positive or negative values for N.
// ** The value of N is added to the current status value and the high-water
// ** mark is adjusted if necessary.
// **
// ** The StatusDown() routine lowers the current value by N. The highwater
// ** mark is unchanged. N must be non-negative for StatusDown().
// */
func _sqlite3StatusUp(tls *libc.TLS, op int32, N int32) {
*(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + uintptr(op)*8)) += int64(N)
if *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + uintptr(op)*8)) > *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + 80 + uintptr(op)*8)) {
*(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + 80 + uintptr(op)*8)) = *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + uintptr(op)*8))
}
}
func _sqlite3StatusDown(tls *libc.TLS, op int32, N int32) {
*(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + uintptr(op)*8)) -= int64(N)
}
// C documentation
//
// /*
// ** Adjust the highwater mark if necessary.
// ** The caller must hold the appropriate mutex.
// */
func _sqlite3StatusHighwater(tls *libc.TLS, op int32, X int32) {
var newValue Tsqlite3StatValueType
_ = newValue
newValue = int64(X)
if newValue > *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + 80 + uintptr(op)*8)) {
*(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + 80 + uintptr(op)*8)) = newValue
}
}
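// Illustrative sketch (added for clarity; plain Go values rather than the
// wsdStat state vector): the semantics documented above are that Up adjusts
// the current value and may raise the high-water mark, Down only lowers the
// current value, and Highwater raises the mark without touching the value.
func exampleStatusSemantics() (cur, hwm int64) {
	up := func(n int64) {
		cur += n
		if cur > hwm {
			hwm = cur
		}
	}
	down := func(n int64) {
		cur -= n // the high-water mark is intentionally left unchanged
	}
	up(100)  // cur=100, hwm=100
	down(40) // cur=60,  hwm=100
	up(30)   // cur=90,  hwm=100
	return cur, hwm
}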
// C documentation
//
// /*
// ** Query status information.
// */
func Xsqlite3_status64(tls *libc.TLS, op int32, pCurrent uintptr, pHighwater uintptr, resetFlag int32) (r int32) {
var pMutex, v1 uintptr
_, _ = pMutex, v1
if op < 0 || op >= int32(libc.Uint64FromInt64(80)/libc.Uint64FromInt64(8)) {
return _sqlite3MisuseError(tls, int32(23890))
}
if _statMutex[op] != 0 {
v1 = _sqlite3Pcache1Mutex(tls)
} else {
v1 = _sqlite3MallocMutex(tls)
}
pMutex = v1
Xsqlite3_mutex_enter(tls, pMutex)
*(*Tsqlite3_int64)(unsafe.Pointer(pCurrent)) = *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + uintptr(op)*8))
*(*Tsqlite3_int64)(unsafe.Pointer(pHighwater)) = *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + 80 + uintptr(op)*8))
if resetFlag != 0 {
*(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + 80 + uintptr(op)*8)) = *(*Tsqlite3StatValueType)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Stat)) + uintptr(op)*8))
}
Xsqlite3_mutex_leave(tls, pMutex)
_ = pMutex /* Prevent warning when SQLITE_THREADSAFE=0 */
return SQLITE_OK
}
func Xsqlite3_status(tls *libc.TLS, op int32, pCurrent uintptr, pHighwater uintptr, resetFlag int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* iCur at bp+0 */ Tsqlite3_int64
var _ /* iHwtr at bp+8 */ Tsqlite3_int64
_ = rc
*(*Tsqlite3_int64)(unsafe.Pointer(bp)) = 0
*(*Tsqlite3_int64)(unsafe.Pointer(bp + 8)) = 0
rc = Xsqlite3_status64(tls, op, bp, bp+8, resetFlag)
if rc == 0 {
*(*int32)(unsafe.Pointer(pCurrent)) = int32(*(*Tsqlite3_int64)(unsafe.Pointer(bp)))
*(*int32)(unsafe.Pointer(pHighwater)) = int32(*(*Tsqlite3_int64)(unsafe.Pointer(bp + 8)))
}
return rc
}
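// Illustrative sketch (assumes libc.NewTLS, libc.Xmalloc and libc.Xfree from
// modernc.org/libc; not part of the generated amalgamation): querying the
// global SQLITE_STATUS_MEMORY_USED counter through Xsqlite3_status64. The two
// output parameters are C-style pointers, so they are passed as uintptrs into
// heap memory obtained from the C allocator.
func exampleStatus64() (cur, hwm int64) {
	tls := libc.NewTLS()
	defer tls.Close()
	p := libc.Xmalloc(tls, 16) // room for two sqlite3_int64 out-values
	if p == 0 {
		return 0, 0
	}
	defer libc.Xfree(tls, p)
	if Xsqlite3_status64(tls, SQLITE_STATUS_MEMORY_USED, p, p+8, 0) == SQLITE_OK {
		cur = *(*int64)(unsafe.Pointer(p))
		hwm = *(*int64)(unsafe.Pointer(p + 8))
	}
	return cur, hwm
}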
// C documentation
//
// /*
// ** Return the number of LookasideSlot elements on the linked list
// */
func _countLookasideSlots(tls *libc.TLS, p uintptr) (r Tu32) {
var cnt Tu32
_ = cnt
cnt = uint32(0)
for p != 0 {
p = (*TLookasideSlot)(unsafe.Pointer(p)).FpNext
cnt++
}
return cnt
}
// C documentation
//
// /*
// ** Count the number of slots of lookaside memory that are outstanding
// */
func _sqlite3LookasideUsed(tls *libc.TLS, db uintptr, pHighwater uintptr) (r int32) {
var nFree, nInit Tu32
_, _ = nFree, nInit
nInit = _countLookasideSlots(tls, (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpInit)
nFree = _countLookasideSlots(tls, (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree)
nInit += _countLookasideSlots(tls, (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallInit)
nFree += _countLookasideSlots(tls, (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree)
if pHighwater != 0 {
*(*int32)(unsafe.Pointer(pHighwater)) = int32((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FnSlot - nInit)
}
return int32((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FnSlot - (nInit + nFree))
}
// C documentation
//
// /*
// ** Query status information for a single database connection
// */
func Xsqlite3_db_status(tls *libc.TLS, db uintptr, op int32, pCurrent uintptr, pHighwater uintptr, resetFlag int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i, i1, i2, nByte, rc, totalUsed int32
var p, p1, pBt, pPager, pPager1, pSchema, pVdbe uintptr
var _ /* nByte at bp+0 */ int32
var _ /* nByte at bp+4 */ int32
var _ /* nRet at bp+8 */ Tu64
_, _, _, _, _, _, _, _, _, _, _, _, _ = i, i1, i2, nByte, p, p1, pBt, pPager, pPager1, pSchema, pVdbe, rc, totalUsed
rc = SQLITE_OK /* Return code */
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
switch op {
case SQLITE_DBSTATUS_LOOKASIDE_USED:
*(*int32)(unsafe.Pointer(pCurrent)) = _sqlite3LookasideUsed(tls, db, pHighwater)
if resetFlag != 0 {
p = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree
if p != 0 {
for (*TLookasideSlot)(unsafe.Pointer(p)).FpNext != 0 {
p = (*TLookasideSlot)(unsafe.Pointer(p)).FpNext
}
(*TLookasideSlot)(unsafe.Pointer(p)).FpNext = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpInit
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpInit = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree = uintptr(0)
}
p = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree
if p != 0 {
for (*TLookasideSlot)(unsafe.Pointer(p)).FpNext != 0 {
p = (*TLookasideSlot)(unsafe.Pointer(p)).FpNext
}
(*TLookasideSlot)(unsafe.Pointer(p)).FpNext = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallInit
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallInit = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree = uintptr(0)
}
}
case int32(SQLITE_DBSTATUS_LOOKASIDE_HIT):
fallthrough
case int32(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE):
fallthrough
case int32(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL):
*(*int32)(unsafe.Pointer(pCurrent)) = 0
*(*int32)(unsafe.Pointer(pHighwater)) = int32(*(*Tu32)(unsafe.Pointer(db + 440 + 16 + uintptr(op-int32(SQLITE_DBSTATUS_LOOKASIDE_HIT))*4)))
if resetFlag != 0 {
*(*Tu32)(unsafe.Pointer(db + 440 + 16 + uintptr(op-int32(SQLITE_DBSTATUS_LOOKASIDE_HIT))*4)) = uint32(0)
}
break
/*
** Return an approximation for the amount of memory currently used
** by all pagers associated with the given database connection. The
** highwater mark is meaningless and is returned as zero.
*/
fallthrough
case int32(SQLITE_DBSTATUS_CACHE_USED_SHARED):
fallthrough
case int32(SQLITE_DBSTATUS_CACHE_USED):
totalUsed = 0
_sqlite3BtreeEnterAll(tls, db)
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
pBt = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if pBt != 0 {
pPager = _sqlite3BtreePager(tls, pBt)
nByte = _sqlite3PagerMemUsed(tls, pPager)
if op == int32(SQLITE_DBSTATUS_CACHE_USED_SHARED) {
nByte = nByte / _sqlite3BtreeConnectionCount(tls, pBt)
}
totalUsed += nByte
}
goto _1
_1:
;
i++
}
_sqlite3BtreeLeaveAll(tls, db)
*(*int32)(unsafe.Pointer(pCurrent)) = totalUsed
*(*int32)(unsafe.Pointer(pHighwater)) = 0
break
/*
** *pCurrent gets an accurate estimate of the amount of memory used
** to store the schema for all databases (main, temp, and any ATTACHed
** databases). *pHighwater is set to zero.
*/
fallthrough
case int32(SQLITE_DBSTATUS_SCHEMA_USED): /* Used to iterate through schemas */
*(*int32)(unsafe.Pointer(bp)) = 0 /* Used to accumulate return value */
_sqlite3BtreeEnterAll(tls, db)
(*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed = bp
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart
i1 = 0
for {
if !(i1 < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
pSchema = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i1)*32))).FpSchema
if pSchema != uintptr(0) {
*(*int32)(unsafe.Pointer(bp)) = int32(uint32(*(*int32)(unsafe.Pointer(bp))) + uint32((*(*func(*libc.TLS, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxRoundup})))(tls, int32(32)))*((*TSchema)(unsafe.Pointer(pSchema)).FtblHash.Fcount+(*TSchema)(unsafe.Pointer(pSchema)).FtrigHash.Fcount+(*TSchema)(unsafe.Pointer(pSchema)).FidxHash.Fcount+(*TSchema)(unsafe.Pointer(pSchema)).FfkeyHash.Fcount))
*(*int32)(unsafe.Pointer(bp)) = int32(uint64(*(*int32)(unsafe.Pointer(bp))) + Xsqlite3_msize(tls, (*TSchema)(unsafe.Pointer(pSchema)).FtblHash.Fht))
*(*int32)(unsafe.Pointer(bp)) = int32(uint64(*(*int32)(unsafe.Pointer(bp))) + Xsqlite3_msize(tls, (*TSchema)(unsafe.Pointer(pSchema)).FtrigHash.Fht))
*(*int32)(unsafe.Pointer(bp)) = int32(uint64(*(*int32)(unsafe.Pointer(bp))) + Xsqlite3_msize(tls, (*TSchema)(unsafe.Pointer(pSchema)).FidxHash.Fht))
*(*int32)(unsafe.Pointer(bp)) = int32(uint64(*(*int32)(unsafe.Pointer(bp))) + Xsqlite3_msize(tls, (*TSchema)(unsafe.Pointer(pSchema)).FfkeyHash.Fht))
p1 = (*THash)(unsafe.Pointer(pSchema + 56)).Ffirst
for {
if !(p1 != 0) {
break
}
_sqlite3DeleteTrigger(tls, db, (*THashElem)(unsafe.Pointer(p1)).Fdata)
goto _3
_3:
;
p1 = (*THashElem)(unsafe.Pointer(p1)).Fnext
}
p1 = (*THash)(unsafe.Pointer(pSchema + 8)).Ffirst
for {
if !(p1 != 0) {
break
}
_sqlite3DeleteTable(tls, db, (*THashElem)(unsafe.Pointer(p1)).Fdata)
goto _4
_4:
;
p1 = (*THashElem)(unsafe.Pointer(p1)).Fnext
}
}
goto _2
_2:
;
i1++
}
(*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed = uintptr(0)
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpTrueEnd
_sqlite3BtreeLeaveAll(tls, db)
*(*int32)(unsafe.Pointer(pHighwater)) = 0
*(*int32)(unsafe.Pointer(pCurrent)) = *(*int32)(unsafe.Pointer(bp))
break
/*
** *pCurrent gets an accurate estimate of the amount of memory used
** to store all prepared statements.
** *pHighwater is set to zero.
*/
fallthrough
case int32(SQLITE_DBSTATUS_STMT_USED): /* Used to iterate through VMs */
*(*int32)(unsafe.Pointer(bp + 4)) = 0 /* Used to accumulate return value */
(*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed = bp + 4
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart
pVdbe = (*Tsqlite3)(unsafe.Pointer(db)).FpVdbe
for {
if !(pVdbe != 0) {
break
}
_sqlite3VdbeDelete(tls, pVdbe)
goto _5
_5:
;
pVdbe = (*TVdbe1)(unsafe.Pointer(pVdbe)).FpVNext
}
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpTrueEnd
(*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed = uintptr(0)
*(*int32)(unsafe.Pointer(pHighwater)) = 0 /* IMP: R-64479-57858 */
*(*int32)(unsafe.Pointer(pCurrent)) = *(*int32)(unsafe.Pointer(bp + 4))
break
/*
** Set *pCurrent to the total cache hits or misses encountered by all
** pagers the database handle is connected to. *pHighwater is always set
** to zero.
*/
fallthrough
case int32(SQLITE_DBSTATUS_CACHE_SPILL):
op = libc.Int32FromInt32(SQLITE_DBSTATUS_CACHE_WRITE) + libc.Int32FromInt32(1)
fallthrough
case int32(SQLITE_DBSTATUS_CACHE_HIT):
fallthrough
case int32(SQLITE_DBSTATUS_CACHE_MISS):
fallthrough
case int32(SQLITE_DBSTATUS_CACHE_WRITE):
*(*Tu64)(unsafe.Pointer(bp + 8)) = uint64(0)
i2 = 0
for {
if !(i2 < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
if (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i2)*32))).FpBt != 0 {
pPager1 = _sqlite3BtreePager(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i2)*32))).FpBt)
_sqlite3PagerCacheStat(tls, pPager1, op, resetFlag, bp+8)
}
goto _6
_6:
;
i2++
}
*(*int32)(unsafe.Pointer(pHighwater)) = 0 /* IMP: R-42420-56072 */
/* IMP: R-54100-20147 */
/* IMP: R-29431-39229 */
*(*int32)(unsafe.Pointer(pCurrent)) = int32(*(*Tu64)(unsafe.Pointer(bp + 8))) & int32(0x7fffffff)
break
/* Set *pCurrent to non-zero if there are unresolved deferred foreign
** key constraints. Set *pCurrent to zero if all foreign key constraints
** have been satisfied. The *pHighwater is always set to zero.
*/
fallthrough
case int32(SQLITE_DBSTATUS_DEFERRED_FKS):
*(*int32)(unsafe.Pointer(pHighwater)) = 0 /* IMP: R-11967-56545 */
*(*int32)(unsafe.Pointer(pCurrent)) = libc.BoolInt32((*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons > 0 || (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredCons > 0)
default:
rc = int32(SQLITE_ERROR)
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
return rc
}
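// Illustrative sketch (assumes a connection handle obtained elsewhere through
// this package's Xsqlite3_open* wrappers, plus libc.Xmalloc/Xfree as in the
// sketch above): reading the schema memory used by one connection via
// SQLITE_DBSTATUS_SCHEMA_USED. Unlike the global counters, the per-connection
// counters are C ints, i.e. 32 bits wide.
func exampleDbStatus(tls *libc.TLS, db uintptr) (cur int32) {
	p := libc.Xmalloc(tls, 8) // two C ints: current value and high-water mark
	if p == 0 {
		return 0
	}
	defer libc.Xfree(tls, p)
	if Xsqlite3_db_status(tls, db, SQLITE_DBSTATUS_SCHEMA_USED, p, p+4, 0) == SQLITE_OK {
		cur = *(*int32)(unsafe.Pointer(p))
	}
	return cur
}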
type Tclock_t = int32
type clock_t = Tclock_t
type Ttime_t = int64
type time_t = Ttime_t
type Tclockid_t = int32
type clockid_t = Tclockid_t
type Ttimer_t = uintptr
type timer_t = Ttimer_t
type Ttimespec = struct {
Ftv_sec Ttime_t
Ftv_nsec int64
}
type timespec = Ttimespec
type Titimerspec = struct {
Fit_interval Ttimespec
Fit_value Ttimespec
}
type itimerspec = Titimerspec
type Tpid_t = int32
type pid_t = Tpid_t
type Ttm = struct {
Ftm_sec int32
Ftm_min int32
Ftm_hour int32
Ftm_mday int32
Ftm_mon int32
Ftm_year int32
Ftm_wday int32
Ftm_yday int32
Ftm_isdst int32
Ftm_gmtoff int64
Ftm_zone uintptr
}
type tm = Ttm
/*
** The MSVC CRT on Windows CE may not have a localtime() function.
** So declare a substitute. The substitute function itself is
** defined in "os_win.c".
*/
// C documentation
//
// /*
// ** A structure for holding a single date and time.
// */
type TDateTime = struct {
FiJD Tsqlite3_int64
FY int32
FM int32
FD int32
Fh int32
Fm int32
Ftz int32
Fs float64
FvalidJD int8
FrawS int8
FvalidYMD int8
FvalidHMS int8
FvalidTZ int8
FtzSet int8
FisError int8
FuseSubsec int8
}
type DateTime = TDateTime
type TDateTime1 = struct {
FiJD Tsqlite3_int64
FY int32
FM int32
FD int32
Fh int32
Fm int32
Ftz int32
Fs float64
FvalidJD int8
FrawS int8
FvalidYMD int8
FvalidHMS int8
FvalidTZ int8
FtzSet int8
FisError int8
FuseSubsec int8
}
type DateTime1 = TDateTime1
// C documentation
//
// /*
// ** Convert zDate into one or more integers according to the conversion
// ** specifier zFormat.
// **
// ** zFormat[] contains 4 characters for each integer converted, except for
// ** the last integer which is specified by three characters. The meaning
// ** of a four-character format specifier ABCD is:
// **
// ** A: number of digits to convert. Always "2" or "4".
// ** B: minimum value. Always "0" or "1".
// ** C: maximum value, decoded as:
// ** a: 12
// ** b: 14
// ** c: 24
// ** d: 31
// ** e: 59
// ** f: 9999
// ** D: the separator character, or \000 to indicate this is the
// ** last number to convert.
// **
// ** Example: To translate an ISO-8601 date YYYY-MM-DD, the format would
// ** be "40f-21a-20c". The "40f-" indicates the 4-digit year followed by "-".
// ** The "21a-" indicates the 2-digit month followed by "-". The "20c" indicates
// ** the 2-digit day which is the last integer in the set.
// **
// ** The function returns the number of successful conversions.
// */
func _getDigits(tls *libc.TLS, zDate uintptr, zFormat uintptr, va uintptr) (r int32) {
var N, min, nextC, v1 int8
var ap Tva_list
var cnt, val int32
var max Tu16
_, _, _, _, _, _, _, _ = N, ap, cnt, max, min, nextC, val, v1
cnt = 0
ap = va
for cond := true; cond; cond = nextC != 0 {
N = int8(int32(*(*int8)(unsafe.Pointer(zFormat))) - int32('0'))
min = int8(int32(*(*int8)(unsafe.Pointer(zFormat + 1))) - int32('0'))
val = 0
max = _aMx[int32(*(*int8)(unsafe.Pointer(zFormat + 2)))-int32('a')]
nextC = *(*int8)(unsafe.Pointer(zFormat + 3))
val = 0
for {
v1 = N
N--
if !(v1 != 0) {
break
}
if !(int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zDate)))])&libc.Int32FromInt32(0x04) != 0) {
goto end_getDigits
}
val = val*int32(10) + int32(*(*int8)(unsafe.Pointer(zDate))) - int32('0')
zDate++
}
if val < int32(min) || val > int32(max) || int32(nextC) != 0 && int32(nextC) != int32(*(*int8)(unsafe.Pointer(zDate))) {
goto end_getDigits
}
*(*int32)(unsafe.Pointer(libc.VaUintptr(&ap))) = val
zDate++
cnt++
zFormat += uintptr(4)
}
goto end_getDigits
end_getDigits:
;
_ = ap
return cnt
}
/* The aMx[] array translates the 3rd character of each format
** spec into a max size: a b c d e f */
var _aMx = [6]Tu16{
0: uint16(12),
1: uint16(14),
2: uint16(24),
3: uint16(31),
4: uint16(59),
5: uint16(14712),
}
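// Worked example (added for clarity) of the zFormat encoding documented above
// _getDigits: in a group such as "40f-" the '4' asks for four digits, the '0'
// is the minimum value, the letter 'f' selects the maximum from _aMx, and the
// trailing '-' must follow the parsed digits; a group ending in '\000' is the
// last field of the specifier. Decoding one group can be sketched in plain Go:
func exampleDecodeFormatGroup(spec string) (nDigit, minVal int, maxVal uint16, sep byte) {
	// spec is assumed to be one 4-character group, e.g. "40f-" or "21a\x00".
	nDigit = int(spec[0] - '0')
	minVal = int(spec[1] - '0')
	maxVal = uint16(_aMx[spec[2]-'a']) // same lookup table used by _getDigits
	sep = spec[3]                      // 0 marks the last group of the format
	return nDigit, minVal, maxVal, sep
}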
// C documentation
//
// /*
// ** Parse a timezone extension on the end of a date-time.
// ** The extension is of the form:
// **
// ** (+/-)HH:MM
// **
// ** Or the "zulu" notation:
// **
// ** Z
// **
// ** If the parse is successful, write the number of minutes
// ** of change in p->tz and return 0. If a parser error occurs,
// ** return non-zero.
// **
// ** A missing specifier is not considered an error.
// */
func _parseTimezone(tls *libc.TLS, zDate uintptr, p uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var c, sgn int32
var _ /* nHr at bp+0 */ int32
var _ /* nMn at bp+4 */ int32
_, _ = c, sgn
sgn = 0
for int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zDate)))])&int32(0x01) != 0 {
zDate++
}
(*TDateTime)(unsafe.Pointer(p)).Ftz = 0
c = int32(*(*int8)(unsafe.Pointer(zDate)))
if c == int32('-') {
sgn = -int32(1)
} else {
if c == int32('+') {
sgn = +libc.Int32FromInt32(1)
} else {
if c == int32('Z') || c == int32('z') {
zDate++
goto zulu_time
} else {
return libc.BoolInt32(c != 0)
}
}
}
zDate++
if _getDigits(tls, zDate, __ccgo_ts+1156, libc.VaList(bp+16, bp, bp+4)) != int32(2) {
return int32(1)
}
zDate += uintptr(5)
(*TDateTime)(unsafe.Pointer(p)).Ftz = sgn * (*(*int32)(unsafe.Pointer(bp + 4)) + *(*int32)(unsafe.Pointer(bp))*int32(60))
goto zulu_time
zulu_time:
;
for int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zDate)))])&int32(0x01) != 0 {
zDate++
}
(*TDateTime)(unsafe.Pointer(p)).FtzSet = int8(1)
return libc.BoolInt32(int32(*(*int8)(unsafe.Pointer(zDate))) != 0)
}
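// Worked example (added for clarity) for the timezone grammar handled above:
// "+05:30" yields p->tz = +330 minutes, "-08:00" yields -480, and a bare "Z"
// or "z" leaves tz at 0; computeJD() later subtracts tz*60000 ms so that the
// stored julian day is always UTC. The sign/offset arithmetic in isolation:
func exampleTzMinutes(sgn, nHr, nMn int32) int32 {
	// sgn is +1 or -1, nHr/nMn are the two fields parsed by _getDigits.
	return sgn * (nMn + nHr*60)
}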
// C documentation
//
// /*
// ** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF.
// ** The HH, MM, and SS must each be exactly 2 digits. The
// ** fractional seconds FFFF can be one or more digits.
// **
// ** Return 1 if there is a parsing error and 0 on success.
// */
func _parseHhMmSs(tls *libc.TLS, zDate uintptr, p uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var ms, rScale float64
var v1 int32
var _ /* h at bp+0 */ int32
var _ /* m at bp+4 */ int32
var _ /* s at bp+8 */ int32
_, _, _ = ms, rScale, v1
ms = float64(0)
if _getDigits(tls, zDate, __ccgo_ts+1164, libc.VaList(bp+24, bp, bp+4)) != int32(2) {
return int32(1)
}
zDate += uintptr(5)
if int32(*(*int8)(unsafe.Pointer(zDate))) == int32(':') {
zDate++
if _getDigits(tls, zDate, __ccgo_ts+1172, libc.VaList(bp+24, bp+8)) != int32(1) {
return int32(1)
}
zDate += uintptr(2)
if int32(*(*int8)(unsafe.Pointer(zDate))) == int32('.') && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zDate + 1)))])&int32(0x04) != 0 {
rScale = float64(1)
zDate++
for int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zDate)))])&int32(0x04) != 0 {
ms = ms*float64(10) + float64(*(*int8)(unsafe.Pointer(zDate))) - libc.Float64FromUint8('0')
rScale *= float64(10)
zDate++
}
ms /= rScale
}
} else {
*(*int32)(unsafe.Pointer(bp + 8)) = 0
}
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = int8(1)
(*TDateTime)(unsafe.Pointer(p)).Fh = *(*int32)(unsafe.Pointer(bp))
(*TDateTime)(unsafe.Pointer(p)).Fm = *(*int32)(unsafe.Pointer(bp + 4))
(*TDateTime)(unsafe.Pointer(p)).Fs = float64(*(*int32)(unsafe.Pointer(bp + 8))) + ms
if _parseTimezone(tls, zDate, p) != 0 {
return int32(1)
}
if (*TDateTime)(unsafe.Pointer(p)).Ftz != 0 {
v1 = int32(1)
} else {
v1 = 0
}
(*TDateTime)(unsafe.Pointer(p)).FvalidTZ = int8(v1)
return 0
}
// C documentation
//
// /*
// ** Put the DateTime object into its error state.
// */
func _datetimeError(tls *libc.TLS, p uintptr) {
libc.Xmemset(tls, p, 0, uint64(48))
(*TDateTime)(unsafe.Pointer(p)).FisError = int8(1)
}
// C documentation
//
// /*
// ** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume
// ** that the YYYY-MM-DD is according to the Gregorian calendar.
// **
// ** Reference: Meeus page 61
// */
func _computeJD(tls *libc.TLS, p uintptr) {
var A, B, D, M, X1, X2, Y int32
_, _, _, _, _, _, _ = A, B, D, M, X1, X2, Y
if (*TDateTime)(unsafe.Pointer(p)).FvalidJD != 0 {
return
}
if (*TDateTime)(unsafe.Pointer(p)).FvalidYMD != 0 {
Y = (*TDateTime)(unsafe.Pointer(p)).FY
M = (*TDateTime)(unsafe.Pointer(p)).FM
D = (*TDateTime)(unsafe.Pointer(p)).FD
} else {
Y = int32(2000) /* If no YMD specified, assume 2000-Jan-01 */
M = int32(1)
D = int32(1)
}
if Y < -int32(4713) || Y > int32(9999) || (*TDateTime)(unsafe.Pointer(p)).FrawS != 0 {
_datetimeError(tls, p)
return
}
if M <= int32(2) {
Y--
M += int32(12)
}
A = Y / int32(100)
B = int32(2) - A + A/int32(4)
X1 = int32(36525) * (Y + int32(4716)) / int32(100)
X2 = int32(306001) * (M + int32(1)) / int32(10000)
(*TDateTime)(unsafe.Pointer(p)).FiJD = int64((float64(X1+X2+D+B) - libc.Float64FromFloat64(1524.5)) * libc.Float64FromInt32(86400000))
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = int8(1)
if (*TDateTime)(unsafe.Pointer(p)).FvalidHMS != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(p)) += int64((*TDateTime)(unsafe.Pointer(p)).Fh*int32(3600000)+(*TDateTime)(unsafe.Pointer(p)).Fm*int32(60000)) + int64((*TDateTime)(unsafe.Pointer(p)).Fs*libc.Float64FromInt32(1000)+libc.Float64FromFloat64(0.5))
if (*TDateTime)(unsafe.Pointer(p)).FvalidTZ != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(p)) -= int64((*TDateTime)(unsafe.Pointer(p)).Ftz * int32(60000))
(*TDateTime)(unsafe.Pointer(p)).FvalidYMD = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidTZ = 0
}
}
}
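// Worked example (added for clarity) of the computation above for
// 2000-01-01 12:00:00 UTC: after the M<=2 adjustment Y=1999 and M=13, so
// A=19, B=-13, X1=36525*6715/100=2452653 and X2=306001*14/10000=428. The
// date part therefore gives iJD = (2452653+428+1-13-1524.5)*86400000 =
// 211813444800000 ms (JD 2451544.5, i.e. midnight), and the 12:00:00 HMS
// part adds 12*3600000 ms, giving 211813488000000 ms, which is JD 2451545.0,
// the J2000 epoch.
func exampleJ2000Ms() int64 {
	const X1, X2, D, B = 2452653, 428, 1, -13 // intermediate values derived above
	date := int64((float64(X1+X2+D+B) - 1524.5) * 86400000.0)
	return date + 12*3600000 // add the hours-minutes-seconds contribution
}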
// C documentation
//
// /*
// ** Parse dates of the form
// **
// ** YYYY-MM-DD HH:MM:SS.FFF
// ** YYYY-MM-DD HH:MM:SS
// ** YYYY-MM-DD HH:MM
// ** YYYY-MM-DD
// **
// ** Write the result into the DateTime structure and return 0
// ** on success and 1 if the input string is not a well-formed
// ** date.
// */
func _parseYyyyMmDd(tls *libc.TLS, zDate uintptr, p uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var neg, v1 int32
var _ /* D at bp+8 */ int32
var _ /* M at bp+4 */ int32
var _ /* Y at bp+0 */ int32
_, _ = neg, v1
if int32(*(*int8)(unsafe.Pointer(zDate))) == int32('-') {
zDate++
neg = int32(1)
} else {
neg = 0
}
if _getDigits(tls, zDate, __ccgo_ts+1176, libc.VaList(bp+24, bp, bp+4, bp+8)) != int32(3) {
return int32(1)
}
zDate += uintptr(10)
for int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zDate)))])&int32(0x01) != 0 || int32('T') == int32(*(*Tu8)(unsafe.Pointer(zDate))) {
zDate++
}
if _parseHhMmSs(tls, zDate, p) == 0 {
/* We got the time */
} else {
if int32(*(*int8)(unsafe.Pointer(zDate))) == 0 {
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = 0
} else {
return int32(1)
}
}
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidYMD = int8(1)
if neg != 0 {
v1 = -*(*int32)(unsafe.Pointer(bp))
} else {
v1 = *(*int32)(unsafe.Pointer(bp))
}
(*TDateTime)(unsafe.Pointer(p)).FY = v1
(*TDateTime)(unsafe.Pointer(p)).FM = *(*int32)(unsafe.Pointer(bp + 4))
(*TDateTime)(unsafe.Pointer(p)).FD = *(*int32)(unsafe.Pointer(bp + 8))
if (*TDateTime)(unsafe.Pointer(p)).FvalidTZ != 0 {
_computeJD(tls, p)
}
return 0
}
// C documentation
//
// /*
// ** Set the time to the current time reported by the VFS.
// **
// ** Return the number of errors.
// */
func _setDateTimeToCurrent(tls *libc.TLS, context uintptr, p uintptr) (r int32) {
(*TDateTime)(unsafe.Pointer(p)).FiJD = _sqlite3StmtCurrentTime(tls, context)
if (*TDateTime)(unsafe.Pointer(p)).FiJD > 0 {
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = int8(1)
return 0
} else {
return int32(1)
}
return r
}
// C documentation
//
// /*
// ** Input "r" is a numeric quantity which might be a julian day number,
// ** or the number of seconds since 1970. If the value of r is within
// ** range of a julian day number, install it as such and set validJD.
// ** If the value is a valid unix timestamp, put it in p->s and set p->rawS.
// */
func _setRawDateNumber(tls *libc.TLS, p uintptr, r float64) {
(*TDateTime)(unsafe.Pointer(p)).Fs = r
(*TDateTime)(unsafe.Pointer(p)).FrawS = int8(1)
if r >= float64(0) && r < float64(5.3734845e+06) {
(*TDateTime)(unsafe.Pointer(p)).FiJD = int64(r*libc.Float64FromFloat64(8.64e+07) + libc.Float64FromFloat64(0.5))
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = int8(1)
}
}
// C documentation
//
// /*
// ** Attempt to parse the given string into a julian day number. Return
// ** the number of errors.
// **
// ** The following are acceptable forms for the input string:
// **
// ** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM
// ** DDDD.DD
// ** now
// **
// ** In the first form, the +/-HH:MM is always optional. The fractional
// ** seconds extension (the ".FFF") is optional. The seconds portion
// ** (":SS.FFF") is optional. The year and date can be omitted as long
// ** as there is a time string. The time string can be omitted as long
// ** as there is a year and date.
// */
func _parseDateOrTime(tls *libc.TLS, context uintptr, zDate uintptr, p uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* r at bp+0 */ float64
if _parseYyyyMmDd(tls, zDate, p) == 0 {
return 0
} else {
if _parseHhMmSs(tls, zDate, p) == 0 {
return 0
} else {
if _sqlite3StrICmp(tls, zDate, __ccgo_ts+1188) == 0 && _sqlite3NotPureFunc(tls, context) != 0 {
return _setDateTimeToCurrent(tls, context, p)
} else {
if _sqlite3AtoF(tls, zDate, bp, _sqlite3Strlen30(tls, zDate), uint8(SQLITE_UTF8)) > 0 {
_setRawDateNumber(tls, p, *(*float64)(unsafe.Pointer(bp)))
return 0
} else {
if (_sqlite3StrICmp(tls, zDate, __ccgo_ts+1192) == 0 || _sqlite3StrICmp(tls, zDate, __ccgo_ts+1199) == 0) && _sqlite3NotPureFunc(tls, context) != 0 {
(*TDateTime)(unsafe.Pointer(p)).FuseSubsec = int8(1)
return _setDateTimeToCurrent(tls, context, p)
}
}
}
}
}
return int32(1)
}
/* The julian day number for 9999-12-31 23:59:59.999 is 5373484.4999999.
** Multiplying this by 86400000 gives 464269060799999 as the maximum value
** for DateTime.iJD.
**
** But some older compilers (ex: gcc 4.2.1 on older Macs) cannot deal with
** such a large integer literal, so we have to encode it.
*/
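// Arithmetic check (added for clarity) for the encoding mentioned above: the
// generated code spells 464269060799999 as (0x1a640<<32)|0x1072fdff, since
// 0x1a640*2^32 = 464268784828416 and 0x1072fdff = 275971583, which sum to
// exactly the maximum DateTime.iJD value quoted above.
func exampleMaxJulianDayMs() int64 {
	return int64(0x1a640)<<32 | int64(0x1072fdff)
}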
// C documentation
//
// /*
// ** Return TRUE if the given julian day number is within range.
// **
// ** The input is the JulianDay times 86400000.
// */
func _validJulianDay(tls *libc.TLS, iJD Tsqlite3_int64) (r int32) {
	return libc.BoolInt32(iJD >= 0 && iJD <= libc.Int64FromInt32(0x1a640)<<libc.Int32FromInt32(32)|libc.Int64FromUint32(0x1072fdff))
}
// C documentation
//
// /*
// ** Compute the Year, Month, and Day from the julian day number.
// */
func _computeYMD(tls *libc.TLS, p uintptr) {
	var A, B, C, D, E, X1, Z, v1, v2 int32
	_, _, _, _, _, _, _, _, _ = A, B, C, D, E, X1, Z, v1, v2
	if (*TDateTime)(unsafe.Pointer(p)).FvalidYMD != 0 {
		return
	}
	if !((*TDateTime)(unsafe.Pointer(p)).FvalidJD != 0) {
		(*TDateTime)(unsafe.Pointer(p)).FY = int32(2000) /* If no JD, assume 2000-Jan-01 */
		(*TDateTime)(unsafe.Pointer(p)).FM = int32(1)
		(*TDateTime)(unsafe.Pointer(p)).FD = int32(1)
	} else {
		if !(_validJulianDay(tls, (*TDateTime)(unsafe.Pointer(p)).FiJD) != 0) {
			_datetimeError(tls, p)
			return
		} else {
			Z = int32(((*TDateTime)(unsafe.Pointer(p)).FiJD + libc.Int64FromInt32(43200000)) / libc.Int64FromInt32(86400000))
			A = int32((float64(Z) - libc.Float64FromFloat64(1.86721625e+06)) / libc.Float64FromFloat64(36524.25))
			A = Z + int32(1) + A - A/int32(4)
			B = A + int32(1524)
			C = int32((float64(B) - libc.Float64FromFloat64(122.1)) / libc.Float64FromFloat64(365.25))
			D = int32(36525) * (C & int32(32767)) / int32(100)
			E = int32(float64(B-D) / libc.Float64FromFloat64(30.6001))
			X1 = int32(libc.Float64FromFloat64(30.6001) * float64(E))
			(*TDateTime)(unsafe.Pointer(p)).FD = B - D - X1
			if E < int32(14) {
				v1 = E - int32(1)
			} else {
				v1 = E - int32(13)
			}
			(*TDateTime)(unsafe.Pointer(p)).FM = v1
			if (*TDateTime)(unsafe.Pointer(p)).FM > int32(2) {
				v2 = C - int32(4716)
			} else {
				v2 = C - int32(4715)
			}
			(*TDateTime)(unsafe.Pointer(p)).FY = v2
		}
	}
	(*TDateTime)(unsafe.Pointer(p)).FvalidYMD = int8(1)
}
// C documentation
//
// /*
// ** Compute the Hour, Minute, and Seconds from the julian day number.
// */
func _computeHMS(tls *libc.TLS, p uintptr) {
var day_min, day_ms int32
_, _ = day_min, day_ms /* milliseconds, minutes into the day */
if (*TDateTime)(unsafe.Pointer(p)).FvalidHMS != 0 {
return
}
_computeJD(tls, p)
day_ms = int32(((*TDateTime)(unsafe.Pointer(p)).FiJD + libc.Int64FromInt32(43200000)) % libc.Int64FromInt32(86400000))
(*TDateTime)(unsafe.Pointer(p)).Fs = float64(day_ms%libc.Int32FromInt32(60000)) / float64(1000)
day_min = day_ms / int32(60000)
(*TDateTime)(unsafe.Pointer(p)).Fm = day_min % int32(60)
(*TDateTime)(unsafe.Pointer(p)).Fh = day_min / int32(60)
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = int8(1)
}
// C documentation
//
// /*
// ** Compute both YMD and HMS
// */
func _computeYMD_HMS(tls *libc.TLS, p uintptr) {
_computeYMD(tls, p)
_computeHMS(tls, p)
}
// C documentation
//
// /*
// ** Clear the YMD and HMS and the TZ
// */
func _clearYMD_HMS_TZ(tls *libc.TLS, p uintptr) {
(*TDateTime)(unsafe.Pointer(p)).FvalidYMD = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidTZ = 0
}
/*
** On recent Windows platforms, the localtime_s() function is available
** as part of the "Secure CRT". It is essentially equivalent to
** localtime_r() available under most POSIX platforms, except that the
** order of the parameters is reversed.
**
** See http://msdn.microsoft.com/en-us/library/a442x3ye(VS.80).aspx.
**
** If the user has not indicated to use localtime_r() or localtime_s()
** already, check for an MSVC build environment that provides
** localtime_s().
*/
// C documentation
//
// /*
// ** The following routine implements the rough equivalent of localtime_r()
// ** using whatever operating-system specific localtime facility that
// ** is available. This routine returns 0 on success and
// ** non-zero on any kind of error.
// **
// ** If the sqlite3GlobalConfig.bLocaltimeFault variable is non-zero then this
// ** routine will always fail. If bLocaltimeFault is nonzero and
// ** sqlite3GlobalConfig.xAltLocaltime is not NULL, then xAltLocaltime() is
// ** invoked in place of the OS-defined localtime() function.
// **
// ** EVIDENCE-OF: R-62172-00036 In this implementation, the standard C
// ** library function localtime_r() is used to assist in the calculation of
// ** local time.
// */
func _osLocaltime(tls *libc.TLS, t uintptr, pTm uintptr) (r int32) {
var mutex, pX uintptr
var rc int32
_, _, _ = mutex, pX, rc
mutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MAIN))
Xsqlite3_mutex_enter(tls, mutex)
pX = libc.Xlocaltime(tls, t)
if _sqlite3Config.FbLocaltimeFault != 0 {
if _sqlite3Config.FxAltLocaltime != uintptr(0) && 0 == (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.FxAltLocaltime})))(tls, t, pTm) {
pX = pTm
} else {
pX = uintptr(0)
}
}
if pX != 0 {
*(*Ttm)(unsafe.Pointer(pTm)) = *(*Ttm)(unsafe.Pointer(pX))
}
Xsqlite3_mutex_leave(tls, mutex)
rc = libc.BoolInt32(pX == uintptr(0))
return rc
}
// C documentation
//
// /*
// ** Assuming the input DateTime is UTC, move it to its localtime equivalent.
// */
func _toLocaltime(tls *libc.TLS, p uintptr, pCtx uintptr) (r int32) {
bp := tls.Alloc(112)
defer tls.Free(112)
var iYearDiff int32
var _ /* sLocal at bp+8 */ Ttm
var _ /* t at bp+0 */ Ttime_t
var _ /* x at bp+64 */ TDateTime
_ = iYearDiff
/* Initialize the contents of sLocal to avoid a compiler warning. */
libc.Xmemset(tls, bp+8, 0, uint64(56))
_computeJD(tls, p)
if (*TDateTime)(unsafe.Pointer(p)).FiJD < libc.Int64FromInt32(2108667600)*libc.Int64FromInt32(100000) || (*TDateTime)(unsafe.Pointer(p)).FiJD > libc.Int64FromInt32(2130141456)*libc.Int64FromInt32(100000) {
/* EVIDENCE-OF: R-55269-29598 The localtime_r() C function normally only
** works for years between 1970 and 2037. For dates outside this range,
** SQLite attempts to map the year into an equivalent year within this
** range, do the calculation, then map the year back.
*/
*(*TDateTime)(unsafe.Pointer(bp + 64)) = *(*TDateTime)(unsafe.Pointer(p))
_computeYMD_HMS(tls, bp+64)
iYearDiff = int32(2000) + (*(*TDateTime)(unsafe.Pointer(bp + 64))).FY%int32(4) - (*(*TDateTime)(unsafe.Pointer(bp + 64))).FY
(*(*TDateTime)(unsafe.Pointer(bp + 64))).FY += iYearDiff
(*(*TDateTime)(unsafe.Pointer(bp + 64))).FvalidJD = 0
_computeJD(tls, bp+64)
*(*Ttime_t)(unsafe.Pointer(bp)) = (*(*TDateTime)(unsafe.Pointer(bp + 64))).FiJD/libc.Int64FromInt32(1000) - libc.Int64FromInt32(21086676)*libc.Int64FromInt32(10000)
} else {
iYearDiff = 0
*(*Ttime_t)(unsafe.Pointer(bp)) = (*TDateTime)(unsafe.Pointer(p)).FiJD/libc.Int64FromInt32(1000) - libc.Int64FromInt32(21086676)*libc.Int64FromInt32(10000)
}
if _osLocaltime(tls, bp, bp+8) != 0 {
Xsqlite3_result_error(tls, pCtx, __ccgo_ts+1209, -int32(1))
return int32(SQLITE_ERROR)
}
(*TDateTime)(unsafe.Pointer(p)).FY = (*(*Ttm)(unsafe.Pointer(bp + 8))).Ftm_year + int32(1900) - iYearDiff
(*TDateTime)(unsafe.Pointer(p)).FM = (*(*Ttm)(unsafe.Pointer(bp + 8))).Ftm_mon + int32(1)
(*TDateTime)(unsafe.Pointer(p)).FD = (*(*Ttm)(unsafe.Pointer(bp + 8))).Ftm_mday
(*TDateTime)(unsafe.Pointer(p)).Fh = (*(*Ttm)(unsafe.Pointer(bp + 8))).Ftm_hour
(*TDateTime)(unsafe.Pointer(p)).Fm = (*(*Ttm)(unsafe.Pointer(bp + 8))).Ftm_min
(*TDateTime)(unsafe.Pointer(p)).Fs = float64((*(*Ttm)(unsafe.Pointer(bp + 8))).Ftm_sec) + float64((*TDateTime)(unsafe.Pointer(p)).FiJD%libc.Int64FromInt32(1000))*float64(0.001)
(*TDateTime)(unsafe.Pointer(p)).FvalidYMD = int8(1)
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = int8(1)
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidTZ = 0
(*TDateTime)(unsafe.Pointer(p)).FisError = 0
return SQLITE_OK
}
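// Worked example (added for clarity) of the year mapping used above for dates
// that fall outside the 1970..2037 window handled portably by localtime_r():
// the year is shifted to 2000 + Y%4 so the leap-year pattern is preserved,
// the conversion runs on the shifted date, and the shift is undone when p->Y
// is rebuilt from tm_year. For Y=2118 the shift is 2000+2-2118 = -116, so the
// calculation is carried out on 2002.
func exampleLocaltimeYearShift(Y int32) (mappedYear, iYearDiff int32) {
	iYearDiff = int32(2000) + Y%int32(4) - Y
	return Y + iYearDiff, iYearDiff
}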
// C documentation
//
// /*
// ** The following table defines various date transformations of the form
// **
// ** 'NNN days'
// **
// ** Where NNN is an arbitrary floating-point number and "days" can be one
// ** of several units of time.
// */
var _aXformType = [6]struct {
FnName Tu8
FzName [7]int8
FrLimit float32
FrXform float32
}{
0: {
FnName: uint8(6),
FzName: [7]int8{'s', 'e', 'c', 'o', 'n', 'd'},
FrLimit: float32(4.6427e+14),
FrXform: float32(1),
},
1: {
FnName: uint8(6),
FzName: [7]int8{'m', 'i', 'n', 'u', 't', 'e'},
FrLimit: float32(7.7379e+12),
FrXform: float32(60),
},
2: {
FnName: uint8(4),
FzName: [7]int8{'h', 'o', 'u', 'r'},
FrLimit: float32(1.2897e+11),
FrXform: float32(3600),
},
3: {
FnName: uint8(3),
FzName: [7]int8{'d', 'a', 'y'},
FrLimit: float32(5.373485e+06),
FrXform: float32(86400),
},
4: {
FnName: uint8(5),
FzName: [7]int8{'m', 'o', 'n', 't', 'h'},
FrLimit: float32(176546),
FrXform: float32(2.592e+06),
},
5: {
FnName: uint8(4),
FzName: [7]int8{'y', 'e', 'a', 'r'},
FrLimit: float32(14713),
FrXform: float32(3.1536e+07),
},
}
// C documentation
//
// /*
// ** If the DateTime p is raw number, try to figure out if it is
// ** a julian day number of a unix timestamp. Set the p value
// ** appropriately.
// */
func _autoAdjustDate(tls *libc.TLS, p uintptr) {
var r float64
_ = r
if !((*TDateTime)(unsafe.Pointer(p)).FrawS != 0) || (*TDateTime)(unsafe.Pointer(p)).FvalidJD != 0 {
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
} else {
if (*TDateTime)(unsafe.Pointer(p)).Fs >= float64(int64(-libc.Int32FromInt32(21086676))*libc.Int64FromInt32(10000)) && (*TDateTime)(unsafe.Pointer(p)).Fs <= float64(libc.Int64FromInt32(25340230)*libc.Int64FromInt32(10000)+libc.Int64FromInt32(799)) {
r = (*TDateTime)(unsafe.Pointer(p)).Fs*float64(1000) + float64(2.1086676e+14)
_clearYMD_HMS_TZ(tls, p)
(*TDateTime)(unsafe.Pointer(p)).FiJD = int64(r + libc.Float64FromFloat64(0.5))
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = int8(1)
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
}
}
}
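// Worked example (added for clarity) for the raw-number handling above:
// _setRawDateNumber already accepts 0..5373484.5 as julian day numbers, so
// this adjustment only fires for values outside that range (including
// negatives), which are reinterpreted as seconds since the unix epoch. For
// 1700000000 the result is iJD = 1700000000*1000 + 210866760000000 =
// 212566760000000 ms, i.e. 2023-11-14 22:13:20 UTC.
func exampleUnixSecondsToJDMs(seconds int64) int64 {
	const unixEpochJDMs = 210866760000000 // iJD of 1970-01-01 00:00:00 UTC
	return seconds*1000 + unixEpochJDMs
}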
// C documentation
//
// /*
// ** Process a modifier to a date-time stamp. The modifiers are
// ** as follows:
// **
// ** NNN days
// ** NNN hours
// ** NNN minutes
// ** NNN.NNNN seconds
// ** NNN months
// ** NNN years
// ** start of month
// ** start of year
// ** start of week
// ** start of day
// ** weekday N
// ** unixepoch
// ** localtime
// ** utc
// **
// ** Return 0 on success and 1 if there is any kind of error. If the error
// ** is in a system call (i.e. localtime()), then an error message is written
// ** to context pCtx. If the error is an unrecognized modifier, no error is
// ** written to pCtx.
// */
func _parseModifier(tls *libc.TLS, pCtx uintptr, z uintptr, n int32, p uintptr, idx int32) (r int32) {
bp := tls.Alloc(160)
defer tls.Free(160)
var Z, day Tsqlite3_int64
var cnt, i, rc, x, y, v12, v2, v5, v7, v9 int32
var iErr, iGuess, iOrigJD, v1 Ti64
var rRounder, v10 float64
var z0 int8
var z2 uintptr
var v3, v6 bool
var _ /* D at bp+64 */ int32
var _ /* M at bp+60 */ int32
var _ /* Y at bp+56 */ int32
var _ /* h at bp+68 */ int32
var _ /* m at bp+72 */ int32
var _ /* new at bp+8 */ TDateTime
var _ /* r at bp+0 */ float64
var _ /* tx at bp+80 */ TDateTime
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = Z, cnt, day, i, iErr, iGuess, iOrigJD, rRounder, rc, x, y, z0, z2, v1, v10, v12, v2, v3, v5, v6, v7, v9
rc = int32(1)
switch int32(_sqlite3UpperToLower[uint8(*(*int8)(unsafe.Pointer(z)))]) {
case int32('a'):
/*
** auto
**
** If rawS is available, then interpret as a julian day number, or
** a unix timestamp, depending on its magnitude.
*/
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1232) == 0 {
if idx > int32(1) {
return int32(1)
} /* IMP: R-33611-57934 */
_autoAdjustDate(tls, p)
rc = 0
}
case int32('j'):
/*
** julianday
**
** Always interpret the prior number as a julian-day value. If this
** is not the first modifier, or if the prior argument is not a numeric
** value in the allowed range of julian day numbers understood by
** SQLite (0..5373484.5) then the result will be NULL.
*/
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1237) == 0 {
if idx > int32(1) {
return int32(1)
} /* IMP: R-31176-64601 */
if (*TDateTime)(unsafe.Pointer(p)).FvalidJD != 0 && (*TDateTime)(unsafe.Pointer(p)).FrawS != 0 {
rc = 0
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
}
}
case int32('l'):
/* localtime
**
** Assuming the current time value is UTC (a.k.a. GMT), shift it to
** show local time.
*/
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1247) == 0 && _sqlite3NotPureFunc(tls, pCtx) != 0 {
rc = _toLocaltime(tls, p, pCtx)
}
case int32('u'):
/*
** unixepoch
**
** Treat the current value of p->s as the number of
** seconds since 1970. Convert to a real julian day number.
*/
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1257) == 0 && (*TDateTime)(unsafe.Pointer(p)).FrawS != 0 {
if idx > int32(1) {
return int32(1)
} /* IMP: R-49255-55373 */
*(*float64)(unsafe.Pointer(bp)) = (*TDateTime)(unsafe.Pointer(p)).Fs*float64(1000) + float64(2.1086676e+14)
if *(*float64)(unsafe.Pointer(bp)) >= float64(0) && *(*float64)(unsafe.Pointer(bp)) < float64(4.642690608e+14) {
_clearYMD_HMS_TZ(tls, p)
(*TDateTime)(unsafe.Pointer(p)).FiJD = int64(*(*float64)(unsafe.Pointer(bp)) + libc.Float64FromFloat64(0.5))
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = int8(1)
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
rc = 0
}
} else {
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1267) == 0 && _sqlite3NotPureFunc(tls, pCtx) != 0 {
if int32((*TDateTime)(unsafe.Pointer(p)).FtzSet) == 0 { /* Guess at the corresponding utc time */
cnt = 0 /* Guess is off by this much */
_computeJD(tls, p)
v1 = (*TDateTime)(unsafe.Pointer(p)).FiJD
iOrigJD = v1
iGuess = v1
iErr = 0
for {
libc.Xmemset(tls, bp+8, 0, uint64(48))
iGuess -= iErr
(*(*TDateTime)(unsafe.Pointer(bp + 8))).FiJD = iGuess
(*(*TDateTime)(unsafe.Pointer(bp + 8))).FvalidJD = int8(1)
rc = _toLocaltime(tls, bp+8, pCtx)
if rc != 0 {
return rc
}
_computeJD(tls, bp+8)
iErr = (*(*TDateTime)(unsafe.Pointer(bp + 8))).FiJD - iOrigJD
goto _4
_4:
;
if v3 = iErr != 0; v3 {
v2 = cnt
cnt++
}
if !(v3 && v2 < int32(3)) {
break
}
}
libc.Xmemset(tls, p, 0, uint64(48))
(*TDateTime)(unsafe.Pointer(p)).FiJD = iGuess
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = int8(1)
(*TDateTime)(unsafe.Pointer(p)).FtzSet = int8(1)
}
rc = SQLITE_OK
}
}
case int32('w'):
/*
** weekday N
**
** Move the date to the same time on the next occurrence of
** weekday N where 0==Sunday, 1==Monday, and so forth. If the
** date is already on the appropriate weekday, this is a no-op.
*/
if v6 = Xsqlite3_strnicmp(tls, z, __ccgo_ts+1271, int32(8)) == 0 && _sqlite3AtoF(tls, z+8, bp, _sqlite3Strlen30(tls, z+8), uint8(SQLITE_UTF8)) > 0 && *(*float64)(unsafe.Pointer(bp)) >= float64(0) && *(*float64)(unsafe.Pointer(bp)) < float64(7); v6 {
v5 = int32(*(*float64)(unsafe.Pointer(bp)))
n = v5
}
if v6 && float64(v5) == *(*float64)(unsafe.Pointer(bp)) {
_computeYMD_HMS(tls, p)
(*TDateTime)(unsafe.Pointer(p)).FvalidTZ = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
_computeJD(tls, p)
Z = ((*TDateTime)(unsafe.Pointer(p)).FiJD + int64(129600000)) / int64(86400000) % int64(7)
if Z > int64(n) {
Z -= int64(7)
}
*(*Tsqlite3_int64)(unsafe.Pointer(p)) += (int64(n) - Z) * int64(86400000)
_clearYMD_HMS_TZ(tls, p)
rc = 0
}
case int32('s'):
/*
** start of TTTTT
**
** Move the date backwards to the beginning of the current day,
** or month or year.
**
** subsecond
** subsec
**
** Show subsecond precision in the output of datetime() and
** unixepoch() and strftime('%s').
*/
if Xsqlite3_strnicmp(tls, z, __ccgo_ts+1280, int32(9)) != 0 {
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1192) == 0 || Xsqlite3_stricmp(tls, z, __ccgo_ts+1199) == 0 {
(*TDateTime)(unsafe.Pointer(p)).FuseSubsec = int8(1)
rc = 0
}
break
}
if !((*TDateTime)(unsafe.Pointer(p)).FvalidJD != 0) && !((*TDateTime)(unsafe.Pointer(p)).FvalidYMD != 0) && !((*TDateTime)(unsafe.Pointer(p)).FvalidHMS != 0) {
break
}
z += uintptr(9)
_computeYMD(tls, p)
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = int8(1)
v7 = libc.Int32FromInt32(0)
(*TDateTime)(unsafe.Pointer(p)).Fm = v7
(*TDateTime)(unsafe.Pointer(p)).Fh = v7
(*TDateTime)(unsafe.Pointer(p)).Fs = float64(0)
(*TDateTime)(unsafe.Pointer(p)).FrawS = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidTZ = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1290) == 0 {
(*TDateTime)(unsafe.Pointer(p)).FD = int32(1)
rc = 0
} else {
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1296) == 0 {
(*TDateTime)(unsafe.Pointer(p)).FM = int32(1)
(*TDateTime)(unsafe.Pointer(p)).FD = int32(1)
rc = 0
} else {
if Xsqlite3_stricmp(tls, z, __ccgo_ts+1301) == 0 {
rc = 0
}
}
}
case int32('+'):
fallthrough
case int32('-'):
fallthrough
case int32('0'):
fallthrough
case int32('1'):
fallthrough
case int32('2'):
fallthrough
case int32('3'):
fallthrough
case int32('4'):
fallthrough
case int32('5'):
fallthrough
case int32('6'):
fallthrough
case int32('7'):
fallthrough
case int32('8'):
fallthrough
case int32('9'):
z2 = z
z0 = *(*int8)(unsafe.Pointer(z))
n = int32(1)
for {
if !(*(*int8)(unsafe.Pointer(z + uintptr(n))) != 0) {
break
}
if int32(*(*int8)(unsafe.Pointer(z + uintptr(n)))) == int32(':') {
break
}
if int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(n))))])&int32(0x01) != 0 {
break
}
if int32(*(*int8)(unsafe.Pointer(z + uintptr(n)))) == int32('-') {
if n == int32(5) && _getDigits(tls, z+1, __ccgo_ts+1305, libc.VaList(bp+136, bp+56)) == int32(1) {
break
}
if n == int32(6) && _getDigits(tls, z+1, __ccgo_ts+1309, libc.VaList(bp+136, bp+56)) == int32(1) {
break
}
}
goto _8
_8:
;
n++
}
if _sqlite3AtoF(tls, z, bp, n, uint8(SQLITE_UTF8)) <= 0 {
break
}
if int32(*(*int8)(unsafe.Pointer(z + uintptr(n)))) == int32('-') {
/* A modifier of the form (+|-)YYYY-MM-DD adds or subtracts the
** specified number of years, months, and days. MM is limited to
** the range 0-11 and DD is limited to 0-30.
*/
if int32(z0) != int32('+') && int32(z0) != int32('-') {
break
} /* Must start with +/- */
if n == int32(5) {
if _getDigits(tls, z+1, __ccgo_ts+1313, libc.VaList(bp+136, bp+56, bp+60, bp+64)) != int32(3) {
break
}
} else {
if _getDigits(tls, z+1, __ccgo_ts+1325, libc.VaList(bp+136, bp+56, bp+60, bp+64)) != int32(3) {
break
}
z++
}
if *(*int32)(unsafe.Pointer(bp + 60)) >= int32(12) {
break
} /* M range 0..11 */
if *(*int32)(unsafe.Pointer(bp + 64)) >= int32(31) {
break
} /* D range 0..30 */
_computeYMD_HMS(tls, p)
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
if int32(z0) == int32('-') {
*(*int32)(unsafe.Pointer(p + 8)) -= *(*int32)(unsafe.Pointer(bp + 56))
*(*int32)(unsafe.Pointer(p + 12)) -= *(*int32)(unsafe.Pointer(bp + 60))
*(*int32)(unsafe.Pointer(bp + 64)) = -*(*int32)(unsafe.Pointer(bp + 64))
} else {
*(*int32)(unsafe.Pointer(p + 8)) += *(*int32)(unsafe.Pointer(bp + 56))
*(*int32)(unsafe.Pointer(p + 12)) += *(*int32)(unsafe.Pointer(bp + 60))
}
if (*TDateTime)(unsafe.Pointer(p)).FM > 0 {
v9 = ((*TDateTime)(unsafe.Pointer(p)).FM - int32(1)) / int32(12)
} else {
v9 = ((*TDateTime)(unsafe.Pointer(p)).FM - int32(12)) / int32(12)
}
x = v9
*(*int32)(unsafe.Pointer(p + 8)) += x
*(*int32)(unsafe.Pointer(p + 12)) -= x * int32(12)
_computeJD(tls, p)
(*TDateTime)(unsafe.Pointer(p)).FvalidHMS = 0
(*TDateTime)(unsafe.Pointer(p)).FvalidYMD = 0
*(*Tsqlite3_int64)(unsafe.Pointer(p)) += int64(*(*int32)(unsafe.Pointer(bp + 64))) * int64(86400000)
if int32(*(*int8)(unsafe.Pointer(z + 11))) == 0 {
rc = 0
break
}
if int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + 11)))])&int32(0x01) != 0 && _getDigits(tls, z+12, __ccgo_ts+1164, libc.VaList(bp+136, bp+68, bp+72)) == int32(2) {
z2 = z + 12
n = int32(2)
} else {
break
}
}
if int32(*(*int8)(unsafe.Pointer(z2 + uintptr(n)))) == int32(':') {
if !(int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z2)))])&libc.Int32FromInt32(0x04) != 0) {
z2++
}
libc.Xmemset(tls, bp+80, 0, uint64(48))
if _parseHhMmSs(tls, z2, bp+80) != 0 {
break
}
_computeJD(tls, bp+80)
(*(*TDateTime)(unsafe.Pointer(bp + 80))).FiJD -= int64(43200000)
day = (*(*TDateTime)(unsafe.Pointer(bp + 80))).FiJD / int64(86400000)
(*(*TDateTime)(unsafe.Pointer(bp + 80))).FiJD -= day * int64(86400000)
if int32(z0) == int32('-') {
(*(*TDateTime)(unsafe.Pointer(bp + 80))).FiJD = -(*(*TDateTime)(unsafe.Pointer(bp + 80))).FiJD
}
_computeJD(tls, p)
_clearYMD_HMS_TZ(tls, p)
*(*Tsqlite3_int64)(unsafe.Pointer(p)) += (*(*TDateTime)(unsafe.Pointer(bp + 80))).FiJD
rc = 0
break
}
/* If control reaches this point, it means the transformation is
** one of the forms like "+NNN days". */
z += uintptr(n)
for int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z)))])&int32(0x01) != 0 {
z++
}
n = _sqlite3Strlen30(tls, z)
if n > int32(10) || n < int32(3) {
break
}
if int32(_sqlite3UpperToLower[uint8(*(*int8)(unsafe.Pointer(z + uintptr(n-int32(1)))))]) == int32('s') {
n--
}
_computeJD(tls, p)
if *(*float64)(unsafe.Pointer(bp)) < libc.Float64FromInt32(0) {
v10 = -libc.Float64FromFloat64(0.5)
} else {
v10 = +libc.Float64FromFloat64(0.5)
}
rRounder = v10
i = 0
for {
if !(i < int32(libc.Uint64FromInt64(96)/libc.Uint64FromInt64(16))) {
break
}
if int32(_aXformType[i].FnName) == n && Xsqlite3_strnicmp(tls, uintptr(unsafe.Pointer(&_aXformType))+uintptr(i)*16+1, z, n) == 0 && *(*float64)(unsafe.Pointer(bp)) > float64(-_aXformType[i].FrLimit) && *(*float64)(unsafe.Pointer(bp)) < float64(_aXformType[i].FrLimit) {
switch i {
case int32(4): /* Special processing to add months */
_computeYMD_HMS(tls, p)
*(*int32)(unsafe.Pointer(p + 12)) += int32(*(*float64)(unsafe.Pointer(bp)))
if (*TDateTime)(unsafe.Pointer(p)).FM > 0 {
v12 = ((*TDateTime)(unsafe.Pointer(p)).FM - int32(1)) / int32(12)
} else {
v12 = ((*TDateTime)(unsafe.Pointer(p)).FM - int32(12)) / int32(12)
}
x = v12
*(*int32)(unsafe.Pointer(p + 8)) += x
*(*int32)(unsafe.Pointer(p + 12)) -= x * int32(12)
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
*(*float64)(unsafe.Pointer(bp)) -= float64(int32(*(*float64)(unsafe.Pointer(bp))))
case int32(5): /* Special processing to add years */
y = int32(*(*float64)(unsafe.Pointer(bp)))
_computeYMD_HMS(tls, p)
*(*int32)(unsafe.Pointer(p + 8)) += y
(*TDateTime)(unsafe.Pointer(p)).FvalidJD = 0
*(*float64)(unsafe.Pointer(bp)) -= float64(int32(*(*float64)(unsafe.Pointer(bp))))
break
}
_computeJD(tls, p)
*(*Tsqlite3_int64)(unsafe.Pointer(p)) += int64(*(*float64)(unsafe.Pointer(bp))*libc.Float64FromFloat64(1000)*float64(_aXformType[i].FrXform) + rRounder)
rc = 0
break
}
goto _11
_11:
;
i++
}
_clearYMD_HMS_TZ(tls, p)
default:
break
}
return rc
}
// C documentation
//
// /*
// ** Process time function arguments. argv[0] is a date-time stamp.
// ** argv[1] and following are modifiers. Parse them all and write
// ** the resulting time into the DateTime structure p. Return 0
// ** on success and 1 if there are any errors.
// **
// ** If there are zero parameters (if even argv[0] is undefined)
// ** then assume a default value of "now" for argv[0].
// */
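// For illustration, the zero-argument default described above means that, at
// the SQL level, the following two calls are equivalent:
//
//	SELECT datetime();        -- same as ...
//	SELECT datetime('now');   -- ... the current UTC date and time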
func _isDate(tls *libc.TLS, context uintptr, argc int32, argv uintptr, p uintptr) (r int32) {
var eType, i, n, v1 int32
var z uintptr
_, _, _, _, _ = eType, i, n, z, v1
libc.Xmemset(tls, p, 0, uint64(48))
if argc == 0 {
if !(_sqlite3NotPureFunc(tls, context) != 0) {
return int32(1)
}
return _setDateTimeToCurrent(tls, context, p)
}
v1 = Xsqlite3_value_type(tls, *(*uintptr)(unsafe.Pointer(argv)))
eType = v1
if v1 == int32(SQLITE_FLOAT) || eType == int32(SQLITE_INTEGER) {
_setRawDateNumber(tls, p, Xsqlite3_value_double(tls, *(*uintptr)(unsafe.Pointer(argv))))
} else {
z = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv)))
if !(z != 0) || _parseDateOrTime(tls, context, z, p) != 0 {
return int32(1)
}
}
i = int32(1)
for {
if !(i < argc) {
break
}
z = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8)))
n = Xsqlite3_value_bytes(tls, *(*uintptr)(unsafe.Pointer(argv + uintptr(i)*8)))
if z == uintptr(0) || _parseModifier(tls, context, z, n, p, i) != 0 {
return int32(1)
}
goto _2
_2:
;
i++
}
_computeJD(tls, p)
if (*TDateTime)(unsafe.Pointer(p)).FisError != 0 || !(_validJulianDay(tls, (*TDateTime)(unsafe.Pointer(p)).FiJD) != 0) {
return int32(1)
}
if argc == int32(1) && (*TDateTime)(unsafe.Pointer(p)).FvalidYMD != 0 && (*TDateTime)(unsafe.Pointer(p)).FD > int32(28) {
/* Make sure a YYYY-MM-DD is normalized.
** Example: 2023-02-31 -> 2023-03-03 */
(*TDateTime)(unsafe.Pointer(p)).FvalidYMD = 0
}
return 0
}
/*
** The following routines implement the various date and time functions
** of SQLite.
*/
// C documentation
//
// /*
// ** julianday( TIMESTRING, MOD, MOD, ...)
// **
// ** Return the julian day number of the date specified in the arguments
// */
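// For illustration, typical SQL-level uses of julianday():
//
//	SELECT julianday('2000-01-01 12:00:00');            -- 2451545.0
//	SELECT julianday('1970-01-01 00:00:00');            -- 2440587.5 (the Unix epoch)
//	SELECT julianday('now') - julianday('2000-01-01');  -- days elapsed since 2000-01-01 00:00 UTC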
func _juliandayFunc(tls *libc.TLS, context uintptr, argc int32, argv uintptr) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* x at bp+0 */ TDateTime
if _isDate(tls, context, argc, argv, bp) == 0 {
_computeJD(tls, bp)
Xsqlite3_result_double(tls, context, float64((*(*TDateTime)(unsafe.Pointer(bp))).FiJD)/float64(8.64e+07))
}
}
// C documentation
//
// /*
// ** unixepoch( TIMESTRING, MOD, MOD, ...)
// **
// ** Return the number of seconds (including fractional seconds) since
// ** the unix epoch of 1970-01-01 00:00:00 GMT.
// */
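// For illustration, including the 'subsec' modifier handled further below:
//
//	SELECT unixepoch('1970-01-01 00:00:00');  -- 0
//	SELECT unixepoch('2000-01-01 00:00:00');  -- 946684800
//	SELECT unixepoch('now', 'subsec');        -- current time including fractional seconds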
func _unixepochFunc(tls *libc.TLS, context uintptr, argc int32, argv uintptr) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* x at bp+0 */ TDateTime
if _isDate(tls, context, argc, argv, bp) == 0 {
_computeJD(tls, bp)
if (*(*TDateTime)(unsafe.Pointer(bp))).FuseSubsec != 0 {
Xsqlite3_result_double(tls, context, float64((*(*TDateTime)(unsafe.Pointer(bp))).FiJD-libc.Int64FromInt32(21086676)*libc.Int64FromInt32(10000000))/float64(1000))
} else {
Xsqlite3_result_int64(tls, context, (*(*TDateTime)(unsafe.Pointer(bp))).FiJD/int64(1000)-libc.Int64FromInt32(21086676)*libc.Int64FromInt32(10000))
}
}
}
// C documentation
//
// /*
// ** datetime( TIMESTRING, MOD, MOD, ...)
// **
// ** Return YYYY-MM-DD HH:MM:SS
// */
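// For illustration, a bare numeric argument is interpreted as a julian day
// number, and out-of-range calendar dates are normalized (see the
// 2023-02-31 example in _isDate above):
//
//	SELECT datetime(2451545.0);     -- '2000-01-01 12:00:00'
//	SELECT datetime('2023-02-31');  -- '2023-03-03 00:00:00'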
func _datetimeFunc(tls *libc.TLS, context uintptr, argc int32, argv uintptr) {
bp := tls.Alloc(80)
defer tls.Free(80)
var Y, n, s int32
var _ /* x at bp+0 */ TDateTime
var _ /* zBuf at bp+48 */ [32]int8
_, _, _ = Y, n, s
if _isDate(tls, context, argc, argv, bp) == 0 {
_computeYMD_HMS(tls, bp)
Y = (*(*TDateTime)(unsafe.Pointer(bp))).FY
if Y < 0 {
Y = -Y
}
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(1)] = int8(int32('0') + Y/int32(1000)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(2)] = int8(int32('0') + Y/int32(100)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(3)] = int8(int32('0') + Y/int32(10)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(4)] = int8(int32('0') + Y%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(5)] = int8('-')
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(6)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FM/int32(10)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(7)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FM%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(8)] = int8('-')
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(9)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FD/int32(10)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(10)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FD%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(11)] = int8(' ')
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(12)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fh/int32(10)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(13)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fh%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(14)] = int8(':')
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(15)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fm/int32(10)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(16)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fm%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(17)] = int8(':')
if (*(*TDateTime)(unsafe.Pointer(bp))).FuseSubsec != 0 {
s = int32(libc.Float64FromFloat64(1000)*(*(*TDateTime)(unsafe.Pointer(bp))).Fs + libc.Float64FromFloat64(0.5))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(18)] = int8(int32('0') + s/int32(10000)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(19)] = int8(int32('0') + s/int32(1000)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(20)] = int8('.')
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(21)] = int8(int32('0') + s/int32(100)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(22)] = int8(int32('0') + s/int32(10)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(23)] = int8(int32('0') + s%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(24)] = 0
n = int32(24)
} else {
s = int32((*(*TDateTime)(unsafe.Pointer(bp))).Fs)
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(18)] = int8(int32('0') + s/int32(10)%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(19)] = int8(int32('0') + s%int32(10))
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[int32(20)] = 0
n = int32(20)
}
if (*(*TDateTime)(unsafe.Pointer(bp))).FY < 0 {
(*(*[32]int8)(unsafe.Pointer(bp + 48)))[0] = int8('-')
Xsqlite3_result_text(tls, context, bp+48, n, uintptr(-libc.Int32FromInt32(1)))
} else {
Xsqlite3_result_text(tls, context, bp+48+1, n-int32(1), uintptr(-libc.Int32FromInt32(1)))
}
}
}
// C documentation
//
// /*
// ** time( TIMESTRING, MOD, MOD, ...)
// **
// ** Return HH:MM:SS
// */
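// For illustration, with and without the 'subsec' modifier:
//
//	SELECT time('2023-05-01 12:34:56.789');            -- '12:34:56'
//	SELECT time('2023-05-01 12:34:56.789', 'subsec');  -- '12:34:56.789'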
func _timeFunc(tls *libc.TLS, context uintptr, argc int32, argv uintptr) {
bp := tls.Alloc(64)
defer tls.Free(64)
var n, s int32
var _ /* x at bp+0 */ TDateTime
var _ /* zBuf at bp+48 */ [16]int8
_, _ = n, s
if _isDate(tls, context, argc, argv, bp) == 0 {
_computeHMS(tls, bp)
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[0] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fh/int32(10)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(1)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fh%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(2)] = int8(':')
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(3)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fm/int32(10)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(4)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).Fm%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(5)] = int8(':')
if (*(*TDateTime)(unsafe.Pointer(bp))).FuseSubsec != 0 {
s = int32(libc.Float64FromFloat64(1000)*(*(*TDateTime)(unsafe.Pointer(bp))).Fs + libc.Float64FromFloat64(0.5))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(6)] = int8(int32('0') + s/int32(10000)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(7)] = int8(int32('0') + s/int32(1000)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(8)] = int8('.')
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(9)] = int8(int32('0') + s/int32(100)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(10)] = int8(int32('0') + s/int32(10)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(11)] = int8(int32('0') + s%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(12)] = 0
n = int32(12)
} else {
s = int32((*(*TDateTime)(unsafe.Pointer(bp))).Fs)
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(6)] = int8(int32('0') + s/int32(10)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(7)] = int8(int32('0') + s%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(8)] = 0
n = int32(8)
}
Xsqlite3_result_text(tls, context, bp+48, n, uintptr(-libc.Int32FromInt32(1)))
}
}
// C documentation
//
// /*
// ** date( TIMESTRING, MOD, MOD, ...)
// **
// ** Return YYYY-MM-DD
// */
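// For illustration, date() composed with modifiers; the common idiom below
// yields the last day of the month containing the given date:
//
//	SELECT date('2024-02-15');                                          -- '2024-02-15'
//	SELECT date('2024-02-15', 'start of month', '+1 month', '-1 day');  -- '2024-02-29'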
func _dateFunc(tls *libc.TLS, context uintptr, argc int32, argv uintptr) {
bp := tls.Alloc(64)
defer tls.Free(64)
var Y int32
var _ /* x at bp+0 */ TDateTime
var _ /* zBuf at bp+48 */ [16]int8
_ = Y
if _isDate(tls, context, argc, argv, bp) == 0 {
_computeYMD(tls, bp)
Y = (*(*TDateTime)(unsafe.Pointer(bp))).FY
if Y < 0 {
Y = -Y
}
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(1)] = int8(int32('0') + Y/int32(1000)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(2)] = int8(int32('0') + Y/int32(100)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(3)] = int8(int32('0') + Y/int32(10)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(4)] = int8(int32('0') + Y%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(5)] = int8('-')
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(6)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FM/int32(10)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(7)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FM%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(8)] = int8('-')
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(9)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FD/int32(10)%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(10)] = int8(int32('0') + (*(*TDateTime)(unsafe.Pointer(bp))).FD%int32(10))
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[int32(11)] = 0
if (*(*TDateTime)(unsafe.Pointer(bp))).FY < 0 {
(*(*[16]int8)(unsafe.Pointer(bp + 48)))[0] = int8('-')
Xsqlite3_result_text(tls, context, bp+48, int32(11), uintptr(-libc.Int32FromInt32(1)))
} else {
Xsqlite3_result_text(tls, context, bp+48+1, int32(10), uintptr(-libc.Int32FromInt32(1)))
}
}
}
// C documentation
//
// /*
// ** strftime( FORMAT, TIMESTRING, MOD, MOD, ...)
// **
// ** Return a string described by FORMAT. Conversions as follows:
// **
// ** %d day of month
// ** %f ** fractional seconds SS.SSS
// ** %H hour 00-24
// ** %j day of year 000-366
// ** %J ** julian day number
// ** %m month 01-12
// ** %M minute 00-59
// ** %s seconds since 1970-01-01
// ** %S seconds 00-59
// ** %w day of week 0-6 Sunday==0
// ** %W week of year 00-53
// ** %Y year 0000-9999
// ** %% %
// */
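// For illustration, a few of the conversions listed above:
//
//	SELECT strftime('%Y-%m-%d %H:%M', '2004-01-01 02:34:56');  -- '2004-01-01 02:34'
//	SELECT strftime('%j', '2024-12-31');                        -- '366' (day of year)
//	SELECT strftime('%s', '2000-01-01');                        -- '946684800'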
func _strftimeFunc(tls *libc.TLS, context uintptr, argc int32, argv uintptr) {
bp := tls.Alloc(160)
defer tls.Free(160)
var c, cf int8
var db, zFmt, v3, v4, v5, v6, v7 uintptr
var h, nDay, wd int32
var i, j, v2 Tsize_t
var iS Ti64
var s float64
var _ /* sRes at bp+48 */ Tsqlite3_str
var _ /* x at bp+0 */ TDateTime
var _ /* y at bp+80 */ TDateTime
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = c, cf, db, h, i, iS, j, nDay, s, wd, zFmt, v2, v3, v4, v5, v6, v7
if argc == 0 {
return
}
zFmt = Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer(argv)))
if zFmt == uintptr(0) || _isDate(tls, context, argc-int32(1), argv+uintptr(1)*8, bp) != 0 {
return
}
db = Xsqlite3_context_db_handle(tls, context)
_sqlite3StrAccumInit(tls, bp+48, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer(db + 136)))
_computeJD(tls, bp)
_computeYMD_HMS(tls, bp)
v2 = libc.Uint64FromInt32(0)
j = v2
i = v2
for {
if !(*(*int8)(unsafe.Pointer(zFmt + uintptr(i))) != 0) {
break
}
if int32(*(*int8)(unsafe.Pointer(zFmt + uintptr(i)))) != int32('%') {
goto _1
}
if j < i {
Xsqlite3_str_append(tls, bp+48, zFmt+uintptr(j), int32(i-j))
}
i++
j = i + uint64(1)
cf = *(*int8)(unsafe.Pointer(zFmt + uintptr(i)))
switch int32(cf) {
case int32('d'): /* Fall thru */
fallthrough
case int32('e'):
if int32(cf) == int32('d') {
v3 = __ccgo_ts + 1337
} else {
v3 = __ccgo_ts + 1342
}
Xsqlite3_str_appendf(tls, bp+48, v3, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).FD))
case int32('f'):
s = (*(*TDateTime)(unsafe.Pointer(bp))).Fs
if s > float64(59.999) {
s = float64(59.999)
}
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1346, libc.VaList(bp+136, s))
case int32('F'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1353, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).FY, (*(*TDateTime)(unsafe.Pointer(bp))).FM, (*(*TDateTime)(unsafe.Pointer(bp))).FD))
case int32('H'):
fallthrough
case int32('k'):
if int32(cf) == int32('H') {
v4 = __ccgo_ts + 1337
} else {
v4 = __ccgo_ts + 1342
}
Xsqlite3_str_appendf(tls, bp+48, v4, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).Fh))
case int32('I'): /* Fall thru */
fallthrough
case int32('l'):
h = (*(*TDateTime)(unsafe.Pointer(bp))).Fh
if h > int32(12) {
h -= int32(12)
}
if h == 0 {
h = int32(12)
}
if int32(cf) == int32('I') {
v5 = __ccgo_ts + 1337
} else {
v5 = __ccgo_ts + 1342
}
Xsqlite3_str_appendf(tls, bp+48, v5, libc.VaList(bp+136, h))
case int32('W'): /* Fall thru */
fallthrough
case int32('j'): /* Number of days since 1st day of year */
*(*TDateTime)(unsafe.Pointer(bp + 80)) = *(*TDateTime)(unsafe.Pointer(bp))
(*(*TDateTime)(unsafe.Pointer(bp + 80))).FvalidJD = 0
(*(*TDateTime)(unsafe.Pointer(bp + 80))).FM = int32(1)
(*(*TDateTime)(unsafe.Pointer(bp + 80))).FD = int32(1)
_computeJD(tls, bp+80)
nDay = int32(((*(*TDateTime)(unsafe.Pointer(bp))).FiJD - (*(*TDateTime)(unsafe.Pointer(bp + 80))).FiJD + libc.Int64FromInt32(43200000)) / libc.Int64FromInt32(86400000))
if int32(cf) == int32('W') { /* 0=Monday, 1=Tuesday, ... 6=Sunday */
wd = int32(((*(*TDateTime)(unsafe.Pointer(bp))).FiJD + libc.Int64FromInt32(43200000)) / libc.Int64FromInt32(86400000) % libc.Int64FromInt32(7))
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1337, libc.VaList(bp+136, (nDay+int32(7)-wd)/int32(7)))
} else {
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1368, libc.VaList(bp+136, nDay+int32(1)))
}
case int32('J'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1373, libc.VaList(bp+136, float64((*(*TDateTime)(unsafe.Pointer(bp))).FiJD)/float64(8.64e+07)))
case int32('m'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1337, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).FM))
case int32('M'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1337, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).Fm))
case int32('p'): /* Fall thru */
fallthrough
case int32('P'):
if (*(*TDateTime)(unsafe.Pointer(bp))).Fh >= int32(12) {
if int32(cf) == int32('p') {
v6 = __ccgo_ts + 1379
} else {
v6 = __ccgo_ts + 1382
}
Xsqlite3_str_append(tls, bp+48, v6, int32(2))
} else {
if int32(cf) == int32('p') {
v7 = __ccgo_ts + 1385
} else {
v7 = __ccgo_ts + 1388
}
Xsqlite3_str_append(tls, bp+48, v7, int32(2))
}
case int32('R'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1391, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).Fh, (*(*TDateTime)(unsafe.Pointer(bp))).Fm))
case int32('s'):
if (*(*TDateTime)(unsafe.Pointer(bp))).FuseSubsec != 0 {
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1401, libc.VaList(bp+136, float64((*(*TDateTime)(unsafe.Pointer(bp))).FiJD-libc.Int64FromInt32(21086676)*libc.Int64FromInt32(10000000))/float64(1000)))
} else {
iS = (*(*TDateTime)(unsafe.Pointer(bp))).FiJD/libc.Int64FromInt32(1000) - libc.Int64FromInt32(21086676)*libc.Int64FromInt32(10000)
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1406, libc.VaList(bp+136, iS))
}
case int32('S'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1337, libc.VaList(bp+136, int32((*(*TDateTime)(unsafe.Pointer(bp))).Fs)))
case int32('T'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1411, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).Fh, (*(*TDateTime)(unsafe.Pointer(bp))).Fm, int32((*(*TDateTime)(unsafe.Pointer(bp))).Fs)))
case int32('u'): /* Fall thru */
fallthrough
case int32('w'):
c = int8(int32(int8(((*(*TDateTime)(unsafe.Pointer(bp))).FiJD+libc.Int64FromInt32(129600000))/libc.Int64FromInt32(86400000)%libc.Int64FromInt32(7))) + int32('0'))
if int32(c) == int32('0') && int32(cf) == int32('u') {
c = int8('7')
}
Xsqlite3_str_appendchar(tls, bp+48, int32(1), c)
case int32('Y'):
Xsqlite3_str_appendf(tls, bp+48, __ccgo_ts+1426, libc.VaList(bp+136, (*(*TDateTime)(unsafe.Pointer(bp))).FY))
case int32('%'):
Xsqlite3_str_appendchar(tls, bp+48, int32(1), int8('%'))
default:
Xsqlite3_str_reset(tls, bp+48)
return
}
goto _1
_1:
;
i++
}
if j < i {
Xsqlite3_str_append(tls, bp+48, zFmt+uintptr(j), int32(i-j))
}
_sqlite3ResultStrAccum(tls, context, bp+48)
}
// C documentation
//
// /*
// ** current_time()
// **
// ** This function returns the same value as time('now').
// */
func _ctimeFunc(tls *libc.TLS, context uintptr, NotUsed int32, NotUsed2 uintptr) {
_ = NotUsed
_ = NotUsed2
_timeFunc(tls, context, 0, uintptr(0))
}
// C documentation
//
// /*
// ** current_date()
// **
// ** This function returns the same value as date('now').
// */
func _cdateFunc(tls *libc.TLS, context uintptr, NotUsed int32, NotUsed2 uintptr) {
_ = NotUsed
_ = NotUsed2
_dateFunc(tls, context, 0, uintptr(0))
}
// C documentation
//
// /*
// ** timediff(DATE1, DATE2)
// **
// ** Return the amount of time that must be added to DATE2 in order to
// ** convert it into DATE1. The time difference format is:
// **
// **  +YYYY-MM-DD HH:MM:SS.SSS
// **
// ** The initial "+" becomes "-" if DATE1 occurs before DATE2. For
// ** date/time values A and B, the following invariant should hold:
// **
// ** datetime(A) == datetime(B, timediff(A,B))
// **
// ** Both DATE arguments must be either a julian day number or an
// ** ISO-8601 string. Unix timestamps are not supported by this
// ** routine.
// */
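// For illustration of the invariant above:
//
//	SELECT timediff('2025-03-01', '2025-02-01');                          -- '+0000-01-00 00:00:00.000'
//	SELECT datetime('2025-02-01', timediff('2025-03-01', '2025-02-01'));  -- '2025-03-01 00:00:00'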
func _timediffFunc(tls *libc.TLS, context uintptr, NotUsed1 int32, argv uintptr) {
bp := tls.Alloc(192)
defer tls.Free(192)
var M, Y int32
var sign int8
var p1, p2 uintptr
var _ /* d1 at bp+0 */ TDateTime
var _ /* d2 at bp+48 */ TDateTime
var _ /* sRes at bp+96 */ Tsqlite3_str
_, _, _, _, _ = M, Y, sign, p1, p2
_ = NotUsed1
if _isDate(tls, context, int32(1), argv, bp) != 0 {
return
}
if _isDate(tls, context, int32(1), argv+1*8, bp+48) != 0 {
return
}
_computeYMD_HMS(tls, bp)
_computeYMD_HMS(tls, bp+48)
if (*(*TDateTime)(unsafe.Pointer(bp))).FiJD >= (*(*TDateTime)(unsafe.Pointer(bp + 48))).FiJD {
sign = int8('+')
Y = (*(*TDateTime)(unsafe.Pointer(bp))).FY - (*(*TDateTime)(unsafe.Pointer(bp + 48))).FY
if Y != 0 {
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FY = (*(*TDateTime)(unsafe.Pointer(bp))).FY
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FvalidJD = 0
_computeJD(tls, bp+48)
}
M = (*(*TDateTime)(unsafe.Pointer(bp))).FM - (*(*TDateTime)(unsafe.Pointer(bp + 48))).FM
if M < 0 {
Y--
M += int32(12)
}
if M != 0 {
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FM = (*(*TDateTime)(unsafe.Pointer(bp))).FM
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FvalidJD = 0
_computeJD(tls, bp+48)
}
for (*(*TDateTime)(unsafe.Pointer(bp))).FiJD < (*(*TDateTime)(unsafe.Pointer(bp + 48))).FiJD {
M--
if M < 0 {
M = int32(11)
Y--
}
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FM--
if (*(*TDateTime)(unsafe.Pointer(bp + 48))).FM < int32(1) {
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FM = int32(12)
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FY--
}
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FvalidJD = 0
_computeJD(tls, bp+48)
}
(*(*TDateTime)(unsafe.Pointer(bp))).FiJD -= (*(*TDateTime)(unsafe.Pointer(bp + 48))).FiJD
p1 = bp
*(*Tsqlite3_int64)(unsafe.Pointer(p1)) = Tsqlite3_int64(uint64(*(*Tsqlite3_int64)(unsafe.Pointer(p1))) + libc.Uint64FromInt32(1486995408)*libc.Uint64FromInt32(100000))
} else { /* d1<d2 */
sign = int8('-')
Y = (*(*TDateTime)(unsafe.Pointer(bp + 48))).FY - (*(*TDateTime)(unsafe.Pointer(bp))).FY
if Y != 0 {
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FY = (*(*TDateTime)(unsafe.Pointer(bp))).FY
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FvalidJD = 0
_computeJD(tls, bp+48)
}
M = (*(*TDateTime)(unsafe.Pointer(bp + 48))).FM - (*(*TDateTime)(unsafe.Pointer(bp))).FM
if M < 0 {
Y--
M += int32(12)
}
if M != 0 {
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FM = (*(*TDateTime)(unsafe.Pointer(bp))).FM
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FvalidJD = 0
_computeJD(tls, bp+48)
}
for (*(*TDateTime)(unsafe.Pointer(bp))).FiJD > (*(*TDateTime)(unsafe.Pointer(bp + 48))).FiJD {
M--
if M < 0 {
M = int32(11)
Y--
}
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FM++
if (*(*TDateTime)(unsafe.Pointer(bp + 48))).FM > int32(12) {
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FM = int32(1)
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FY++
}
(*(*TDateTime)(unsafe.Pointer(bp + 48))).FvalidJD = 0
_computeJD(tls, bp+48)
}
(*(*TDateTime)(unsafe.Pointer(bp))).FiJD = (*(*TDateTime)(unsafe.Pointer(bp + 48))).FiJD - (*(*TDateTime)(unsafe.Pointer(bp))).FiJD
p2 = bp
*(*Tsqlite3_int64)(unsafe.Pointer(p2)) = Tsqlite3_int64(uint64(*(*Tsqlite3_int64)(unsafe.Pointer(p2))) + libc.Uint64FromInt32(1486995408)*libc.Uint64FromInt32(100000))
}
(*(*TDateTime)(unsafe.Pointer(bp))).FvalidYMD = 0
(*(*TDateTime)(unsafe.Pointer(bp))).FvalidHMS = 0
(*(*TDateTime)(unsafe.Pointer(bp))).FvalidTZ = 0
_computeYMD_HMS(tls, bp)
_sqlite3StrAccumInit(tls, bp+96, uintptr(0), uintptr(0), 0, int32(100))
Xsqlite3_str_appendf(tls, bp+96, __ccgo_ts+1431, libc.VaList(bp+136, int32(sign), Y, M, (*(*TDateTime)(unsafe.Pointer(bp))).FD-int32(1), (*(*TDateTime)(unsafe.Pointer(bp))).Fh, (*(*TDateTime)(unsafe.Pointer(bp))).Fm, (*(*TDateTime)(unsafe.Pointer(bp))).Fs))
_sqlite3ResultStrAccum(tls, context, bp+96)
}
// C documentation
//
// /*
// ** current_timestamp()
// **
// ** This function returns the same value as datetime('now').
// */
func _ctimestampFunc(tls *libc.TLS, context uintptr, NotUsed int32, NotUsed2 uintptr) {
_ = NotUsed
_ = NotUsed2
_datetimeFunc(tls, context, 0, uintptr(0))
}
// C documentation
//
// /*
// ** This function registers all of the above C functions as SQL
// ** functions. This should be the only routine in this file with
// ** external linkage.
// */
func _sqlite3RegisterDateTimeFunctions(tls *libc.TLS) {
_sqlite3InsertBuiltinFuncs(tls, uintptr(unsafe.Pointer(&_aDateTimeFuncs)), int32(libc.Uint64FromInt64(720)/libc.Uint64FromInt64(72)))
}
var _aDateTimeFuncs = [10]TFuncDef{
0: {
FnArg: int8(-int32(1)),
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8) | libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)),
FpUserData: uintptr(unsafe.Pointer(&_sqlite3Config)),
FzName: __ccgo_ts + 1237,
},
1: {
FnArg: int8(-int32(1)),
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8) | libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)),
FpUserData: uintptr(unsafe.Pointer(&_sqlite3Config)),
FzName: __ccgo_ts + 1257,
},
2: {
FnArg: int8(-int32(1)),
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8) | libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)),
FpUserData: uintptr(unsafe.Pointer(&_sqlite3Config)),
FzName: __ccgo_ts + 1465,
},
3: {
FnArg: int8(-int32(1)),
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8) | libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)),
FpUserData: uintptr(unsafe.Pointer(&_sqlite3Config)),
FzName: __ccgo_ts + 1470,
},
4: {
FnArg: int8(-int32(1)),
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8) | libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)),
FpUserData: uintptr(unsafe.Pointer(&_sqlite3Config)),
FzName: __ccgo_ts + 1475,
},
5: {
FnArg: int8(-int32(1)),
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8) | libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)),
FpUserData: uintptr(unsafe.Pointer(&_sqlite3Config)),
FzName: __ccgo_ts + 1484,
},
6: {
FnArg: int8(2),
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8) | libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)),
FpUserData: uintptr(unsafe.Pointer(&_sqlite3Config)),
FzName: __ccgo_ts + 1493,
},
7: {
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8)),
FzName: __ccgo_ts + 1502,
},
8: {
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8)),
FzName: __ccgo_ts + 1515,
},
9: {
FfuncFlags: uint32(libc.Int32FromInt32(SQLITE_FUNC_BUILTIN) | libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG) | libc.Int32FromInt32(SQLITE_UTF8)),
FzName: __ccgo_ts + 1533,
},
}
func init() {
p := unsafe.Pointer(&_aDateTimeFuncs)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_juliandayFunc)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_unixepochFunc)
*(*uintptr)(unsafe.Add(p, 168)) = __ccgo_fp(_dateFunc)
*(*uintptr)(unsafe.Add(p, 240)) = __ccgo_fp(_timeFunc)
*(*uintptr)(unsafe.Add(p, 312)) = __ccgo_fp(_datetimeFunc)
*(*uintptr)(unsafe.Add(p, 384)) = __ccgo_fp(_strftimeFunc)
*(*uintptr)(unsafe.Add(p, 456)) = __ccgo_fp(_timediffFunc)
*(*uintptr)(unsafe.Add(p, 528)) = __ccgo_fp(_ctimeFunc)
*(*uintptr)(unsafe.Add(p, 600)) = __ccgo_fp(_ctimestampFunc)
*(*uintptr)(unsafe.Add(p, 672)) = __ccgo_fp(_cdateFunc)
}
/************** End of date.c ************************************************/
/************** Begin file os.c **********************************************/
/*
** 2005 November 29
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
** This file contains OS interface code that is common to all
** architectures.
*/
/* #include "sqliteInt.h" */
/*
** If we compile with the SQLITE_TEST macro set, then the following block
** of code will give us the ability to simulate a disk I/O error. This
** is used for testing the I/O recovery logic.
*/
/*
** When testing, also keep a count of the number of open files.
*/
/*
** The default SQLite sqlite3_vfs implementations do not allocate
** memory (actually, os_unix.c allocates a small amount of memory
** from within OsOpen()), but some third-party implementations may.
** So we test the effects of a malloc() failing and the sqlite3OsXXX()
** function returning SQLITE_IOERR_NOMEM using the DO_OS_MALLOC_TEST macro.
**
** The following functions are instrumented for malloc() failure
** testing:
**
** sqlite3OsRead()
** sqlite3OsWrite()
** sqlite3OsSync()
** sqlite3OsFileSize()
** sqlite3OsLock()
** sqlite3OsCheckReservedLock()
** sqlite3OsFileControl()
** sqlite3OsShmMap()
** sqlite3OsOpen()
** sqlite3OsDelete()
** sqlite3OsAccess()
** sqlite3OsFullPathname()
**
*/
// C documentation
//
// /*
// ** The following routines are convenience wrappers around methods
// ** of the sqlite3_file object. This is mostly just syntactic sugar. All
// ** of this would be completely automatic if SQLite were coded using
// ** C++ instead of plain old C.
// */
func _sqlite3OsClose(tls *libc.TLS, pId uintptr) {
if (*Tsqlite3_file)(unsafe.Pointer(pId)).FpMethods != 0 {
(*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(pId)).FpMethods)).FxClose})))(tls, pId)
(*Tsqlite3_file)(unsafe.Pointer(pId)).FpMethods = uintptr(0)
}
}
func _sqlite3OsRead(tls *libc.TLS, id uintptr, pBuf uintptr, amt int32, offset Ti64) (r int32) {
return (*(*func(*libc.TLS, uintptr, uintptr, int32, Tsqlite3_int64) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxRead})))(tls, id, pBuf, amt, offset)
}
func _sqlite3OsWrite(tls *libc.TLS, id uintptr, pBuf uintptr, amt int32, offset Ti64) (r int32) {
return (*(*func(*libc.TLS, uintptr, uintptr, int32, Tsqlite3_int64) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxWrite})))(tls, id, pBuf, amt, offset)
}
func _sqlite3OsTruncate(tls *libc.TLS, id uintptr, size Ti64) (r int32) {
return (*(*func(*libc.TLS, uintptr, Tsqlite3_int64) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxTruncate})))(tls, id, size)
}
func _sqlite3OsSync(tls *libc.TLS, id uintptr, flags int32) (r int32) {
var v1 int32
_ = v1
if flags != 0 {
v1 = (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxSync})))(tls, id, flags)
} else {
v1 = SQLITE_OK
}
return v1
}
func _sqlite3OsFileSize(tls *libc.TLS, id uintptr, pSize uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxFileSize})))(tls, id, pSize)
}
func _sqlite3OsLock(tls *libc.TLS, id uintptr, lockType int32) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxLock})))(tls, id, lockType)
}
func _sqlite3OsUnlock(tls *libc.TLS, id uintptr, lockType int32) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxUnlock})))(tls, id, lockType)
}
func _sqlite3OsCheckReservedLock(tls *libc.TLS, id uintptr, pResOut uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxCheckReservedLock})))(tls, id, pResOut)
}
// C documentation
//
// /*
// ** Use sqlite3OsFileControl() when we are doing something that might fail
// ** and we need to know about the failures. Use sqlite3OsFileControlHint()
// ** when simply tossing information over the wall to the VFS and we do not
// ** really care if the VFS receives and understands the information since it
// ** is only a hint and can be safely ignored. The sqlite3OsFileControlHint()
// ** routine has no return value since the return value would be meaningless.
// */
func _sqlite3OsFileControl(tls *libc.TLS, id uintptr, op int32, pArg uintptr) (r int32) {
if (*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods == uintptr(0) {
return int32(SQLITE_NOTFOUND)
}
return (*(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxFileControl})))(tls, id, op, pArg)
}
func _sqlite3OsFileControlHint(tls *libc.TLS, id uintptr, op int32, pArg uintptr) {
if (*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods != 0 {
(*(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxFileControl})))(tls, id, op, pArg)
}
}
func _sqlite3OsSectorSize(tls *libc.TLS, id uintptr) (r int32) {
var xSectorSize uintptr
var v1 int32
_, _ = xSectorSize, v1
xSectorSize = (*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxSectorSize
if xSectorSize != 0 {
v1 = (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{xSectorSize})))(tls, id)
} else {
v1 = int32(SQLITE_DEFAULT_SECTOR_SIZE)
}
return v1
}
func _sqlite3OsDeviceCharacteristics(tls *libc.TLS, id uintptr) (r int32) {
if (*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods == uintptr(0) {
return 0
}
return (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxDeviceCharacteristics})))(tls, id)
}
func _sqlite3OsShmLock(tls *libc.TLS, id uintptr, offset int32, n int32, flags int32) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32, int32, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxShmLock})))(tls, id, offset, n, flags)
}
func _sqlite3OsShmBarrier(tls *libc.TLS, id uintptr) {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxShmBarrier})))(tls, id)
}
func _sqlite3OsShmUnmap(tls *libc.TLS, id uintptr, deleteFlag int32) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxShmUnmap})))(tls, id, deleteFlag)
}
func _sqlite3OsShmMap(tls *libc.TLS, id uintptr, iPage int32, pgsz int32, bExtend int32, pp uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32, int32, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxShmMap})))(tls, id, iPage, pgsz, bExtend, pp)
}
// C documentation
//
// /* The real implementation of xFetch and xUnfetch */
func _sqlite3OsFetch(tls *libc.TLS, id uintptr, iOff Ti64, iAmt int32, pp uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, Tsqlite3_int64, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxFetch})))(tls, id, iOff, iAmt, pp)
}
func _sqlite3OsUnfetch(tls *libc.TLS, id uintptr, iOff Ti64, p uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, Tsqlite3_int64, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(id)).FpMethods)).FxUnfetch})))(tls, id, iOff, p)
}
// C documentation
//
// /*
// ** The next group of routines are convenience wrappers around the
// ** VFS methods.
// */
func _sqlite3OsOpen(tls *libc.TLS, pVfs uintptr, zPath uintptr, pFile uintptr, flags int32, pFlagsOut uintptr) (r int32) {
var rc int32
_ = rc
/* 0x87f7f is a mask of SQLITE_OPEN_ flags that are valid to be passed
** down into the VFS layer. Some SQLITE_OPEN_ flags (for example,
** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before
** reaching the VFS. */
rc = (*(*func(*libc.TLS, uintptr, Tsqlite3_filename, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxOpen})))(tls, pVfs, zPath, pFile, flags&int32(0x1087f7f), pFlagsOut)
return rc
}
func _sqlite3OsDelete(tls *libc.TLS, pVfs uintptr, zPath uintptr, dirSync int32) (r int32) {
var v1 int32
_ = v1
if (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxDelete != uintptr(0) {
v1 = (*(*func(*libc.TLS, uintptr, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxDelete})))(tls, pVfs, zPath, dirSync)
} else {
v1 = SQLITE_OK
}
return v1
}
func _sqlite3OsAccess(tls *libc.TLS, pVfs uintptr, zPath uintptr, flags int32, pResOut uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxAccess})))(tls, pVfs, zPath, flags, pResOut)
}
func _sqlite3OsFullPathname(tls *libc.TLS, pVfs uintptr, zPath uintptr, nPathOut int32, zPathOut uintptr) (r int32) {
*(*int8)(unsafe.Pointer(zPathOut)) = 0
return (*(*func(*libc.TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxFullPathname})))(tls, pVfs, zPath, nPathOut, zPathOut)
}
func _sqlite3OsDlOpen(tls *libc.TLS, pVfs uintptr, zPath uintptr) (r uintptr) {
/* tag-20210611-1 */
return (*(*func(*libc.TLS, uintptr, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxDlOpen})))(tls, pVfs, zPath)
}
func _sqlite3OsDlError(tls *libc.TLS, pVfs uintptr, nByte int32, zBufOut uintptr) {
(*(*func(*libc.TLS, uintptr, int32, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxDlError})))(tls, pVfs, nByte, zBufOut)
}
func _sqlite3OsDlSym(tls *libc.TLS, pVfs uintptr, pHdle uintptr, zSym uintptr) (r uintptr) {
return (*(*func(*libc.TLS, uintptr, uintptr, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxDlSym})))(tls, pVfs, pHdle, zSym)
}
func _sqlite3OsDlClose(tls *libc.TLS, pVfs uintptr, pHandle uintptr) {
(*(*func(*libc.TLS, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxDlClose})))(tls, pVfs, pHandle)
}
func _sqlite3OsRandomness(tls *libc.TLS, pVfs uintptr, nByte int32, zBufOut uintptr) (r int32) {
if _sqlite3Config.FiPrngSeed != 0 {
libc.Xmemset(tls, zBufOut, 0, uint64(nByte))
if nByte > libc.Int32FromInt64(4) {
nByte = int32(4)
}
libc.Xmemcpy(tls, zBufOut, uintptr(unsafe.Pointer(&_sqlite3Config))+432, uint64(nByte))
return SQLITE_OK
} else {
return (*(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxRandomness})))(tls, pVfs, nByte, zBufOut)
}
return r
}
func _sqlite3OsSleep(tls *libc.TLS, pVfs uintptr, nMicro int32) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxSleep})))(tls, pVfs, nMicro)
}
func _sqlite3OsGetLastError(tls *libc.TLS, pVfs uintptr) (r int32) {
var v1 int32
_ = v1
if (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxGetLastError != 0 {
v1 = (*(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxGetLastError})))(tls, pVfs, 0, uintptr(0))
} else {
v1 = 0
}
return v1
}
func _sqlite3OsCurrentTimeInt64(tls *libc.TLS, pVfs uintptr, pTimeOut uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* r at bp+0 */ float64
_ = rc
/* IMPLEMENTATION-OF: R-49045-42493 SQLite will use the xCurrentTimeInt64()
** method to get the current date and time if that method is available
** (if iVersion is 2 or greater and the function pointer is not NULL) and
** will fall back to xCurrentTime() if xCurrentTimeInt64() is
** unavailable.
*/
if (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FiVersion >= int32(2) && (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxCurrentTimeInt64 != 0 {
rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxCurrentTimeInt64})))(tls, pVfs, pTimeOut)
} else {
rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FxCurrentTime})))(tls, pVfs, bp)
*(*Tsqlite3_int64)(unsafe.Pointer(pTimeOut)) = int64(*(*float64)(unsafe.Pointer(bp)) * libc.Float64FromFloat64(8.64e+07))
}
return rc
}
func _sqlite3OsOpenMalloc(tls *libc.TLS, pVfs uintptr, zFile uintptr, ppFile uintptr, flags int32, pOutFlags uintptr) (r int32) {
var pFile uintptr
var rc int32
_, _ = pFile, rc
pFile = _sqlite3MallocZero(tls, uint64((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FszOsFile))
if pFile != 0 {
rc = _sqlite3OsOpen(tls, pVfs, zFile, pFile, flags, pOutFlags)
if rc != SQLITE_OK {
Xsqlite3_free(tls, pFile)
*(*uintptr)(unsafe.Pointer(ppFile)) = uintptr(0)
} else {
*(*uintptr)(unsafe.Pointer(ppFile)) = pFile
}
} else {
*(*uintptr)(unsafe.Pointer(ppFile)) = uintptr(0)
rc = int32(SQLITE_NOMEM)
}
return rc
}
func _sqlite3OsCloseFree(tls *libc.TLS, pFile uintptr) {
_sqlite3OsClose(tls, pFile)
Xsqlite3_free(tls, pFile)
}
// C documentation
//
// /*
// ** This function is a wrapper around the OS specific implementation of
// ** sqlite3_os_init(). The purpose of the wrapper is to provide the
// ** ability to simulate a malloc failure, so that the handling of an
// ** error in sqlite3_os_init() by the upper layers can be tested.
// */
func _sqlite3OsInit(tls *libc.TLS) (r int32) {
var p uintptr
_ = p
p = Xsqlite3_malloc(tls, int32(10))
if p == uintptr(0) {
return int32(SQLITE_NOMEM)
}
Xsqlite3_free(tls, p)
return Xsqlite3_os_init(tls)
}
// C documentation
//
// /*
// ** The list of all registered VFS implementations.
// */
var _vfsList = uintptr(0)
// C documentation
//
// /*
// ** Locate a VFS by name. If no name is given, simply return the
// ** first VFS on the list.
// */
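// A minimal sketch of looking up the default VFS from Go; obtaining the TLS
// value via libc.NewTLS is an assumption about the surrounding program, not
// something this file requires:
//
//	tls := libc.NewTLS()
//	pVfs := Xsqlite3_vfs_find(tls, 0) // zVfs == 0 (NULL): returns the first, i.e. default, VFS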
func Xsqlite3_vfs_find(tls *libc.TLS, zVfs uintptr) (r uintptr) {
var mutex, pVfs uintptr
var rc int32
_, _, _ = mutex, pVfs, rc
pVfs = uintptr(0)
rc = Xsqlite3_initialize(tls)
if rc != 0 {
return uintptr(0)
}
mutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MAIN))
Xsqlite3_mutex_enter(tls, mutex)
pVfs = _vfsList
for {
if !(pVfs != 0) {
break
}
if zVfs == uintptr(0) {
break
}
if libc.Xstrcmp(tls, zVfs, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FzName) == 0 {
break
}
goto _1
_1:
;
pVfs = (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpNext
}
Xsqlite3_mutex_leave(tls, mutex)
return pVfs
}
// C documentation
//
// /*
// ** Unlink a VFS from the linked list
// */
func _vfsUnlink(tls *libc.TLS, pVfs uintptr) {
var p uintptr
_ = p
if pVfs == uintptr(0) {
/* No-op */
} else {
if _vfsList == pVfs {
_vfsList = (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpNext
} else {
if _vfsList != 0 {
p = _vfsList
for (*Tsqlite3_vfs)(unsafe.Pointer(p)).FpNext != 0 && (*Tsqlite3_vfs)(unsafe.Pointer(p)).FpNext != pVfs {
p = (*Tsqlite3_vfs)(unsafe.Pointer(p)).FpNext
}
if (*Tsqlite3_vfs)(unsafe.Pointer(p)).FpNext == pVfs {
(*Tsqlite3_vfs)(unsafe.Pointer(p)).FpNext = (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpNext
}
}
}
}
}
// C documentation
//
// /*
// ** Register a VFS with the system. It is harmless to register the same
// ** VFS multiple times. The new VFS becomes the default if makeDflt is
// ** true.
// */
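// A minimal sketch, assuming pNewVfs (a hypothetical name) points at a fully
// populated Tsqlite3_vfs object owned by the caller:
//
//	Xsqlite3_vfs_register(tls, pNewVfs, 1) // makeDflt != 0: pNewVfs becomes the default VFS
//	Xsqlite3_vfs_unregister(tls, pNewVfs)  // later: unlink it from the VFS list again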
func Xsqlite3_vfs_register(tls *libc.TLS, pVfs uintptr, makeDflt int32) (r int32) {
var mutex uintptr
var rc int32
_, _ = mutex, rc
rc = Xsqlite3_initialize(tls)
if rc != 0 {
return rc
}
mutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MAIN))
Xsqlite3_mutex_enter(tls, mutex)
_vfsUnlink(tls, pVfs)
if makeDflt != 0 || _vfsList == uintptr(0) {
(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpNext = _vfsList
_vfsList = pVfs
} else {
(*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpNext = (*Tsqlite3_vfs)(unsafe.Pointer(_vfsList)).FpNext
(*Tsqlite3_vfs)(unsafe.Pointer(_vfsList)).FpNext = pVfs
}
Xsqlite3_mutex_leave(tls, mutex)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Unregister a VFS so that it is no longer accessible.
// */
func Xsqlite3_vfs_unregister(tls *libc.TLS, pVfs uintptr) (r int32) {
var mutex uintptr
var rc int32
_, _ = mutex, rc
rc = Xsqlite3_initialize(tls)
if rc != 0 {
return rc
}
mutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MAIN))
Xsqlite3_mutex_enter(tls, mutex)
_vfsUnlink(tls, pVfs)
Xsqlite3_mutex_leave(tls, mutex)
return SQLITE_OK
}
/************** End of os.c **************************************************/
/************** Begin file fault.c *******************************************/
/*
** 2008 Jan 22
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains code to support the concept of "benign"
** malloc failures (when the xMalloc() or xRealloc() method of the
** sqlite3_mem_methods structure fails to allocate a block of memory
** and returns 0).
**
** Most malloc failures are non-benign. After they occur, SQLite
** abandons the current operation and returns an error code (usually
** SQLITE_NOMEM) to the user. However, sometimes a fault is not necessarily
** fatal. For example, if a malloc fails while resizing a hash table, this
** is completely recoverable simply by not carrying out the resize. The
** hash table will continue to function normally. So a malloc failure
** during a hash table resize is a benign fault.
*/
/* #include "sqliteInt.h" */
// C documentation
//
// /*
// ** Global variables.
// */
type TBenignMallocHooks = struct {
FxBenignBegin uintptr
FxBenignEnd uintptr
}
type BenignMallocHooks = TBenignMallocHooks
type TBenignMallocHooks1 = struct {
FxBenignBegin uintptr
FxBenignEnd uintptr
}
type BenignMallocHooks1 = TBenignMallocHooks1
var _sqlite3Hooks = TBenignMallocHooks1{}
/* The "wsdHooks" macro will resolve to the appropriate BenignMallocHooks
** structure. If writable static data is unsupported on the target,
** we have to locate the state vector at run-time. In the more common
** case where writable static data is supported, wsdHooks can refer directly
** to the "sqlite3Hooks" state vector declared above.
*/
// C documentation
//
// /*
// ** Register hooks to call when sqlite3BeginBenignMalloc() and
// ** sqlite3EndBenignMalloc() are called, respectively.
// */
func _sqlite3BenignMallocHooks(tls *libc.TLS, xBenignBegin uintptr, xBenignEnd uintptr) {
_sqlite3Hooks.FxBenignBegin = xBenignBegin
_sqlite3Hooks.FxBenignEnd = xBenignEnd
}
// C documentation
//
// /*
// ** This (sqlite3BeginBenignMalloc()) is called by SQLite code to indicate that
// ** subsequent malloc failures are benign. A call to sqlite3EndBenignMalloc()
// ** indicates that subsequent malloc failures are non-benign.
// */
func _sqlite3BeginBenignMalloc(tls *libc.TLS) {
if _sqlite3Hooks.FxBenignBegin != 0 {
(*(*func(*libc.TLS))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Hooks.FxBenignBegin})))(tls)
}
}
func _sqlite3EndBenignMalloc(tls *libc.TLS) {
if _sqlite3Hooks.FxBenignEnd != 0 {
(*(*func(*libc.TLS))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Hooks.FxBenignEnd})))(tls)
}
}
/************** End of fault.c ***********************************************/
/************** Begin file mem0.c ********************************************/
/*
** 2008 October 28
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
// ** This file contains no-op memory allocation drivers for use when
** SQLITE_ZERO_MALLOC is defined. The allocation drivers implemented
** here always fail. SQLite will not operate with these drivers. These
** are merely placeholders. Real drivers must be substituted using
** sqlite3_config() before SQLite will operate.
*/
/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is the default. It is
** used when no other memory allocator is specified using compile-time
** macros.
*/
/************** End of mem0.c ************************************************/
/************** Begin file mem1.c ********************************************/
/*
** 2007 August 14
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains low-level memory allocation drivers for when
** SQLite will use the standard C-library malloc/realloc/free interface
** to obtain the memory it needs.
**
** This file contains implementations of the low-level memory allocation
** routines specified in the sqlite3_mem_methods object. The content of
** this file is only used if SQLITE_SYSTEM_MALLOC is defined. The
** SQLITE_SYSTEM_MALLOC macro is defined automatically if neither the
** SQLITE_MEMDEBUG nor the SQLITE_WIN32_MALLOC macros are defined. The
** default configuration is to use memory allocation routines in this
** file.
**
** C-preprocessor macro summary:
**
** HAVE_MALLOC_USABLE_SIZE The configure script sets this symbol if
** the malloc_usable_size() interface exists
** on the target platform. Or, this symbol
** can be set manually, if desired.
// ** If an equivalent interface exists under
// ** a different name, use a separate -D
// ** option to rename it.
**
// ** SQLITE_WITHOUT_ZONEMALLOC Some older Macs lack support for the zone
// ** memory allocator. Set this symbol to enable
// ** building on older Macs.
**
** SQLITE_WITHOUT_MSIZE Set this symbol to disable the use of
** _msize() on windows systems. This might
** be necessary when compiling for Delphi,
** for example.
*/
/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is the default. It is
** used when no other memory allocator is specified using compile-time
** macros.
*/
/*
** Use standard C library malloc and free on non-Apple systems.
** Also used by Apple systems if SQLITE_WITHOUT_ZONEMALLOC is defined.
*/
/*
** The malloc.h header file is needed for malloc_usable_size() function
** on some systems (e.g. Linux).
*/
/*
** Include the malloc.h header file, if necessary. Also set define macro
** SQLITE_MALLOCSIZE to the appropriate function name, which is _msize()
** for MSVC and malloc_usable_size() for most other systems (e.g. Linux).
** The memory size function can always be overridden manually by defining
** the macro SQLITE_MALLOCSIZE to the desired function name.
*/
// C documentation
//
// /*
// ** Like malloc(), but remember the size of the allocation
// ** so that we can find it later using sqlite3MemSize().
// **
// ** For this low-level routine, we are guaranteed that nByte>0 because
// ** cases of nByte<=0 will be intercepted and dealt with by higher level
// ** routines.
// */
func _sqlite3MemMalloc(tls *libc.TLS, nByte int32) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var p uintptr
_ = p
p = libc.Xmalloc(tls, uint64(nByte+int32(8)))
if p != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(p)) = int64(nByte)
p += 8
} else {
Xsqlite3_log(tls, int32(SQLITE_NOMEM), __ccgo_ts+1546, libc.VaList(bp+8, nByte))
}
return p
}
// C documentation
//
// /*
// ** Like free() but works for allocations obtained from sqlite3MemMalloc()
// ** or sqlite3MemRealloc().
// **
// ** For this low-level routine, we already know that pPrior!=0 since
// ** cases where pPrior==0 will have been intercepted and dealt with
// ** by higher-level routines.
// */
func _sqlite3MemFree(tls *libc.TLS, pPrior uintptr) {
var p uintptr
_ = p
p = pPrior
p -= 8
libc.Xfree(tls, p)
}
// C documentation
//
// /*
// ** Report the allocated size of a prior return from xMalloc()
// ** or xRealloc().
// */
func _sqlite3MemSize(tls *libc.TLS, pPrior uintptr) (r int32) {
var p uintptr
_ = p
p = pPrior
p -= 8
return int32(*(*Tsqlite3_int64)(unsafe.Pointer(p)))
}
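// Illustrative sketch, not part of the generated translation: the three mem1
// routines above share a size-prefix scheme. xMalloc over-allocates by 8
// bytes, records the request size in that header, and returns the address
// just past it; xSize and xFree step back 8 bytes to recover the header. A
// hypothetical pure-Go rendering of the header encoding, for clarity only
// (the example* names do not exist in SQLite or ccgo):
func examplePutSizeHeader(buf []byte, n int64) {
for i := 0; i < 8; i++ {
buf[i] = byte(uint64(n) >> (8 * i)) // little-endian, matching amd64's in-memory layout
}
}
func exampleGetSizeHeader(buf []byte) int64 {
var n uint64
for i := 0; i < 8; i++ {
n |= uint64(buf[i]) << (8 * i)
}
return int64(n)
}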
// C documentation
//
// /*
// ** Like realloc(). Resize an allocation previously obtained from
// ** sqlite3MemMalloc().
// **
// ** For this low-level interface, we know that pPrior!=0. Cases where
// ** pPrior==0 while have been intercepted by higher-level routine and
// ** redirected to xMalloc. Similarly, we know that nByte>0 because
// ** cases where nByte<=0 will have been intercepted by higher-level
// ** routines and redirected to xFree.
// */
func _sqlite3MemRealloc(tls *libc.TLS, pPrior uintptr, nByte int32) (r uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var p uintptr
_ = p
p = pPrior
/* EV: R-46199-30249 */
p -= 8
p = libc.Xrealloc(tls, p, uint64(nByte+libc.Int32FromInt32(8)))
if p != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(p)) = int64(nByte)
p += 8
} else {
Xsqlite3_log(tls, int32(SQLITE_NOMEM), __ccgo_ts+1584, libc.VaList(bp+8, _sqlite3MemSize(tls, pPrior), nByte))
}
return p
}
// C documentation
//
// /*
// ** Round up a request size to the next valid allocation size.
// */
func _sqlite3MemRoundup(tls *libc.TLS, n int32) (r int32) {
return (n + int32(7)) & ^libc.Int32FromInt32(7)
}
// C documentation
//
// /*
// ** Initialize this module.
// */
func _sqlite3MemInit(tls *libc.TLS, NotUsed uintptr) (r int32) {
_ = NotUsed
return SQLITE_OK
}
// C documentation
//
// /*
// ** Deinitialize this module.
// */
func _sqlite3MemShutdown(tls *libc.TLS, NotUsed uintptr) {
_ = NotUsed
return
}
// C documentation
//
// /*
// ** This routine is the only routine in this file with external linkage.
// **
// ** Populate the low-level memory allocation function pointers in
// ** sqlite3GlobalConfig.m with pointers to the routines in this file.
// */
func _sqlite3MemSetDefault(tls *libc.TLS) {
bp := tls.Alloc(16)
defer tls.Free(16)
Xsqlite3_config(tls, int32(SQLITE_CONFIG_MALLOC), libc.VaList(bp+8, uintptr(unsafe.Pointer(&_defaultMethods))))
}
var _defaultMethods = Tsqlite3_mem_methods{}
func init() {
p := unsafe.Pointer(&_defaultMethods)
*(*uintptr)(unsafe.Add(p, 0)) = __ccgo_fp(_sqlite3MemMalloc)
*(*uintptr)(unsafe.Add(p, 8)) = __ccgo_fp(_sqlite3MemFree)
*(*uintptr)(unsafe.Add(p, 16)) = __ccgo_fp(_sqlite3MemRealloc)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_sqlite3MemSize)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(_sqlite3MemRoundup)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_sqlite3MemInit)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_sqlite3MemShutdown)
}
/************** End of mem1.c ************************************************/
/************** Begin file mem2.c ********************************************/
/*
** 2007 August 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains low-level memory allocation drivers for when
** SQLite will use the standard C-library malloc/realloc/free interface
** to obtain the memory it needs while adding lots of additional debugging
** information to each allocation in order to help detect and fix memory
** leaks and memory usage errors.
**
** This file contains implementations of the low-level memory allocation
** routines specified in the sqlite3_mem_methods object.
*/
/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is used only if the
** SQLITE_MEMDEBUG macro is defined
*/
/************** End of mem2.c ************************************************/
/************** Begin file mem3.c ********************************************/
/*
** 2007 October 14
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the C functions that implement a memory
** allocation subsystem for use by SQLite.
**
** This version of the memory allocation subsystem omits all
** use of malloc(). The SQLite user supplies a block of memory
** before calling sqlite3_initialize() from which allocations
** are made and returned by the xMalloc() and xRealloc()
** implementations. Once sqlite3_initialize() has been called,
** the amount of memory available to SQLite is fixed and cannot
** be changed.
**
** This version of the memory allocation subsystem is included
** in the build only if SQLITE_ENABLE_MEMSYS3 is defined.
*/
/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is only built into the library
** if SQLITE_ENABLE_MEMSYS3 is defined. Defining this symbol does not
** mean that the library will use a memory-pool by default, just that
** it is available. The mempool allocator is activated by calling
** sqlite3_config().
*/
/************** End of mem3.c ************************************************/
/************** Begin file mem5.c ********************************************/
/*
** 2007 October 14
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the C functions that implement a memory
** allocation subsystem for use by SQLite.
**
** This version of the memory allocation subsystem omits all
** use of malloc(). The application gives SQLite a block of memory
** before calling sqlite3_initialize() from which allocations
** are made and returned by the xMalloc() and xRealloc()
** implementations. Once sqlite3_initialize() has been called,
** the amount of memory available to SQLite is fixed and cannot
** be changed.
**
** This version of the memory allocation subsystem is included
** in the build only if SQLITE_ENABLE_MEMSYS5 is defined.
**
** This memory allocator uses the following algorithm:
**
** 1. All memory allocation sizes are rounded up to a power of 2.
**
** 2. If two adjacent free blocks are the halves of a larger block,
** then the two blocks are coalesced into the single larger block.
**
** 3. New memory is allocated from the first available free block.
**
** This algorithm is described in: J. M. Robson. "Bounds for Some Functions
** Concerning Dynamic Storage Allocation". Journal of the Association for
** Computing Machinery, Volume 21, Number 8, July 1974, pages 491-499.
**
** Let n be the size of the largest allocation divided by the minimum
** allocation size (after rounding all sizes up to a power of 2.) Let M
** be the maximum amount of memory ever outstanding at one time. Let
** N be the total amount of memory available for allocation. Robson
** proved that this memory allocator will never break down due to
** fragmentation as long as the following constraint holds:
**
** N >= M*(1 + log2(n)/2) - n + 1
**
** The sqlite3_status() logic tracks the maximum values of n and M so
** that an application can, at any time, verify this constraint.
*/
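// Illustrative sketch, not part of the generated translation: a hypothetical
// helper that evaluates Robson's constraint from the comment above using only
// integer arithmetic (no extra imports). nAvail is N, maxOutstanding is M and
// ratio is n, the largest allocation divided by the minimum allocation, both
// already rounded up to powers of 2. M*(1 + log2(n)/2) is computed as
// M*(2 + log2(n))/2. The exampleRobsonBound name is made up for this sketch.
func exampleRobsonBound(nAvail, maxOutstanding, ratio int64) bool {
var lg int64 // floor(log2(ratio)); ratio is assumed to be a power of 2
for v := ratio; v > 1; v >>= 1 {
lg++
}
need := maxOutstanding*(2+lg)/2 - ratio + 1
return nAvail >= need
}
// For example, exampleRobsonBound(64<<20, 10<<20, 1024) reports true, while
// raising the outstanding amount to 11<<20 makes the same 64 MiB pool fail
// the constraint.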
/* #include "sqliteInt.h" */
/*
** This version of the memory allocator is used only when
** SQLITE_ENABLE_MEMSYS5 is defined.
*/
/************** End of mem5.c ************************************************/
/************** Begin file mutex.c *******************************************/
/*
** 2007 August 14
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the C functions that implement mutexes.
**
** This file contains code that is common across all mutex implementations.
*/
/* #include "sqliteInt.h" */
// C documentation
//
// /*
// ** Initialize the mutex system.
// */
func _sqlite3MutexInit(tls *libc.TLS) (r int32) {
var pFrom, pTo uintptr
var rc int32
_, _, _ = pFrom, pTo, rc
rc = SQLITE_OK
if !(_sqlite3Config.Fmutex.FxMutexAlloc != 0) {
pTo = uintptr(unsafe.Pointer(&_sqlite3Config)) + 96
if _sqlite3Config.FbCoreMutex != 0 {
pFrom = _sqlite3DefaultMutex(tls)
} else {
pFrom = _sqlite3NoopMutex(tls)
}
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexInit = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexInit
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexEnd = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexEnd
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexFree = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexFree
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexEnter = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexEnter
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexTry = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexTry
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexLeave = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexLeave
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexHeld = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexHeld
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexNotheld = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexNotheld
(*Tsqlite3_mutex_methods)(unsafe.Pointer(pTo)).FxMutexAlloc = (*Tsqlite3_mutex_methods)(unsafe.Pointer(pFrom)).FxMutexAlloc
}
rc = (*(*func(*libc.TLS) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexInit})))(tls)
return rc
}
// C documentation
//
// /*
// ** Shutdown the mutex system. This call frees resources allocated by
// ** sqlite3MutexInit().
// */
func _sqlite3MutexEnd(tls *libc.TLS) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
if _sqlite3Config.Fmutex.FxMutexEnd != 0 {
rc = (*(*func(*libc.TLS) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexEnd})))(tls)
}
return rc
}
// C documentation
//
// /*
// ** Retrieve a pointer to a static mutex or allocate a new dynamic one.
// */
func Xsqlite3_mutex_alloc(tls *libc.TLS, id int32) (r uintptr) {
if id <= int32(SQLITE_MUTEX_RECURSIVE) && Xsqlite3_initialize(tls) != 0 {
return uintptr(0)
}
if id > int32(SQLITE_MUTEX_RECURSIVE) && _sqlite3MutexInit(tls) != 0 {
return uintptr(0)
}
return (*(*func(*libc.TLS, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexAlloc})))(tls, id)
}
func _sqlite3MutexAlloc(tls *libc.TLS, id int32) (r uintptr) {
if !(_sqlite3Config.FbCoreMutex != 0) {
return uintptr(0)
}
return (*(*func(*libc.TLS, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexAlloc})))(tls, id)
}
// C documentation
//
// /*
// ** Free a dynamic mutex.
// */
func Xsqlite3_mutex_free(tls *libc.TLS, p uintptr) {
if p != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexFree})))(tls, p)
}
}
// C documentation
//
// /*
// ** Obtain the mutex p. If some other thread already has the mutex, block
// ** until it can be obtained.
// */
func Xsqlite3_mutex_enter(tls *libc.TLS, p uintptr) {
if p != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexEnter})))(tls, p)
}
}
// C documentation
//
// /*
// ** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another
// ** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY.
// */
func Xsqlite3_mutex_try(tls *libc.TLS, p uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
if p != 0 {
return (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexTry})))(tls, p)
}
return rc
}
// C documentation
//
// /*
// ** The sqlite3_mutex_leave() routine exits a mutex that was previously
// ** entered by the same thread. The behavior is undefined if the mutex
// ** is not currently entered. If a NULL pointer is passed as an argument
// ** this function is a no-op.
// */
func Xsqlite3_mutex_leave(tls *libc.TLS, p uintptr) {
if p != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fmutex.FxMutexLeave})))(tls, p)
}
}
/************** End of mutex.c ***********************************************/
/************** Begin file mutex_noop.c **************************************/
/*
** 2008 October 07
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the C functions that implement mutexes.
**
** The implementation in this file does not provide any mutual
** exclusion and is thus suitable for use only in applications
** that use SQLite in a single thread. The routines defined
** here are place-holders. Applications can substitute working
** mutex routines at start-time using the
**
** sqlite3_config(SQLITE_CONFIG_MUTEX,...)
**
** interface.
**
** If compiled with SQLITE_DEBUG, then additional logic is inserted
** that does error checking on mutexes to make sure they are being
** called correctly.
*/
/* #include "sqliteInt.h" */
// C documentation
//
// /*
// ** Stub routines for all mutex methods.
// **
// ** These routines provide no mutual exclusion or error checking.
// */
func _noopMutexInit(tls *libc.TLS) (r int32) {
return SQLITE_OK
}
func _noopMutexEnd(tls *libc.TLS) (r int32) {
return SQLITE_OK
}
func _noopMutexAlloc(tls *libc.TLS, id int32) (r uintptr) {
_ = id
return libc.UintptrFromInt32(8)
}
func _noopMutexFree(tls *libc.TLS, p uintptr) {
_ = p
return
}
func _noopMutexEnter(tls *libc.TLS, p uintptr) {
_ = p
return
}
func _noopMutexTry(tls *libc.TLS, p uintptr) (r int32) {
_ = p
return SQLITE_OK
}
func _noopMutexLeave(tls *libc.TLS, p uintptr) {
_ = p
return
}
func _sqlite3NoopMutex(tls *libc.TLS) (r uintptr) {
return uintptr(unsafe.Pointer(&_sMutex))
}
var _sMutex = Tsqlite3_mutex_methods{}
func init() {
p := unsafe.Pointer(&_sMutex)
*(*uintptr)(unsafe.Add(p, 0)) = __ccgo_fp(_noopMutexInit)
*(*uintptr)(unsafe.Add(p, 8)) = __ccgo_fp(_noopMutexEnd)
*(*uintptr)(unsafe.Add(p, 16)) = __ccgo_fp(_noopMutexAlloc)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_noopMutexFree)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(_noopMutexEnter)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_noopMutexTry)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_noopMutexLeave)
}
// C documentation
//
// /*
// ** If compiled with SQLITE_MUTEX_NOOP, then the no-op mutex implementation
// ** is used regardless of the run-time threadsafety setting.
// */
func _sqlite3DefaultMutex(tls *libc.TLS) (r uintptr) {
return _sqlite3NoopMutex(tls)
}
/************** End of mutex_noop.c ******************************************/
/************** Begin file mutex_unix.c **************************************/
/*
** 2007 August 28
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the C functions that implement mutexes for pthreads
*/
/* #include "sqliteInt.h" */
/*
** The code in this file is only used if we are compiling threadsafe
** under unix with pthreads.
**
** Note that this implementation requires a version of pthreads that
** supports recursive mutexes.
*/
/************** End of mutex_unix.c ******************************************/
/************** Begin file mutex_w32.c ***************************************/
/*
** 2007 August 14
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the C functions that implement mutexes for Win32.
*/
/* #include "sqliteInt.h" */
/*
** The code in this file is only used if we are compiling multithreaded
** on a Win32 system.
*/
/************** End of mutex_w32.c *******************************************/
/************** Begin file malloc.c ******************************************/
/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** Memory allocation functions used throughout sqlite.
*/
/* #include "sqliteInt.h" */
/* #include <stdarg.h> */
// C documentation
//
// /*
// ** Attempt to release up to n bytes of non-essential memory currently
// ** held by SQLite. An example of non-essential memory is memory used to
// ** cache database pages that are not currently in use.
// */
func Xsqlite3_release_memory(tls *libc.TLS, n int32) (r int32) {
return _sqlite3PcacheReleaseMemory(tls, n)
}
/*
** Default value of the hard heap limit. 0 means "no limit".
*/
// C documentation
//
// /*
// ** State information local to the memory allocation subsystem.
// */
type TMem0Global = struct {
Fmutex uintptr
FalarmThreshold Tsqlite3_int64
FhardLimit Tsqlite3_int64
FnearlyFull int32
}
type Mem0Global = TMem0Global
/*
** Default value of the hard heap limit. 0 means "no limit".
*/
// C documentation
//
// /*
// ** State information local to the memory allocation subsystem.
// */
var _mem0 = TMem0Global{}
// C documentation
//
// /*
// ** Return the memory allocator mutex. sqlite3_status() needs it.
// */
func _sqlite3MallocMutex(tls *libc.TLS) (r uintptr) {
return _mem0.Fmutex
}
// C documentation
//
// /*
// ** Deprecated external interface. It used to set an alarm callback
// ** that was invoked when memory usage grew too large. Now it is a
// ** no-op.
// */
func Xsqlite3_memory_alarm(tls *libc.TLS, xCallback uintptr, pArg uintptr, iThreshold Tsqlite3_int64) (r int32) {
_ = xCallback
_ = pArg
_ = iThreshold
return SQLITE_OK
}
// C documentation
//
// /*
// ** Set the soft heap-size limit for the library. An argument of
// ** zero disables the limit. A negative argument is a no-op used to
// ** obtain the return value.
// **
// ** The return value is the value of the heap limit just before this
// ** interface was called.
// **
// ** If the hard heap limit is enabled, then the soft heap limit cannot
// ** be disabled nor raised above the hard heap limit.
// */
func Xsqlite3_soft_heap_limit64(tls *libc.TLS, n Tsqlite3_int64) (r Tsqlite3_int64) {
var excess, nUsed, priorLimit Tsqlite3_int64
var rc int32
_, _, _, _ = excess, nUsed, priorLimit, rc
rc = Xsqlite3_initialize(tls)
if rc != 0 {
return int64(-int32(1))
}
Xsqlite3_mutex_enter(tls, _mem0.Fmutex)
priorLimit = _mem0.FalarmThreshold
if n < 0 {
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
return priorLimit
}
if _mem0.FhardLimit > 0 && (n > _mem0.FhardLimit || n == 0) {
n = _mem0.FhardLimit
}
_mem0.FalarmThreshold = n
nUsed = _sqlite3StatusValue(tls, SQLITE_STATUS_MEMORY_USED)
*(*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_mem0)) + 24)) = libc.BoolInt32(n > 0 && n <= nUsed)
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
excess = Xsqlite3_memory_used(tls) - n
if excess > 0 {
Xsqlite3_release_memory(tls, int32(excess&libc.Int64FromInt32(0x7fffffff)))
}
return priorLimit
}
func Xsqlite3_soft_heap_limit(tls *libc.TLS, n int32) {
if n < 0 {
n = 0
}
Xsqlite3_soft_heap_limit64(tls, int64(n))
}
// C documentation
//
// /*
// ** Set the hard heap-size limit for the library. An argument of zero
// ** disables the hard heap limit. A negative argument is a no-op used
// ** to obtain the return value without affecting the hard heap limit.
// **
// ** The return value is the value of the hard heap limit just prior to
// ** calling this interface.
// **
// ** Setting the hard heap limit will also activate the soft heap limit
// ** and constrain the soft heap limit to be no more than the hard heap
// ** limit.
// */
func Xsqlite3_hard_heap_limit64(tls *libc.TLS, n Tsqlite3_int64) (r Tsqlite3_int64) {
var priorLimit Tsqlite3_int64
var rc int32
_, _ = priorLimit, rc
rc = Xsqlite3_initialize(tls)
if rc != 0 {
return int64(-int32(1))
}
Xsqlite3_mutex_enter(tls, _mem0.Fmutex)
priorLimit = _mem0.FhardLimit
if n >= 0 {
_mem0.FhardLimit = n
if n < _mem0.FalarmThreshold || _mem0.FalarmThreshold == 0 {
_mem0.FalarmThreshold = n
}
}
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
return priorLimit
}
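// Illustrative usage sketch, not part of the generated translation: how an
// application might drive the two heap-limit interfaces above through this
// package. It assumes libc.NewTLS and (*libc.TLS).Close from modernc.org/libc
// behave as in current releases; the 64 MiB and 32 MiB figures are arbitrary
// examples and exampleConfigureHeapLimits is a made-up name.
func exampleConfigureHeapLimits() (softPrior, hardPrior Tsqlite3_int64) {
tls := libc.NewTLS()
defer tls.Close()
// Setting the hard limit first also activates the soft limit and caps it.
hardPrior = Xsqlite3_hard_heap_limit64(tls, int64(64<<20))
softPrior = Xsqlite3_soft_heap_limit64(tls, int64(32<<20))
// A negative argument only queries the current value, as documented above.
_ = Xsqlite3_soft_heap_limit64(tls, int64(-1))
return softPrior, hardPrior
}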
// C documentation
//
// /*
// ** Initialize the memory allocation subsystem.
// */
func _sqlite3MallocInit(tls *libc.TLS) (r int32) {
var rc int32
_ = rc
if _sqlite3Config.Fm.FxMalloc == uintptr(0) {
_sqlite3MemSetDefault(tls)
}
_mem0.Fmutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MEM))
if _sqlite3Config.FpPage == uintptr(0) || _sqlite3Config.FszPage < int32(512) || _sqlite3Config.FnPage <= 0 {
_sqlite3Config.FpPage = uintptr(0)
_sqlite3Config.FszPage = 0
}
rc = (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxInit})))(tls, _sqlite3Config.Fm.FpAppData)
if rc != SQLITE_OK {
libc.Xmemset(tls, uintptr(unsafe.Pointer(&_mem0)), 0, uint64(32))
}
return rc
}
// C documentation
//
// /*
// ** Return true if the heap is currently under memory pressure - in other
// ** words if the amount of heap used is close to the limit set by
// ** sqlite3_soft_heap_limit().
// */
func _sqlite3HeapNearlyFull(tls *libc.TLS) (r int32) {
return *(*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_mem0)) + 24))
}
// C documentation
//
// /*
// ** Deinitialize the memory allocation subsystem.
// */
func _sqlite3MallocEnd(tls *libc.TLS) {
if _sqlite3Config.Fm.FxShutdown != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxShutdown})))(tls, _sqlite3Config.Fm.FpAppData)
}
libc.Xmemset(tls, uintptr(unsafe.Pointer(&_mem0)), 0, uint64(32))
}
// C documentation
//
// /*
// ** Return the amount of memory currently checked out.
// */
func Xsqlite3_memory_used(tls *libc.TLS) (r Tsqlite3_int64) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* mx at bp+8 */ Tsqlite3_int64
var _ /* res at bp+0 */ Tsqlite3_int64
Xsqlite3_status64(tls, SQLITE_STATUS_MEMORY_USED, bp, bp+8, 0)
return *(*Tsqlite3_int64)(unsafe.Pointer(bp))
}
// C documentation
//
// /*
// ** Return the maximum amount of memory that has ever been
// ** checked out since either the beginning of this process
// ** or since the most recent reset.
// */
func Xsqlite3_memory_highwater(tls *libc.TLS, resetFlag int32) (r Tsqlite3_int64) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* mx at bp+8 */ Tsqlite3_int64
var _ /* res at bp+0 */ Tsqlite3_int64
Xsqlite3_status64(tls, SQLITE_STATUS_MEMORY_USED, bp, bp+8, resetFlag)
return *(*Tsqlite3_int64)(unsafe.Pointer(bp + 8))
}
// C documentation
//
// /*
// ** Trigger the alarm
// */
func _sqlite3MallocAlarm(tls *libc.TLS, nByte int32) {
if _mem0.FalarmThreshold <= 0 {
return
}
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
Xsqlite3_release_memory(tls, nByte)
Xsqlite3_mutex_enter(tls, _mem0.Fmutex)
}
// C documentation
//
// /*
// ** Do a memory allocation with statistics and alarms. Assume the
// ** lock is already held.
// */
func _mallocWithAlarm(tls *libc.TLS, n int32, pp uintptr) {
var nFull int32
var nUsed Tsqlite3_int64
var p uintptr
_, _, _ = nFull, nUsed, p
/* In Firefox (circa 2017-02-08), xRoundup() is remapped to an internal
** implementation of malloc_good_size(), which must be called in debug
** mode and specifically when the DMD "Dark Matter Detector" is enabled
** or else a crash results. Hence, do not attempt to optimize out the
** following xRoundup() call. */
nFull = (*(*func(*libc.TLS, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxRoundup})))(tls, n)
_sqlite3StatusHighwater(tls, int32(SQLITE_STATUS_MALLOC_SIZE), n)
if _mem0.FalarmThreshold > 0 {
nUsed = _sqlite3StatusValue(tls, SQLITE_STATUS_MEMORY_USED)
if nUsed >= _mem0.FalarmThreshold-int64(nFull) {
*(*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_mem0)) + 24)) = int32(1)
_sqlite3MallocAlarm(tls, nFull)
if _mem0.FhardLimit != 0 {
nUsed = _sqlite3StatusValue(tls, SQLITE_STATUS_MEMORY_USED)
if nUsed >= _mem0.FhardLimit-int64(nFull) {
*(*uintptr)(unsafe.Pointer(pp)) = uintptr(0)
return
}
}
} else {
*(*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_mem0)) + 24)) = 0
}
}
p = (*(*func(*libc.TLS, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxMalloc})))(tls, nFull)
if p == uintptr(0) && _mem0.FalarmThreshold > 0 {
_sqlite3MallocAlarm(tls, nFull)
p = (*(*func(*libc.TLS, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxMalloc})))(tls, nFull)
}
if p != 0 {
nFull = _sqlite3MallocSize(tls, p)
_sqlite3StatusUp(tls, SQLITE_STATUS_MEMORY_USED, nFull)
_sqlite3StatusUp(tls, int32(SQLITE_STATUS_MALLOC_COUNT), int32(1))
}
*(*uintptr)(unsafe.Pointer(pp)) = p
}
/*
** Maximum size of any single memory allocation.
**
** This is not a limit on the total amount of memory used. This is
** a limit on the size parameter to sqlite3_malloc() and sqlite3_realloc().
**
** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391
** This provides a 256-byte safety margin for defense against 32-bit
** signed integer overflow bugs when computing memory allocation sizes.
** Paranoid applications might want to reduce the maximum allocation size
** further for an even larger safety margin. 0x3fffffff or 0x0fffffff
** or even smaller would be reasonable upper bounds on the size of a memory
** allocation for most applications.
*/
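// Worked check of the margin described above: 0x7fffffff - 0x7ffffeff = 0x100
// = 256 bytes, and the rejection threshold 0x7fffff00 used by sqlite3Realloc()
// is exactly one byte above the 0x7ffffeff cap, so small header or rounding
// adjustments to a maximal request cannot overflow a signed 32-bit size.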
// C documentation
//
// /*
// ** Allocate memory. This routine is like sqlite3_malloc() except that it
// ** assumes the memory subsystem has already been initialized.
// */
func _sqlite3Malloc(tls *libc.TLS, n Tu64) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* p at bp+0 */ uintptr
if n == uint64(0) || n > uint64(SQLITE_MAX_ALLOCATION_SIZE) {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
if _sqlite3Config.FbMemstat != 0 {
Xsqlite3_mutex_enter(tls, _mem0.Fmutex)
_mallocWithAlarm(tls, int32(n), bp)
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
} else {
*(*uintptr)(unsafe.Pointer(bp)) = (*(*func(*libc.TLS, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxMalloc})))(tls, int32(n))
}
}
/* IMP: R-11148-40995 */
return *(*uintptr)(unsafe.Pointer(bp))
}
// C documentation
//
// /*
// ** This version of the memory allocation is for use by the application.
// ** First make sure the memory subsystem is initialized, then do the
// ** allocation.
// */
func Xsqlite3_malloc(tls *libc.TLS, n int32) (r uintptr) {
var v1 uintptr
_ = v1
if Xsqlite3_initialize(tls) != 0 {
return uintptr(0)
}
if n <= 0 {
v1 = uintptr(0)
} else {
v1 = _sqlite3Malloc(tls, uint64(n))
}
return v1
}
func Xsqlite3_malloc64(tls *libc.TLS, n Tsqlite3_uint64) (r uintptr) {
if Xsqlite3_initialize(tls) != 0 {
return uintptr(0)
}
return _sqlite3Malloc(tls, n)
}
// C documentation
//
// /*
// ** TRUE if p is a lookaside memory allocation from db
// */
func _isLookaside(tls *libc.TLS, db uintptr, p uintptr) (r int32) {
return libc.BoolInt32(uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart) && uint64(p) < uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpTrueEnd))
}
// C documentation
//
// /*
// ** Return the size of a memory allocation previously obtained from
// ** sqlite3Malloc() or sqlite3_malloc().
// */
func _sqlite3MallocSize(tls *libc.TLS, p uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxSize})))(tls, p)
}
func _lookasideMallocSize(tls *libc.TLS, db uintptr, p uintptr) (r int32) {
var v1 int32
_ = v1
if p < (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpMiddle {
v1 = int32((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FszTrue)
} else {
v1 = int32(LOOKASIDE_SMALL)
}
return v1
}
func _sqlite3DbMallocSize(tls *libc.TLS, db uintptr, p uintptr) (r int32) {
if db != 0 {
if uint64(p) < uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpTrueEnd) {
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpMiddle) {
return int32(LOOKASIDE_SMALL)
}
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart) {
return int32((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FszTrue)
}
}
}
return (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxSize})))(tls, p)
}
func Xsqlite3_msize(tls *libc.TLS, p uintptr) (r Tsqlite3_uint64) {
var v1 int32
_ = v1
if p != 0 {
v1 = (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxSize})))(tls, p)
} else {
v1 = 0
}
return uint64(v1)
}
// C documentation
//
// /*
// ** Free memory previously obtained from sqlite3Malloc().
// */
func Xsqlite3_free(tls *libc.TLS, p uintptr) {
if p == uintptr(0) {
return
} /* IMP: R-49053-54554 */
if _sqlite3Config.FbMemstat != 0 {
Xsqlite3_mutex_enter(tls, _mem0.Fmutex)
_sqlite3StatusDown(tls, SQLITE_STATUS_MEMORY_USED, _sqlite3MallocSize(tls, p))
_sqlite3StatusDown(tls, int32(SQLITE_STATUS_MALLOC_COUNT), int32(1))
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxFree})))(tls, p)
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
} else {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxFree})))(tls, p)
}
}
// C documentation
//
// /*
// ** Add the size of memory allocation "p" to the count in
// ** *db->pnBytesFreed.
// */
func _measureAllocationSize(tls *libc.TLS, db uintptr, p uintptr) {
*(*int32)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed)) += _sqlite3DbMallocSize(tls, db, p)
}
// C documentation
//
// /*
// ** Free memory that might be associated with a particular database
// ** connection. Calling sqlite3DbFree(D,X) for X==0 is a harmless no-op.
// ** The sqlite3DbFreeNN(D,X) version requires that X be non-NULL.
// */
func _sqlite3DbFreeNN(tls *libc.TLS, db uintptr, p uintptr) {
var pBuf, pBuf1 uintptr
_, _ = pBuf, pBuf1
if db != 0 {
if uint64(p) < uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd) {
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpMiddle) {
pBuf = p
(*TLookasideSlot)(unsafe.Pointer(pBuf)).FpNext = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree = pBuf
return
}
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart) {
pBuf1 = p
(*TLookasideSlot)(unsafe.Pointer(pBuf1)).FpNext = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree = pBuf1
return
}
}
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed != 0 {
_measureAllocationSize(tls, db, p)
return
}
}
Xsqlite3_free(tls, p)
}
func _sqlite3DbNNFreeNN(tls *libc.TLS, db uintptr, p uintptr) {
var pBuf, pBuf1 uintptr
_, _ = pBuf, pBuf1
if uint64(p) < uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd) {
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpMiddle) {
pBuf = p
(*TLookasideSlot)(unsafe.Pointer(pBuf)).FpNext = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree = pBuf
return
}
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart) {
pBuf1 = p
(*TLookasideSlot)(unsafe.Pointer(pBuf1)).FpNext = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree = pBuf1
return
}
}
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed != 0 {
_measureAllocationSize(tls, db, p)
return
}
Xsqlite3_free(tls, p)
}
func _sqlite3DbFree(tls *libc.TLS, db uintptr, p uintptr) {
if p != 0 {
_sqlite3DbFreeNN(tls, db, p)
}
}
// C documentation
//
// /*
// ** Change the size of an existing memory allocation
// */
func _sqlite3Realloc(tls *libc.TLS, pOld uintptr, nBytes Tu64) (r uintptr) {
var nDiff, nNew, nOld int32
var nUsed, v1 Tsqlite3_int64
var pNew uintptr
var v2 bool
_, _, _, _, _, _, _ = nDiff, nNew, nOld, nUsed, pNew, v1, v2
if pOld == uintptr(0) {
return _sqlite3Malloc(tls, nBytes) /* IMP: R-04300-56712 */
}
if nBytes == uint64(0) {
Xsqlite3_free(tls, pOld) /* IMP: R-26507-47431 */
return uintptr(0)
}
if nBytes >= uint64(0x7fffff00) {
/* The 0x7fffff00 limit term is explained in comments on sqlite3Malloc() */
return uintptr(0)
}
nOld = _sqlite3MallocSize(tls, pOld)
/* IMPLEMENTATION-OF: R-46199-30249 SQLite guarantees that the second
** argument to xRealloc is always a value returned by a prior call to
** xRoundup. */
nNew = (*(*func(*libc.TLS, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxRoundup})))(tls, int32(nBytes))
if nOld == nNew {
pNew = pOld
} else {
if _sqlite3Config.FbMemstat != 0 {
Xsqlite3_mutex_enter(tls, _mem0.Fmutex)
_sqlite3StatusHighwater(tls, int32(SQLITE_STATUS_MALLOC_SIZE), int32(nBytes))
nDiff = nNew - nOld
if v2 = nDiff > 0; v2 {
v1 = _sqlite3StatusValue(tls, SQLITE_STATUS_MEMORY_USED)
nUsed = v1
}
if v2 && v1 >= _mem0.FalarmThreshold-int64(nDiff) {
_sqlite3MallocAlarm(tls, nDiff)
if _mem0.FhardLimit > 0 && nUsed >= _mem0.FhardLimit-int64(nDiff) {
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
return uintptr(0)
}
}
pNew = (*(*func(*libc.TLS, uintptr, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxRealloc})))(tls, pOld, nNew)
if pNew == uintptr(0) && _mem0.FalarmThreshold > 0 {
_sqlite3MallocAlarm(tls, int32(nBytes))
pNew = (*(*func(*libc.TLS, uintptr, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxRealloc})))(tls, pOld, nNew)
}
if pNew != 0 {
nNew = _sqlite3MallocSize(tls, pNew)
_sqlite3StatusUp(tls, SQLITE_STATUS_MEMORY_USED, nNew-nOld)
}
Xsqlite3_mutex_leave(tls, _mem0.Fmutex)
} else {
pNew = (*(*func(*libc.TLS, uintptr, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fm.FxRealloc})))(tls, pOld, nNew)
}
}
/* IMP: R-11148-40995 */
return pNew
}
// C documentation
//
// /*
// ** The public interface to sqlite3Realloc. Make sure that the memory
// ** subsystem is initialized prior to invoking sqlite3Realloc().
// */
func Xsqlite3_realloc(tls *libc.TLS, pOld uintptr, n int32) (r uintptr) {
if Xsqlite3_initialize(tls) != 0 {
return uintptr(0)
}
if n < 0 {
n = 0
} /* IMP: R-26507-47431 */
return _sqlite3Realloc(tls, pOld, uint64(n))
}
func Xsqlite3_realloc64(tls *libc.TLS, pOld uintptr, n Tsqlite3_uint64) (r uintptr) {
if Xsqlite3_initialize(tls) != 0 {
return uintptr(0)
}
return _sqlite3Realloc(tls, pOld, n)
}
// C documentation
//
// /*
// ** Allocate and zero memory.
// */
func _sqlite3MallocZero(tls *libc.TLS, n Tu64) (r uintptr) {
var p uintptr
_ = p
p = _sqlite3Malloc(tls, n)
if p != 0 {
libc.Xmemset(tls, p, 0, n)
}
return p
}
// C documentation
//
// /*
// ** Allocate and zero memory. If the allocation fails, set
// ** the mallocFailed flag in the connection pointer.
// */
func _sqlite3DbMallocZero(tls *libc.TLS, db uintptr, n Tu64) (r uintptr) {
var p uintptr
_ = p
p = _sqlite3DbMallocRaw(tls, db, n)
if p != 0 {
libc.Xmemset(tls, p, 0, n)
}
return p
}
// C documentation
//
// /* Finish the work of sqlite3DbMallocRawNN for the unusual and
// ** slower case when the allocation cannot be fulfilled using lookaside.
// */
func _dbMallocRawFinish(tls *libc.TLS, db uintptr, n Tu64) (r uintptr) {
var p uintptr
_ = p
p = _sqlite3Malloc(tls, n)
if !(p != 0) {
_sqlite3OomFault(tls, db)
}
return p
}
// C documentation
//
// /*
// ** Allocate memory, either lookaside (if possible) or heap.
// ** If the allocation fails, set the mallocFailed flag in
// ** the connection pointer.
// **
// ** If db!=0 and db->mallocFailed is true (indicating a prior malloc
// ** failure on the same database connection) then always return 0.
// ** Hence for a particular database connection, once malloc starts
// ** failing, it fails consistently until mallocFailed is reset.
// ** This is an important assumption. There are many places in the
// ** code that do things like this:
// **
// ** int *a = (int*)sqlite3DbMallocRaw(db, 100);
// ** int *b = (int*)sqlite3DbMallocRaw(db, 200);
// ** if( b ) a[10] = 9;
// **
// ** In other words, if a subsequent malloc (ex: "b") worked, it is assumed
// ** that all prior mallocs (ex: "a") worked too.
// **
// ** The sqlite3DbMallocRawNN() variant guarantees that the "db" parameter is
// ** not a NULL pointer.
// */
func _sqlite3DbMallocRaw(tls *libc.TLS, db uintptr, n Tu64) (r uintptr) {
var p uintptr
_ = p
if db != 0 {
return _sqlite3DbMallocRawNN(tls, db, n)
}
p = _sqlite3Malloc(tls, n)
return p
}
func _sqlite3DbMallocRawNN(tls *libc.TLS, db uintptr, n Tu64) (r uintptr) {
var pBuf, v1, v2, v3, v4 uintptr
_, _, _, _, _ = pBuf, v1, v2, v3, v4
if n > uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.Fsz) {
if !((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FbDisable != 0) {
*(*Tu32)(unsafe.Pointer(db + 440 + 16 + 1*4))++
} else {
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
return uintptr(0)
}
}
return _dbMallocRawFinish(tls, db, n)
}
if n <= uint64(LOOKASIDE_SMALL) {
v1 = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree
pBuf = v1
if v1 != uintptr(0) {
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallFree = (*TLookasideSlot)(unsafe.Pointer(pBuf)).FpNext
*(*Tu32)(unsafe.Pointer(db + 440 + 16))++
return pBuf
} else {
v2 = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallInit
pBuf = v2
if v2 != uintptr(0) {
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpSmallInit = (*TLookasideSlot)(unsafe.Pointer(pBuf)).FpNext
*(*Tu32)(unsafe.Pointer(db + 440 + 16))++
return pBuf
}
}
}
v3 = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree
pBuf = v3
if v3 != uintptr(0) {
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpFree = (*TLookasideSlot)(unsafe.Pointer(pBuf)).FpNext
*(*Tu32)(unsafe.Pointer(db + 440 + 16))++
return pBuf
} else {
v4 = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpInit
pBuf = v4
if v4 != uintptr(0) {
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpInit = (*TLookasideSlot)(unsafe.Pointer(pBuf)).FpNext
*(*Tu32)(unsafe.Pointer(db + 440 + 16))++
return pBuf
} else {
*(*Tu32)(unsafe.Pointer(db + 440 + 16 + 2*4))++
}
}
return _dbMallocRawFinish(tls, db, n)
}
// C documentation
//
// /*
// ** Resize the block of memory pointed to by p to n bytes. If the
// ** resize fails, set the mallocFailed flag in the connection object.
// */
func _sqlite3DbRealloc(tls *libc.TLS, db uintptr, p uintptr, n Tu64) (r uintptr) {
if p == uintptr(0) {
return _sqlite3DbMallocRawNN(tls, db, n)
}
if uint64(p) < uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd) {
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpMiddle) {
if n <= uint64(LOOKASIDE_SMALL) {
return p
}
} else {
if uint64(p) >= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart) {
if n <= uint64((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FszTrue) {
return p
}
}
}
}
return _dbReallocFinish(tls, db, p, n)
}
func _dbReallocFinish(tls *libc.TLS, db uintptr, p uintptr, n Tu64) (r uintptr) {
var pNew uintptr
_ = pNew
pNew = uintptr(0)
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed) == 0 {
if _isLookaside(tls, db, p) != 0 {
pNew = _sqlite3DbMallocRawNN(tls, db, n)
if pNew != 0 {
libc.Xmemcpy(tls, pNew, p, uint64(_lookasideMallocSize(tls, db, p)))
_sqlite3DbFree(tls, db, p)
}
} else {
pNew = _sqlite3Realloc(tls, p, n)
if !(pNew != 0) {
_sqlite3OomFault(tls, db)
}
}
}
return pNew
}
// C documentation
//
// /*
// ** Attempt to reallocate p. If the reallocation fails, then free p
// ** and set the mallocFailed flag in the database connection.
// */
func _sqlite3DbReallocOrFree(tls *libc.TLS, db uintptr, p uintptr, n Tu64) (r uintptr) {
var pNew uintptr
_ = pNew
pNew = _sqlite3DbRealloc(tls, db, p, n)
if !(pNew != 0) {
_sqlite3DbFree(tls, db, p)
}
return pNew
}
// C documentation
//
// /*
// ** Make a copy of a string in memory obtained from sqliteMalloc(). These
// ** functions call sqlite3MallocRaw() directly instead of sqliteMalloc(). This
// ** is because when memory debugging is turned on, these two functions are
// ** called via macros that record the current file and line number in the
// ** ThreadData structure.
// */
func _sqlite3DbStrDup(tls *libc.TLS, db uintptr, z uintptr) (r uintptr) {
var n Tsize_t
var zNew uintptr
_, _ = n, zNew
if z == uintptr(0) {
return uintptr(0)
}
n = libc.Xstrlen(tls, z) + uint64(1)
zNew = _sqlite3DbMallocRaw(tls, db, n)
if zNew != 0 {
libc.Xmemcpy(tls, zNew, z, n)
}
return zNew
}
func _sqlite3DbStrNDup(tls *libc.TLS, db uintptr, z uintptr, n Tu64) (r uintptr) {
var zNew, v1 uintptr
_, _ = zNew, v1
if z != 0 {
v1 = _sqlite3DbMallocRawNN(tls, db, n+uint64(1))
} else {
v1 = uintptr(0)
}
zNew = v1
if zNew != 0 {
libc.Xmemcpy(tls, zNew, z, n)
*(*int8)(unsafe.Pointer(zNew + uintptr(n))) = 0
}
return zNew
}
// C documentation
//
// /*
// ** The text between zStart and zEnd represents a phrase within a larger
// ** SQL statement. Make a copy of this phrase in space obtained from
// ** sqlite3DbMalloc(). Omit leading and trailing whitespace.
// */
func _sqlite3DbSpanDup(tls *libc.TLS, db uintptr, zStart uintptr, zEnd uintptr) (r uintptr) {
var n int32
_ = n
for int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zStart)))])&int32(0x01) != 0 {
zStart++
}
n = int32(int64(zEnd) - int64(zStart))
for int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zStart + uintptr(n-int32(1)))))])&int32(0x01) != 0 {
n--
}
return _sqlite3DbStrNDup(tls, db, zStart, uint64(n))
}
// C documentation
//
// /*
// ** Free any prior content in *pz and replace it with a copy of zNew.
// */
func _sqlite3SetString(tls *libc.TLS, pz uintptr, db uintptr, zNew uintptr) {
var z uintptr
_ = z
z = _sqlite3DbStrDup(tls, db, zNew)
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(pz)))
*(*uintptr)(unsafe.Pointer(pz)) = z
}
// C documentation
//
// /*
// ** Call this routine to record the fact that an OOM (out-of-memory) error
// ** has happened. This routine will set db->mallocFailed, and also
// ** temporarily disable the lookaside memory allocator and interrupt
// ** any running VDBEs.
// **
// ** Always return a NULL pointer so that this routine can be invoked using
// **
// ** return sqlite3OomFault(db);
// **
// ** and thereby avoid unnecessary stack frame allocations for the overwhelmingly
// ** common case where no OOM occurs.
// */
func _sqlite3OomFault(tls *libc.TLS, db uintptr) (r uintptr) {
var pParse uintptr
_ = pParse
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed) == 0 && int32((*Tsqlite3)(unsafe.Pointer(db)).FbBenignMalloc) == 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed = uint8(1)
if (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeExec > 0 {
libc.AtomicStorePInt32(db+432, libc.Int32FromInt32(1))
}
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FbDisable++
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.Fsz = uint16(0)
if (*Tsqlite3)(unsafe.Pointer(db)).FpParse != 0 {
_sqlite3ErrorMsg(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpParse, __ccgo_ts+1620, 0)
(*TParse)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FpParse)).Frc = int32(SQLITE_NOMEM)
pParse = (*TParse)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FpParse)).FpOuterParse
for {
if !(pParse != 0) {
break
}
(*TParse)(unsafe.Pointer(pParse)).FnErr++
(*TParse)(unsafe.Pointer(pParse)).Frc = int32(SQLITE_NOMEM)
goto _1
_1:
;
pParse = (*TParse)(unsafe.Pointer(pParse)).FpOuterParse
}
}
}
return uintptr(0)
}
// C documentation
//
// /*
// ** This routine reactivates the memory allocator and clears the
// ** db->mallocFailed flag as necessary.
// **
// ** The memory allocator is not restarted if there are running
// ** VDBEs.
// */
func _sqlite3OomClear(tls *libc.TLS, db uintptr) {
var v1 int32
_ = v1
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 && (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeExec == 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed = uint8(0)
libc.AtomicStorePInt32(db+432, libc.Int32FromInt32(0))
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FbDisable--
if (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FbDisable != 0 {
v1 = 0
} else {
v1 = int32((*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FszTrue)
}
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.Fsz = uint16(v1)
}
}
// C documentation
//
// /*
// ** Take actions at the end of an API call to deal with error codes.
// */
func _apiHandleError(tls *libc.TLS, db uintptr, rc int32) (r int32) {
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 || rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(12)<<libc.Int32FromInt32(8) {
_sqlite3OomClear(tls, db)
_sqlite3Error(tls, db, int32(SQLITE_NOMEM))
return int32(SQLITE_NOMEM)
}
return rc & (*Tsqlite3)(unsafe.Pointer(db)).FerrMask
}
// C documentation
//
// /*
// ** This function must be called before exiting any API function (i.e.
// ** returning control to the user) that has called sqlite3_malloc or
// ** sqlite3_realloc. If a malloc() failure has occurred since the previous
// ** invocation, SQLITE_NOMEM is returned instead of the second argument and
// ** the connection error code is set to SQLITE_NOMEM.
// */
func _sqlite3ApiExit(tls *libc.TLS, db uintptr, rc int32) (r int32) {
/* The db handle must hold the connection handle mutex here. Otherwise
** the read (and possible write) of db->mallocFailed
** is unsafe, as is the call to sqlite3Error().
*/
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 || rc != 0 {
return _apiHandleError(tls, db, rc)
}
return 0
}
/************** End of malloc.c **********************************************/
/************** Begin file printf.c ******************************************/
/*
** The "printf" code that follows dates from the 1980's. It is in
** the public domain.
**
**************************************************************************
**
** This file contains code for a set of "printf"-like routines. These
** routines format strings much like the printf() from the standard C
** library, though the implementation here has enhancements to support
** SQLite.
*/
/* #include "sqliteInt.h" */
/*
** Conversion types fall into various categories as defined by the
** following enumeration.
*/
/* The rest are extensions, not normally found in printf() */
// C documentation
//
// /*
// ** An "etByte" is an 8-bit unsigned value.
// */
type TetByte = uint8
type etByte = TetByte
// C documentation
//
// /*
// ** Each builtin conversion character (ex: the 'd' in "%d") is described
// ** by an instance of the following structure
// */
type Tet_info = struct {
Ffmttype int8
Fbase TetByte
Fflags TetByte
Ftype1 TetByte
Fcharset TetByte
Fprefix TetByte
}
type et_info = Tet_info
/*
** Allowed values for et_info.flags
*/
// C documentation
//
// /*
// ** The following table is searched linearly, so it is good to put the
// ** most frequently used conversion types first.
// */
var _aDigits = [33]int8{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}
var _aPrefix = [7]int8{'-', 'x', '0', 0, 'X', '0'}
var _fmtinfo = [23]Tet_info{
0: {
Ffmttype: int8('d'),
Fbase: uint8(10),
Fflags: uint8(1),
Ftype1: uint8(etDECIMAL),
},
1: {
Ffmttype: int8('s'),
Fflags: uint8(4),
Ftype1: uint8(etSTRING),
},
2: {
Ffmttype: int8('g'),
Fflags: uint8(1),
Ftype1: uint8(etGENERIC),
Fcharset: uint8(30),
},
3: {
Ffmttype: int8('z'),
Fflags: uint8(4),
Ftype1: uint8(etDYNSTRING),
},
4: {
Ffmttype: int8('q'),
Fflags: uint8(4),
Ftype1: uint8(etSQLESCAPE),
},
5: {
Ffmttype: int8('Q'),
Fflags: uint8(4),
Ftype1: uint8(etSQLESCAPE2),
},
6: {
Ffmttype: int8('w'),
Fflags: uint8(4),
Ftype1: uint8(etSQLESCAPE3),
},
7: {
Ffmttype: int8('c'),
Ftype1: uint8(etCHARX),
},
8: {
Ffmttype: int8('o'),
Fbase: uint8(8),
Fprefix: uint8(2),
},
9: {
Ffmttype: int8('u'),
Fbase: uint8(10),
Ftype1: uint8(etDECIMAL),
},
10: {
Ffmttype: int8('x'),
Fbase: uint8(16),
Fcharset: uint8(16),
Fprefix: uint8(1),
},
11: {
Ffmttype: int8('X'),
Fbase: uint8(16),
Fprefix: uint8(4),
},
12: {
Ffmttype: int8('f'),
Fflags: uint8(1),
Ftype1: uint8(etFLOAT),
},
13: {
Ffmttype: int8('e'),
Fflags: uint8(1),
Ftype1: uint8(etEXP),
Fcharset: uint8(30),
},
14: {
Ffmttype: int8('E'),
Fflags: uint8(1),
Ftype1: uint8(etEXP),
Fcharset: uint8(14),
},
15: {
Ffmttype: int8('G'),
Fflags: uint8(1),
Ftype1: uint8(etGENERIC),
Fcharset: uint8(14),
},
16: {
Ffmttype: int8('i'),
Fbase: uint8(10),
Fflags: uint8(1),
Ftype1: uint8(etDECIMAL),
},
17: {
Ffmttype: int8('n'),
Ftype1: uint8(etSIZE),
},
18: {
Ffmttype: int8('%'),
Ftype1: uint8(etPERCENT),
},
19: {
Ffmttype: int8('p'),
Fbase: uint8(16),
Ftype1: uint8(etPOINTER),
Fprefix: uint8(1),
},
20: {
Ffmttype: int8('T'),
Ftype1: uint8(etTOKEN),
},
21: {
Ffmttype: int8('S'),
Ftype1: uint8(etSRCITEM),
},
22: {
Ffmttype: int8('r'),
Fbase: uint8(10),
Fflags: uint8(1),
Ftype1: uint8(etORDINAL),
},
}
/* Notes:
**
** %S Takes a pointer to SrcItem. Shows name or database.name
** %!S Like %S but prefer the zName over the zAlias
*/
// C documentation
//
// /*
// ** Set the StrAccum object to an error mode.
// */
func _sqlite3StrAccumSetError(tls *libc.TLS, p uintptr, eError Tu8) {
(*TStrAccum)(unsafe.Pointer(p)).FaccError = eError
if (*TStrAccum)(unsafe.Pointer(p)).FmxAlloc != 0 {
Xsqlite3_str_reset(tls, p)
}
if int32(eError) == int32(SQLITE_TOOBIG) {
_sqlite3ErrorToParser(tls, (*TStrAccum)(unsafe.Pointer(p)).Fdb, int32(eError))
}
}
// C documentation
//
// /*
// ** Extra argument values from a PrintfArguments object
// */
func _getIntArg(tls *libc.TLS, p uintptr) (r Tsqlite3_int64) {
var v1 int32
var v2 uintptr
_, _ = v1, v2
if (*TPrintfArguments)(unsafe.Pointer(p)).FnArg <= (*TPrintfArguments)(unsafe.Pointer(p)).FnUsed {
return 0
}
v2 = p + 4
v1 = *(*int32)(unsafe.Pointer(v2))
*(*int32)(unsafe.Pointer(v2))++
return Xsqlite3_value_int64(tls, *(*uintptr)(unsafe.Pointer((*TPrintfArguments)(unsafe.Pointer(p)).FapArg + uintptr(v1)*8)))
}
func _getDoubleArg(tls *libc.TLS, p uintptr) (r float64) {
var v1 int32
var v2 uintptr
_, _ = v1, v2
if (*TPrintfArguments)(unsafe.Pointer(p)).FnArg <= (*TPrintfArguments)(unsafe.Pointer(p)).FnUsed {
return float64(0)
}
v2 = p + 4
v1 = *(*int32)(unsafe.Pointer(v2))
*(*int32)(unsafe.Pointer(v2))++
return Xsqlite3_value_double(tls, *(*uintptr)(unsafe.Pointer((*TPrintfArguments)(unsafe.Pointer(p)).FapArg + uintptr(v1)*8)))
}
func _getTextArg(tls *libc.TLS, p uintptr) (r uintptr) {
var v1 int32
var v2 uintptr
_, _ = v1, v2
if (*TPrintfArguments)(unsafe.Pointer(p)).FnArg <= (*TPrintfArguments)(unsafe.Pointer(p)).FnUsed {
return uintptr(0)
}
v2 = p + 4
v1 = *(*int32)(unsafe.Pointer(v2))
*(*int32)(unsafe.Pointer(v2))++
return Xsqlite3_value_text(tls, *(*uintptr)(unsafe.Pointer((*TPrintfArguments)(unsafe.Pointer(p)).FapArg + uintptr(v1)*8)))
}
// C documentation
//
// /*
// ** Allocate memory for a temporary buffer needed for printf rendering.
// **
// ** If the requested size of the temp buffer is larger than the size
// ** of the output buffer in pAccum, then cause an SQLITE_TOOBIG error.
// ** Do the size check before the memory allocation to prevent rogue
// ** SQL from requesting large allocations using the precision or width
// ** field of the printf() function.
// */
func _printfTempBuf(tls *libc.TLS, pAccum uintptr, n Tsqlite3_int64) (r uintptr) {
var z uintptr
_ = z
if (*Tsqlite3_str)(unsafe.Pointer(pAccum)).FaccError != 0 {
return uintptr(0)
}
if n > int64((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnAlloc) && n > int64((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FmxAlloc) {
_sqlite3StrAccumSetError(tls, pAccum, uint8(SQLITE_TOOBIG))
return uintptr(0)
}
z = _sqlite3DbMallocRaw(tls, (*Tsqlite3_str)(unsafe.Pointer(pAccum)).Fdb, uint64(n))
if z == uintptr(0) {
_sqlite3StrAccumSetError(tls, pAccum, uint8(SQLITE_NOMEM))
}
return z
}
/*
** On machines with a small stack size, you can redefine the
** SQLITE_PRINT_BUF_SIZE to be something smaller, if desired.
*/
/*
** Hard limit on the precision of floating-point conversions.
*/
// C documentation
//
// /*
// ** Render a string given by "fmt" into the StrAccum object.
// */
func Xsqlite3_str_vappendf(tls *libc.TLS, pAccum uintptr, fmt uintptr, ap Tva_list) {
bp := tls.Alloc(144)
defer tls.Free(144)
var bArgList, base Tu8
var bufpt, cset, escarg, infop, pArgList, pExpr, pItem, pSel, pToken, pre, z, zExtra, zOut, v103, v11, v14, v15, v17, v20, v21, v23, v24, v4, v45, v46, v47, v48, v49, v52, v55, v57, v59, v61, v62, v64, v67, v68, v70, v72, v75, v76, v77, v78, v79, v80, v81, v82, v83, v89, v91, v94, v98, p92 uintptr
var c, e2, exp, i, iRound, idx, ii, isnull, ix, j, length, nOut, nPad, needQuote, nn, precision, width, x, v10, v110, v111, v16, v18, v19, v2, v22, v3, v51, v56, v58, v60, v65, v66, v71, v73, v74, v85, v86, v87, v88, v90, v93, v96, v97 int32
var cThousand, done, flag_alternateform, flag_altform2, flag_dp, flag_leftjustify, flag_long, flag_prefix, flag_rtz, flag_zeropad, xtype, v5, v6, v7, v8, v9 TetByte
var ch, px, wx uint32
var ch1, prefix, q, x1, v101, v107, v54 int8
var i1, j1, k, n1, nCopyBytes, nPrior, szBufNeeded, v, v100, v104, v106, v108, v109 Ti64
var longvalue Tsqlite_uint64
var n Tu64
var realvalue float64
var v102, v12 bool
var _ /* buf at bp+0 */ [70]int8
var _ /* s at bp+72 */ TFpDecode
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = bArgList, base, bufpt, c, cThousand, ch, ch1, cset, done, e2, escarg, exp, flag_alternateform, flag_altform2, flag_dp, flag_leftjustify, flag_long, flag_prefix, flag_rtz, flag_zeropad, i, i1, iRound, idx, ii, infop, isnull, ix, j, j1, k, length, longvalue, n, n1, nCopyBytes, nOut, nPad, nPrior, needQuote, nn, pArgList, pExpr, pItem, pSel, pToken, pre, precision, prefix, px, q, realvalue, szBufNeeded, v, width, wx, x, x1, xtype, z, zExtra, zOut, v10, v100, v101, v102, v103, v104, v106, v107, v108, v109, v11, v110, v111, v12, v14, v15, v16, v17, v18, v19, v2, v20, v21, v22, v23, v24, v3, v4, v45, v46, v47, v48, v49, v5, v51, v52, v54, v55, v56, v57, v58, v59, v6, v60, v61, v62, v64, v65, v66, v67, v68, v7, v70, v71, v72, v73, v74, v75, v76, v77, v78, v79, v8, v80, v81, v82, v83, v85, v86, v87, v88, v89, v9, v90, v91, v93, v94, v96, v97, v98, p92 /* Thousands separator for %d and %u */
xtype = uint8(etINVALID) /* Size of the rendering buffer */
zExtra = uintptr(0) /* True if trailing zeros should be removed */
pArgList = uintptr(0) /* Conversion buffer */
/* pAccum never starts out with an empty buffer that was obtained from
** malloc(). This precondition is required by the mprintf("%z...")
** optimization. */
bufpt = uintptr(0)
if int32((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FprintfFlags)&int32(SQLITE_PRINTF_SQLFUNC) != 0 {
pArgList = libc.VaUintptr(&ap)
bArgList = uint8(1)
} else {
bArgList = uint8(0)
}
for {
v2 = int32(*(*int8)(unsafe.Pointer(fmt)))
c = v2
if !(v2 != 0) {
break
}
if c != int32('%') {
bufpt = fmt
for cond := true; cond; cond = *(*int8)(unsafe.Pointer(fmt)) != 0 && int32(*(*int8)(unsafe.Pointer(fmt))) != int32('%') {
fmt++
}
Xsqlite3_str_append(tls, pAccum, bufpt, int32(int64(fmt)-int64(bufpt)))
if int32(*(*int8)(unsafe.Pointer(fmt))) == 0 {
break
}
}
fmt++
v4 = fmt
v3 = int32(*(*int8)(unsafe.Pointer(v4)))
c = v3
if v3 == 0 {
Xsqlite3_str_append(tls, pAccum, __ccgo_ts+1634, int32(1))
break
}
/* Find out what flags are present */
v9 = libc.Uint8FromInt32(0)
flag_zeropad = v9
v8 = v9
flag_altform2 = v8
v7 = v8
flag_alternateform = v7
v6 = v7
cThousand = v6
v5 = v6
flag_prefix = v5
flag_leftjustify = v5
done = uint8(0)
width = 0
flag_long = uint8(0)
precision = -int32(1)
for {
switch c {
case int32('-'):
flag_leftjustify = uint8(1)
case int32('+'):
flag_prefix = uint8('+')
case int32(' '):
flag_prefix = uint8(' ')
case int32('#'):
flag_alternateform = uint8(1)
case int32('!'):
flag_altform2 = uint8(1)
case int32('0'):
flag_zeropad = uint8(1)
case int32(','):
cThousand = uint8(',')
default:
done = uint8(1)
case int32('l'):
flag_long = uint8(1)
fmt++
v14 = fmt
c = int32(*(*int8)(unsafe.Pointer(v14)))
if c == int32('l') {
fmt++
v15 = fmt
c = int32(*(*int8)(unsafe.Pointer(v15)))
flag_long = uint8(2)
}
done = uint8(1)
case int32('1'):
fallthrough
case int32('2'):
fallthrough
case int32('3'):
fallthrough
case int32('4'):
fallthrough
case int32('5'):
fallthrough
case int32('6'):
fallthrough
case int32('7'):
fallthrough
case int32('8'):
fallthrough
case int32('9'):
wx = uint32(c - int32('0'))
for {
fmt++
v17 = fmt
v16 = int32(*(*int8)(unsafe.Pointer(v17)))
c = v16
if !(v16 >= int32('0') && c <= int32('9')) {
break
}
wx = wx*uint32(10) + uint32(c) - uint32('0')
}
width = int32(wx & uint32(0x7fffffff))
if c != int32('.') && c != int32('l') {
done = uint8(1)
} else {
fmt--
}
case int32('*'):
if bArgList != 0 {
width = int32(_getIntArg(tls, pArgList))
} else {
width = libc.VaInt32(&ap)
}
if width < 0 {
flag_leftjustify = uint8(1)
if width >= -int32(2147483647) {
v18 = -width
} else {
v18 = 0
}
width = v18
}
v19 = int32(*(*int8)(unsafe.Pointer(fmt + 1)))
c = v19
if v19 != int32('.') && c != int32('l') {
fmt++
v20 = fmt
c = int32(*(*int8)(unsafe.Pointer(v20)))
done = uint8(1)
}
case int32('.'):
fmt++
v21 = fmt
c = int32(*(*int8)(unsafe.Pointer(v21)))
if c == int32('*') {
if bArgList != 0 {
precision = int32(_getIntArg(tls, pArgList))
} else {
precision = libc.VaInt32(&ap)
}
if precision < 0 {
if precision >= -int32(2147483647) {
v22 = -precision
} else {
v22 = -int32(1)
}
precision = v22
}
fmt++
v23 = fmt
c = int32(*(*int8)(unsafe.Pointer(v23)))
} else {
px = uint32(0)
for c >= int32('0') && c <= int32('9') {
px = px*uint32(10) + uint32(c) - uint32('0')
fmt++
v24 = fmt
c = int32(*(*int8)(unsafe.Pointer(v24)))
}
precision = int32(px & uint32(0x7fffffff))
}
if c == int32('l') {
fmt--
} else {
done = uint8(1)
}
break
}
goto _13
_13:
;
if v12 = !(done != 0); v12 {
fmt++
v11 = fmt
v10 = int32(*(*int8)(unsafe.Pointer(v11)))
c = v10
}
if !(v12 && v10 != 0) {
break
}
}
/* Fetch the info entry for the field */
infop = uintptr(unsafe.Pointer(&_fmtinfo))
xtype = uint8(etINVALID)
idx = 0
for {
if !(idx < int32(libc.Uint64FromInt64(138)/libc.Uint64FromInt64(6))) {
break
}
if c == int32(_fmtinfo[idx].Ffmttype) {
infop = uintptr(unsafe.Pointer(&_fmtinfo)) + uintptr(idx)*6
xtype = (*Tet_info)(unsafe.Pointer(infop)).Ftype1
break
}
goto _25
_25:
;
idx++
}
/*
** At this point, variables are initialized as follows:
**
** flag_alternateform TRUE if a '#' is present.
** flag_altform2 TRUE if a '!' is present.
** flag_prefix '+' or ' ' or zero
** flag_leftjustify TRUE if a '-' is present or if the
** field width was negative.
** flag_zeropad TRUE if the width began with 0.
** flag_long 1 for "l", 2 for "ll"
** width The specified field width. This is
** always non-negative. Zero is the default.
** precision The specified precision. The default
** is -1.
** xtype The class of the conversion.
** infop Pointer to the appropriate info struct.
*/
switch int32(xtype) {
case int32(etPOINTER):
goto _26
case etRADIX:
goto _27
case int32(etORDINAL):
goto _28
case int32(etDECIMAL):
goto _29
case int32(etGENERIC):
goto _30
case int32(etEXP):
goto _31
case int32(etFLOAT):
goto _32
case int32(etSIZE):
goto _33
case int32(etPERCENT):
goto _34
case int32(etCHARX):
goto _35
case int32(etDYNSTRING):
goto _36
case int32(etSTRING):
goto _37
case int32(etSQLESCAPE3):
goto _38
case int32(etSQLESCAPE2):
goto _39
case int32(etSQLESCAPE):
goto _40
case int32(etTOKEN):
goto _41
case int32(etSRCITEM):
goto _42
default:
goto _43
}
goto _44
_26:
;
flag_long = uint8(2)
_28:
;
_27:
;
cThousand = uint8(0)
_29:
;
if int32((*Tet_info)(unsafe.Pointer(infop)).Fflags)&int32(FLAG_SIGNED) != 0 {
if bArgList != 0 {
v = _getIntArg(tls, pArgList)
} else {
if flag_long != 0 {
if int32(flag_long) == int32(2) {
v = libc.VaInt64(&ap)
} else {
v = libc.VaInt64(&ap)
}
} else {
v = int64(libc.VaInt32(&ap))
}
}
if v < 0 {
longvalue = uint64(^v)
longvalue++
prefix = int8('-')
} else {
longvalue = uint64(v)
prefix = int8(flag_prefix)
}
} else {
if bArgList != 0 {
longvalue = uint64(_getIntArg(tls, pArgList))
} else {
if flag_long != 0 {
if int32(flag_long) == int32(2) {
longvalue = libc.VaUint64(&ap)
} else {
longvalue = libc.VaUint64(&ap)
}
} else {
longvalue = uint64(libc.VaUint32(&ap))
}
}
prefix = 0
}
if longvalue == uint64(0) {
flag_alternateform = uint8(0)
}
if flag_zeropad != 0 && precision < width-libc.BoolInt32(int32(prefix) != 0) {
precision = width - libc.BoolInt32(int32(prefix) != 0)
}
if precision < libc.Int32FromInt32(SQLITE_PRINT_BUF_SIZE)-libc.Int32FromInt32(10)-libc.Int32FromInt32(SQLITE_PRINT_BUF_SIZE)/libc.Int32FromInt32(3) {
nOut = int32(SQLITE_PRINT_BUF_SIZE)
zOut = bp
} else {
n = uint64(precision) + uint64(10)
if cThousand != 0 {
n += uint64(precision / int32(3))
}
v45 = _printfTempBuf(tls, pAccum, int64(n))
zExtra = v45
zOut = v45
if zOut == uintptr(0) {
return
}
nOut = int32(n)
}
bufpt = zOut + uintptr(nOut-int32(1))
if int32(xtype) == int32(etORDINAL) {
x = int32(longvalue % libc.Uint64FromInt32(10))
if x >= int32(4) || longvalue/uint64(10)%uint64(10) == uint64(1) {
x = 0
}
bufpt--
v46 = bufpt
*(*int8)(unsafe.Pointer(v46)) = _zOrd[x*int32(2)+int32(1)]
bufpt--
v47 = bufpt
*(*int8)(unsafe.Pointer(v47)) = _zOrd[x*int32(2)]
}
cset = uintptr(unsafe.Pointer(&_aDigits)) + uintptr((*Tet_info)(unsafe.Pointer(infop)).Fcharset)
base = (*Tet_info)(unsafe.Pointer(infop)).Fbase
for cond := true; cond; cond = longvalue > uint64(0) { /* Convert to ascii */
bufpt--
v48 = bufpt
*(*int8)(unsafe.Pointer(v48)) = *(*int8)(unsafe.Pointer(cset + uintptr(longvalue%uint64(base))))
longvalue = longvalue / uint64(base)
}
length = int32(t__predefined_ptrdiff_t(zOut+uintptr(nOut-int32(1))) - int64(bufpt))
for precision > length {
bufpt--
v49 = bufpt
*(*int8)(unsafe.Pointer(v49)) = int8('0') /* Zero pad */
length++
}
if cThousand != 0 {
nn = (length - int32(1)) / int32(3) /* Number of "," to insert */
ix = (length-int32(1))%int32(3) + int32(1)
bufpt -= uintptr(nn)
idx = 0
for {
if !(nn > 0) {
break
}
*(*int8)(unsafe.Pointer(bufpt + uintptr(idx))) = *(*int8)(unsafe.Pointer(bufpt + uintptr(idx+nn)))
ix--
if ix == 0 {
idx++
v51 = idx
*(*int8)(unsafe.Pointer(bufpt + uintptr(v51))) = int8(cThousand)
nn--
ix = int32(3)
}
goto _50
_50:
;
idx++
}
}
if prefix != 0 {
bufpt--
v52 = bufpt
*(*int8)(unsafe.Pointer(v52)) = prefix
} /* Add sign */
if flag_alternateform != 0 && (*Tet_info)(unsafe.Pointer(infop)).Fprefix != 0 {
pre = uintptr(unsafe.Pointer(&_aPrefix)) + uintptr((*Tet_info)(unsafe.Pointer(infop)).Fprefix)
for {
v54 = *(*int8)(unsafe.Pointer(pre))
x1 = v54
if !(int32(v54) != 0) {
break
}
bufpt--
v55 = bufpt
*(*int8)(unsafe.Pointer(v55)) = x1
goto _53
_53:
;
pre++
}
}
length = int32(t__predefined_ptrdiff_t(zOut+uintptr(nOut-int32(1))) - int64(bufpt))
goto _44
_32:
;
_31:
;
_30:
;
if bArgList != 0 {
realvalue = _getDoubleArg(tls, pArgList)
} else {
realvalue = libc.VaFloat64(&ap)
}
if precision < 0 {
precision = int32(6)
} /* Set default precision */
if precision > int32(SQLITE_FP_PRECISION_LIMIT) {
precision = int32(SQLITE_FP_PRECISION_LIMIT)
}
if int32(xtype) == int32(etFLOAT) {
iRound = -precision
} else {
if int32(xtype) == int32(etGENERIC) {
if precision == 0 {
precision = int32(1)
}
iRound = precision
} else {
iRound = precision + int32(1)
}
}
if flag_altform2 != 0 {
v56 = int32(26)
} else {
v56 = int32(16)
}
_sqlite3FpDecode(tls, bp+72, realvalue, iRound, v56)
if (*(*TFpDecode)(unsafe.Pointer(bp + 72))).FisSpecial != 0 {
if int32((*(*TFpDecode)(unsafe.Pointer(bp + 72))).FisSpecial) == int32(2) {
if flag_zeropad != 0 {
v57 = __ccgo_ts + 1636
} else {
v57 = __ccgo_ts + 1641
}
bufpt = v57
length = _sqlite3Strlen30(tls, bufpt)
goto _44
} else {
if flag_zeropad != 0 {
*(*int8)(unsafe.Pointer((*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fz)) = int8('9')
(*(*TFpDecode)(unsafe.Pointer(bp + 72))).FiDP = int32(1000)
(*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fn = int32(1)
} else {
libc.Xmemcpy(tls, bp, __ccgo_ts+1645, uint64(5))
bufpt = bp
if int32((*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fsign) == int32('-') {
/* no-op */
} else {
if flag_prefix != 0 {
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = int8(flag_prefix)
} else {
bufpt++
}
}
length = _sqlite3Strlen30(tls, bufpt)
goto _44
}
}
}
if int32((*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fsign) == int32('-') {
prefix = int8('-')
} else {
prefix = int8(flag_prefix)
}
exp = (*(*TFpDecode)(unsafe.Pointer(bp + 72))).FiDP - int32(1)
if int32(xtype) == int32(etGENERIC) && precision > 0 {
precision--
}
/*
** If the field type is etGENERIC, then convert to either etEXP
** or etFLOAT, as appropriate.
*/
if int32(xtype) == int32(etGENERIC) {
flag_rtz = libc.BoolUint8(!(flag_alternateform != 0))
if exp < -int32(4) || exp > precision {
xtype = uint8(etEXP)
} else {
precision = precision - exp
xtype = uint8(etFLOAT)
}
} else {
flag_rtz = flag_altform2
}
if int32(xtype) == int32(etEXP) {
e2 = 0
} else {
e2 = (*(*TFpDecode)(unsafe.Pointer(bp + 72))).FiDP - int32(1)
}
bufpt = bp
/* Size of a temporary buffer needed */
if e2 > 0 {
v58 = e2
} else {
v58 = 0
}
szBufNeeded = int64(v58) + int64(precision) + int64(width) + int64(15)
if cThousand != 0 && e2 > 0 {
szBufNeeded += int64((e2 + int32(2)) / int32(3))
}
if szBufNeeded > int64(SQLITE_PRINT_BUF_SIZE) {
v59 = _printfTempBuf(tls, pAccum, szBufNeeded)
zExtra = v59
bufpt = v59
if bufpt == uintptr(0) {
return
}
}
zOut = bufpt
if precision > 0 {
v60 = int32(1)
} else {
v60 = 0
}
flag_dp = uint8(v60 | int32(flag_alternateform) | int32(flag_altform2))
/* The sign in front of the number */
if prefix != 0 {
v61 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v61)) = prefix
}
/* Digits prior to the decimal point */
j = 0
if e2 < 0 {
v62 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v62)) = int8('0')
} else {
for {
if !(e2 >= 0) {
break
}
v64 = bufpt
bufpt++
if j < (*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fn {
v66 = j
j++
v65 = int32(*(*int8)(unsafe.Pointer((*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fz + uintptr(v66))))
} else {
v65 = int32('0')
}
*(*int8)(unsafe.Pointer(v64)) = int8(v65)
if cThousand != 0 && e2%int32(3) == 0 && e2 > int32(1) {
v67 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v67)) = int8(',')
}
goto _63
_63:
;
e2--
}
}
/* The decimal point */
if flag_dp != 0 {
v68 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v68)) = int8('.')
}
/* "0" digits after the decimal point but before the first
** significant digit of the number */
e2++
for {
if !(e2 < 0 && precision > 0) {
break
}
v70 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v70)) = int8('0')
goto _69
_69:
;
precision--
e2++
}
/* Significant digits after the decimal point */
for {
v71 = precision
precision--
if !(v71 > 0) {
break
}
v72 = bufpt
bufpt++
if j < (*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fn {
v74 = j
j++
v73 = int32(*(*int8)(unsafe.Pointer((*(*TFpDecode)(unsafe.Pointer(bp + 72))).Fz + uintptr(v74))))
} else {
v73 = int32('0')
}
*(*int8)(unsafe.Pointer(v72)) = int8(v73)
}
/* Remove trailing zeros and the "." if no digits follow the "." */
if flag_rtz != 0 && flag_dp != 0 {
for int32(*(*int8)(unsafe.Pointer(bufpt + uintptr(-libc.Int32FromInt32(1))))) == int32('0') {
bufpt--
v75 = bufpt
*(*int8)(unsafe.Pointer(v75)) = 0
}
if int32(*(*int8)(unsafe.Pointer(bufpt + uintptr(-libc.Int32FromInt32(1))))) == int32('.') {
if flag_altform2 != 0 {
v76 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v76)) = int8('0')
} else {
bufpt--
v77 = bufpt
*(*int8)(unsafe.Pointer(v77)) = 0
}
}
}
/* Add the "eNNN" suffix */
if int32(xtype) == int32(etEXP) {
exp = (*(*TFpDecode)(unsafe.Pointer(bp + 72))).FiDP - int32(1)
v78 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v78)) = _aDigits[(*Tet_info)(unsafe.Pointer(infop)).Fcharset]
if exp < 0 {
v79 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v79)) = int8('-')
exp = -exp
} else {
v80 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v80)) = int8('+')
}
if exp >= int32(100) {
v81 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v81)) = int8(exp/libc.Int32FromInt32(100) + libc.Int32FromUint8('0')) /* 100's digit */
exp %= int32(100)
}
v82 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v82)) = int8(exp/libc.Int32FromInt32(10) + libc.Int32FromUint8('0')) /* 10's digit */
v83 = bufpt
bufpt++
*(*int8)(unsafe.Pointer(v83)) = int8(exp%libc.Int32FromInt32(10) + libc.Int32FromUint8('0')) /* 1's digit */
}
*(*int8)(unsafe.Pointer(bufpt)) = 0
/* The converted number is in buf[] and zero terminated. Output it.
** Note that the number is in the usual order, not reversed as with
** integer conversions. */
length = int32(int64(bufpt) - int64(zOut))
bufpt = zOut
/* Special case: Add leading zeros if the flag_zeropad flag is
** set and we are not left justified */
if flag_zeropad != 0 && !(flag_leftjustify != 0) && length < width {
nPad = width - length
i = width
for {
if !(i >= nPad) {
break
}
*(*int8)(unsafe.Pointer(bufpt + uintptr(i))) = *(*int8)(unsafe.Pointer(bufpt + uintptr(i-nPad)))
goto _84
_84:
;
i--
}
i = libc.BoolInt32(int32(prefix) != 0)
for {
v85 = nPad
nPad--
if !(v85 != 0) {
break
}
v86 = i
i++
*(*int8)(unsafe.Pointer(bufpt + uintptr(v86))) = int8('0')
}
length = width
}
goto _44
_33:
;
if !(bArgList != 0) {
*(*int32)(unsafe.Pointer(libc.VaUintptr(&ap))) = int32((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnChar)
}
v87 = libc.Int32FromInt32(0)
width = v87
length = v87
goto _44
_34:
;
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = int8('%')
bufpt = bp
length = int32(1)
goto _44
_35:
;
if bArgList != 0 {
bufpt = _getTextArg(tls, pArgList)
length = int32(1)
if bufpt != 0 {
v89 = bufpt
bufpt++
v88 = int32(*(*int8)(unsafe.Pointer(v89)))
c = v88
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = int8(v88)
if c&int32(0xc0) == int32(0xc0) {
for length < int32(4) && int32(*(*int8)(unsafe.Pointer(bufpt)))&int32(0xc0) == int32(0x80) {
v90 = length
length++
v91 = bufpt
bufpt++
(*(*[70]int8)(unsafe.Pointer(bp)))[v90] = *(*int8)(unsafe.Pointer(v91))
}
}
} else {
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = 0
}
} else {
ch = libc.VaUint32(&ap)
if ch < uint32(0x00080) {
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = int8(ch & uint32(0xff))
length = int32(1)
} else {
if ch < uint32(0x00800) {
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = int8(int32(0xc0) + int32(uint8(ch>>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x1f))))
(*(*[70]int8)(unsafe.Pointer(bp)))[int32(1)] = int8(int32(0x80) + int32(uint8(ch&libc.Uint32FromInt32(0x3f))))
length = int32(2)
} else {
if ch < uint32(0x10000) {
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = int8(int32(0xe0) + int32(uint8(ch>>libc.Int32FromInt32(12)&libc.Uint32FromInt32(0x0f))))
(*(*[70]int8)(unsafe.Pointer(bp)))[int32(1)] = int8(int32(0x80) + int32(uint8(ch>>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x3f))))
(*(*[70]int8)(unsafe.Pointer(bp)))[int32(2)] = int8(int32(0x80) + int32(uint8(ch&libc.Uint32FromInt32(0x3f))))
length = int32(3)
} else {
(*(*[70]int8)(unsafe.Pointer(bp)))[0] = int8(int32(0xf0) + int32(uint8(ch>>libc.Int32FromInt32(18)&libc.Uint32FromInt32(0x07))))
(*(*[70]int8)(unsafe.Pointer(bp)))[int32(1)] = int8(int32(0x80) + int32(uint8(ch>>libc.Int32FromInt32(12)&libc.Uint32FromInt32(0x3f))))
(*(*[70]int8)(unsafe.Pointer(bp)))[int32(2)] = int8(int32(0x80) + int32(uint8(ch>>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x3f))))
(*(*[70]int8)(unsafe.Pointer(bp)))[int32(3)] = int8(int32(0x80) + int32(uint8(ch&libc.Uint32FromInt32(0x3f))))
length = int32(4)
}
}
}
}
if precision > int32(1) {
nPrior = int64(1)
width -= precision - int32(1)
if width > int32(1) && !(flag_leftjustify != 0) {
Xsqlite3_str_appendchar(tls, pAccum, width-int32(1), int8(' '))
width = 0
}
Xsqlite3_str_append(tls, pAccum, bp, length)
precision--
for precision > int32(1) {
if nPrior > int64(precision-int32(1)) {
nPrior = int64(precision - int32(1))
}
nCopyBytes = int64(length) * nPrior
if nCopyBytes+int64((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnChar) >= int64((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnAlloc) {
_sqlite3StrAccumEnlarge(tls, pAccum, nCopyBytes)
}
if (*Tsqlite3_str)(unsafe.Pointer(pAccum)).FaccError != 0 {
break
}
Xsqlite3_str_append(tls, pAccum, (*Tsqlite3_str)(unsafe.Pointer(pAccum)).FzText+uintptr(int64((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnChar)-nCopyBytes), int32(nCopyBytes))
precision = int32(int64(precision) - nPrior)
nPrior *= int64(2)
}
}
bufpt = bp
flag_altform2 = uint8(1)
goto adjust_width_for_utf8
_37:
;
_36:
;
if bArgList != 0 {
bufpt = _getTextArg(tls, pArgList)
xtype = uint8(etSTRING)
} else {
bufpt = libc.VaUintptr(&ap)
}
if bufpt == uintptr(0) {
bufpt = __ccgo_ts + 1650
} else {
if int32(xtype) == int32(etDYNSTRING) {
if (*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnChar == uint32(0) && (*Tsqlite3_str)(unsafe.Pointer(pAccum)).FmxAlloc != 0 && width == 0 && precision < 0 && int32((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FaccError) == 0 {
/* Special optimization for sqlite3_mprintf("%z..."):
** Extend an existing memory allocation rather than creating
** a new one. */
(*Tsqlite3_str)(unsafe.Pointer(pAccum)).FzText = bufpt
(*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnAlloc = uint32(_sqlite3DbMallocSize(tls, (*Tsqlite3_str)(unsafe.Pointer(pAccum)).Fdb, bufpt))
(*Tsqlite3_str)(unsafe.Pointer(pAccum)).FnChar = uint32(int32(0x7fffffff) & int32(libc.Xstrlen(tls, bufpt)))
p92 = pAccum + 29
*(*Tu8)(unsafe.Pointer(p92)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p92))) | libc.Int32FromInt32(SQLITE_PRINTF_MALLOCED))
length = 0
goto _44
}
zExtra = bufpt
}
}
if precision >= 0 {
if flag_altform2 != 0 {
/* Set length to the number of bytes needed in order to display
** precision characters */
z = bufpt
for {
v93 = precision
precision--
if !(v93 > 0 && *(*uint8)(unsafe.Pointer(z)) != 0) {
break
}
v94 = z
z++
if int32(*(*uint8)(unsafe.Pointer(v94))) >= int32(0xc0) {
for int32(*(*uint8)(unsafe.Pointer(z)))&int32(0xc0) == int32(0x80) {
z++
}
}
}
length = int32(int64(z) - int64(bufpt))
} else {
length = 0
for {
if !(length < precision && *(*int8)(unsafe.Pointer(bufpt + uintptr(length))) != 0) {
break
}
goto _95
_95:
;
length++
}
}
} else {
length = int32(0x7fffffff) & int32(libc.Xstrlen(tls, bufpt))
}
goto adjust_width_for_utf8
adjust_width_for_utf8:
;
if flag_altform2 != 0 && width > 0 {
/* Adjust width to account for extra bytes in UTF-8 characters */
ii = length - int32(1)
for ii >= 0 {
v96 = ii
ii--
if int32(*(*int8)(unsafe.Pointer(bufpt + uintptr(v96))))&int32(0xc0) == int32(0x80) {
width++
}
}
}
goto _44
_40:
; /* %q: Escape ' characters */
_39:
; /* %Q: Escape ' and enclose in '...' */
_38:
;
if int32(xtype) == int32(etSQLESCAPE3) {
v97 = int32('"')
} else {
v97 = int32('\'')
}
q = int8(v97)
if bArgList != 0 {
escarg = _getTextArg(tls, pArgList)
} else {
escarg = libc.VaUintptr(&ap)
}
isnull = libc.BoolInt32(escarg == uintptr(0))
if isnull != 0 {
if int32(xtype) == int32(etSQLESCAPE2) {
v98 = __ccgo_ts + 1651
} else {
v98 = __ccgo_ts + 1656
}
escarg = v98
}
/* For %q, %Q, and %w, the precision is the number of bytes (or
** characters if the ! flag is present) to use from the input.
** Because of the extra quoting characters inserted, the number
** of output characters may be larger than the precision.
*/
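/* Illustrative examples (editorial addition, not from the generated source),
** showing the effect of these conversions on sample inputs:
**   %q  on  don't  ->  don''t
**   %Q  on  don't  ->  'don''t'
**   %Q  on  NULL   ->  NULL
**   %w  on  a"b    ->  a""b   (double-quote escaping for identifiers)
*/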
k = int64(precision)
v100 = libc.Int64FromInt32(0)
n1 = v100
i1 = v100
for {
if v102 = k != 0; v102 {
v101 = *(*int8)(unsafe.Pointer(escarg + uintptr(i1)))
ch1 = v101
}
if !(v102 && int32(v101) != 0) {
break
}
if int32(ch1) == int32(q) {
n1++
}
if flag_altform2 != 0 && int32(ch1)&int32(0xc0) == int32(0xc0) {
for int32(*(*int8)(unsafe.Pointer(escarg + uintptr(i1+int64(1)))))&int32(0xc0) == int32(0x80) {
i1++
}
}
goto _99
_99:
;
i1++
k--
}
needQuote = libc.BoolInt32(!(isnull != 0) && int32(xtype) == int32(etSQLESCAPE2))
n1 += i1 + int64(3)
if n1 > int64(SQLITE_PRINT_BUF_SIZE) {
v103 = _printfTempBuf(tls, pAccum, n1)
zExtra = v103
bufpt = v103
if bufpt == uintptr(0) {
return
}
} else {
bufpt = bp
}
j1 = 0
if needQuote != 0 {
v104 = j1
j1++
*(*int8)(unsafe.Pointer(bufpt + uintptr(v104))) = q
}
k = i1
i1 = 0
for {
if !(i1 < k) {
break
}
v106 = j1
j1++
v107 = *(*int8)(unsafe.Pointer(escarg + uintptr(i1)))
ch1 = v107
*(*int8)(unsafe.Pointer(bufpt + uintptr(v106))) = v107
if int32(ch1) == int32(q) {
v108 = j1
j1++
*(*int8)(unsafe.Pointer(bufpt + uintptr(v108))) = ch1
}
goto _105
_105:
;
i1++
}
if needQuote != 0 {
v109 = j1
j1++
*(*int8)(unsafe.Pointer(bufpt + uintptr(v109))) = q
}
*(*int8)(unsafe.Pointer(bufpt + uintptr(j1))) = 0
length = int32(j1)
goto adjust_width_for_utf8
_41:
;
if int32((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FprintfFlags)&int32(SQLITE_PRINTF_INTERNAL) == 0 {
return
}
if flag_alternateform != 0 {
/* %#T means an Expr pointer that uses Expr.u.zToken */
pExpr = libc.VaUintptr(&ap)
if pExpr != 0 && !((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_IntValue)) != libc.Uint32FromInt32(0)) {
Xsqlite3_str_appendall(tls, pAccum, *(*uintptr)(unsafe.Pointer(pExpr + 8)))
_sqlite3RecordErrorOffsetOfExpr(tls, (*Tsqlite3_str)(unsafe.Pointer(pAccum)).Fdb, pExpr)
}
} else {
/* %T means a Token pointer */
pToken = libc.VaUintptr(&ap)
if pToken != 0 && (*TToken)(unsafe.Pointer(pToken)).Fn != 0 {
Xsqlite3_str_append(tls, pAccum, (*TToken)(unsafe.Pointer(pToken)).Fz, int32((*TToken)(unsafe.Pointer(pToken)).Fn))
_sqlite3RecordErrorByteOffset(tls, (*Tsqlite3_str)(unsafe.Pointer(pAccum)).Fdb, (*TToken)(unsafe.Pointer(pToken)).Fz)
}
}
v110 = libc.Int32FromInt32(0)
width = v110
length = v110
goto _44
_42:
;
if int32((*Tsqlite3_str)(unsafe.Pointer(pAccum)).FprintfFlags)&int32(SQLITE_PRINTF_INTERNAL) == 0 {
return
}
pItem = libc.VaUintptr(&ap)
if (*TSrcItem)(unsafe.Pointer(pItem)).FzAlias != 0 && !(flag_altform2 != 0) {
Xsqlite3_str_appendall(tls, pAccum, (*TSrcItem)(unsafe.Pointer(pItem)).FzAlias)
} else {
if (*TSrcItem)(unsafe.Pointer(pItem)).FzName != 0 {
if (*TSrcItem)(unsafe.Pointer(pItem)).FzDatabase != 0 {
Xsqlite3_str_appendall(tls, pAccum, (*TSrcItem)(unsafe.Pointer(pItem)).FzDatabase)
Xsqlite3_str_append(tls, pAccum, __ccgo_ts+1663, int32(1))
}
Xsqlite3_str_appendall(tls, pAccum, (*TSrcItem)(unsafe.Pointer(pItem)).FzName)
} else {
if (*TSrcItem)(unsafe.Pointer(pItem)).FzAlias != 0 {
Xsqlite3_str_appendall(tls, pAccum, (*TSrcItem)(unsafe.Pointer(pItem)).FzAlias)
} else {
pSel = (*TSrcItem)(unsafe.Pointer(pItem)).FpSelect
if (*TSelect)(unsafe.Pointer(pSel)).FselFlags&uint32(SF_NestedFrom) != 0 {
Xsqlite3_str_appendf(tls, pAccum, __ccgo_ts+1665, libc.VaList(bp+128, (*TSelect)(unsafe.Pointer(pSel)).FselId))
} else {
Xsqlite3_str_appendf(tls, pAccum, __ccgo_ts+1675, libc.VaList(bp+128, (*TSelect)(unsafe.Pointer(pSel)).FselId))
}
}
}
}
v111 = libc.Int32FromInt32(0)
width = v111
length = v111
goto _44
_43:
;
return
_44:
; /* End switch over the format type */
/*
** The text of the conversion is pointed to by "bufpt" and is
** "length" characters long. The field width is "width". Do
** the output. Both length and width are in bytes, not characters,
** at this point. If the "!" flag was present on string conversions
** indicating that width and precision should be expressed in characters,
** then the values have been translated prior to reaching this point.
*/
width -= length
if width > 0 {
if !(flag_leftjustify != 0) {
Xsqlite3_str_appendchar(tls, pAccum, width, int8(' '))
}
Xsqlite3_str_append(tls, pAccum, bufpt, length)
if flag_leftjustify != 0 {
Xsqlite3_str_appendchar(tls, pAccum, width, int8(' '))
}
} else {
Xsqlite3_str_append(tls, pAccum, bufpt, length)
}
if zExtra != 0 {
_sqlite3DbFree(tls, (*Tsqlite3_str)(unsafe.Pointer(pAccum)).Fdb, zExtra)
zExtra = uintptr(0)
}
goto _1
_1:
;
fmt++
} /* End for loop over the format string */
}
var _zOrd = [9]int8{'t', 'h', 's', 't', 'n', 'd', 'r', 'd'}
/* End of function */
// C documentation
//
// /*
// ** The z string points to the first character of a token that is
// ** associated with an error. If db does not already have an error
// ** byte offset recorded, try to compute the error byte offset for
// ** z and set the error byte offset in db.
// */
func _sqlite3RecordErrorByteOffset(tls *libc.TLS, db uintptr, z uintptr) {
var pParse, zEnd, zText uintptr
_, _, _ = pParse, zEnd, zText
if db == uintptr(0) {
return
}
if (*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset != -int32(2) {
return
}
pParse = (*Tsqlite3)(unsafe.Pointer(db)).FpParse
if pParse == uintptr(0) {
return
}
zText = (*TParse)(unsafe.Pointer(pParse)).FzTail
if zText == uintptr(0) {
return
}
zEnd = zText + uintptr(libc.Xstrlen(tls, zText))
if uint64(z) >= uint64(zText) && uint64(z) < uint64(zEnd) {
(*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset = int32(int64(z) - int64(zText))
}
}
// C documentation
//
// /*
// ** If pExpr has a byte offset for the start of a token, record that as
// ** the error offset.
// */
func _sqlite3RecordErrorOffsetOfExpr(tls *libc.TLS, db uintptr, pExpr uintptr) {
for pExpr != 0 && ((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)|libc.Int32FromInt32(EP_InnerON)) != uint32(0) || *(*int32)(unsafe.Pointer(pExpr + 52)) <= 0) {
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
}
if pExpr == uintptr(0) {
return
}
(*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset = *(*int32)(unsafe.Pointer(pExpr + 52))
}
// C documentation
//
// /*
// ** Enlarge the memory allocation on a StrAccum object so that it is
// ** able to accept at least N more bytes of text.
// **
// ** Return the number of bytes of text that StrAccum is able to accept
// ** after the attempted enlargement. The value returned might be zero.
// */
func _sqlite3StrAccumEnlarge(tls *libc.TLS, p uintptr, N Ti64) (r int32) {
var szNew Ti64
var zNew, zOld, v1, p2 uintptr
_, _, _, _, _ = szNew, zNew, zOld, v1, p2
/* Only called if really needed */
if (*TStrAccum)(unsafe.Pointer(p)).FaccError != 0 {
return 0
}
if (*TStrAccum)(unsafe.Pointer(p)).FmxAlloc == uint32(0) {
_sqlite3StrAccumSetError(tls, p, uint8(SQLITE_TOOBIG))
return int32((*TStrAccum)(unsafe.Pointer(p)).FnAlloc - (*TStrAccum)(unsafe.Pointer(p)).FnChar - uint32(1))
} else {
if int32((*TStrAccum)(unsafe.Pointer(p)).FprintfFlags)&int32(SQLITE_PRINTF_MALLOCED) != 0 {
v1 = (*TStrAccum)(unsafe.Pointer(p)).FzText
} else {
v1 = uintptr(0)
}
zOld = v1
szNew = int64((*TStrAccum)(unsafe.Pointer(p)).FnChar) + N + int64(1)
if szNew+int64((*TStrAccum)(unsafe.Pointer(p)).FnChar) <= int64((*TStrAccum)(unsafe.Pointer(p)).FmxAlloc) {
/* Force exponential buffer size growth as long as it does not overflow,
** to avoid having to call this routine too often */
szNew += int64((*TStrAccum)(unsafe.Pointer(p)).FnChar)
}
if szNew > int64((*TStrAccum)(unsafe.Pointer(p)).FmxAlloc) {
Xsqlite3_str_reset(tls, p)
_sqlite3StrAccumSetError(tls, p, uint8(SQLITE_TOOBIG))
return 0
} else {
(*TStrAccum)(unsafe.Pointer(p)).FnAlloc = uint32(int32(szNew))
}
if (*TStrAccum)(unsafe.Pointer(p)).Fdb != 0 {
zNew = _sqlite3DbRealloc(tls, (*TStrAccum)(unsafe.Pointer(p)).Fdb, zOld, uint64((*TStrAccum)(unsafe.Pointer(p)).FnAlloc))
} else {
zNew = _sqlite3Realloc(tls, zOld, uint64((*TStrAccum)(unsafe.Pointer(p)).FnAlloc))
}
if zNew != 0 {
if !(int32((*TStrAccum)(unsafe.Pointer(p)).FprintfFlags)&libc.Int32FromInt32(SQLITE_PRINTF_MALLOCED) != libc.Int32FromInt32(0)) && (*TStrAccum)(unsafe.Pointer(p)).FnChar > uint32(0) {
libc.Xmemcpy(tls, zNew, (*TStrAccum)(unsafe.Pointer(p)).FzText, uint64((*TStrAccum)(unsafe.Pointer(p)).FnChar))
}
(*TStrAccum)(unsafe.Pointer(p)).FzText = zNew
(*TStrAccum)(unsafe.Pointer(p)).FnAlloc = uint32(_sqlite3DbMallocSize(tls, (*TStrAccum)(unsafe.Pointer(p)).Fdb, zNew))
p2 = p + 29
*(*Tu8)(unsafe.Pointer(p2)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p2))) | libc.Int32FromInt32(SQLITE_PRINTF_MALLOCED))
} else {
Xsqlite3_str_reset(tls, p)
_sqlite3StrAccumSetError(tls, p, uint8(SQLITE_NOMEM))
return 0
}
}
return int32(N)
}
// C documentation
//
// /*
// ** Append N copies of character c to the given string buffer.
// */
func Xsqlite3_str_appendchar(tls *libc.TLS, p uintptr, N int32, c int8) {
var v1, v3 int32
var v2 bool
var v4 Tu32
var v5 uintptr
_, _, _, _, _ = v1, v2, v3, v4, v5
if v2 = int64((*Tsqlite3_str)(unsafe.Pointer(p)).FnChar)+int64(N) >= int64((*Tsqlite3_str)(unsafe.Pointer(p)).FnAlloc); v2 {
v1 = _sqlite3StrAccumEnlarge(tls, p, int64(N))
N = v1
}
if v2 && v1 <= 0 {
return
}
for {
v3 = N
N--
if !(v3 > 0) {
break
}
v5 = p + 24
v4 = *(*Tu32)(unsafe.Pointer(v5))
*(*Tu32)(unsafe.Pointer(v5))++
*(*int8)(unsafe.Pointer((*Tsqlite3_str)(unsafe.Pointer(p)).FzText + uintptr(v4))) = c
}
}
// C documentation
//
// /*
// ** The StrAccum "p" is not large enough to accept N new bytes of z[].
// ** So enlarge it first, then do the append.
// **
// ** This is a helper routine to sqlite3_str_append() that does special-case
// ** work (enlarging the buffer) using tail recursion, so that the
// ** sqlite3_str_append() routine can use fast calling semantics.
// */
func _enlargeAndAppend(tls *libc.TLS, p uintptr, z uintptr, N int32) {
N = _sqlite3StrAccumEnlarge(tls, p, int64(N))
if N > 0 {
libc.Xmemcpy(tls, (*TStrAccum)(unsafe.Pointer(p)).FzText+uintptr((*TStrAccum)(unsafe.Pointer(p)).FnChar), z, uint64(N))
*(*Tu32)(unsafe.Pointer(p + 24)) += uint32(N)
}
}
// C documentation
//
// /*
// ** Append N bytes of text from z to the StrAccum object. Increase the
// ** size of the memory allocation for StrAccum if necessary.
// */
func Xsqlite3_str_append(tls *libc.TLS, p uintptr, z uintptr, N int32) {
if (*Tsqlite3_str)(unsafe.Pointer(p)).FnChar+uint32(N) >= (*Tsqlite3_str)(unsafe.Pointer(p)).FnAlloc {
_enlargeAndAppend(tls, p, z, N)
} else {
if N != 0 {
*(*Tu32)(unsafe.Pointer(p + 24)) += uint32(N)
libc.Xmemcpy(tls, (*Tsqlite3_str)(unsafe.Pointer(p)).FzText+uintptr((*Tsqlite3_str)(unsafe.Pointer(p)).FnChar-uint32(N)), z, uint64(N))
}
}
}
// C documentation
//
// /*
// ** Append the complete text of zero-terminated string z[] to the p string.
// */
func Xsqlite3_str_appendall(tls *libc.TLS, p uintptr, z uintptr) {
Xsqlite3_str_append(tls, p, z, _sqlite3Strlen30(tls, z))
}
// C documentation
//
// /*
// ** Finish off a string by making sure it is zero-terminated.
// ** Return a pointer to the resulting string. Return a NULL
// ** pointer if any kind of error was encountered.
// */
func _strAccumFinishRealloc(tls *libc.TLS, p uintptr) (r uintptr) {
var zText, p1 uintptr
_, _ = zText, p1
zText = _sqlite3DbMallocRaw(tls, (*TStrAccum)(unsafe.Pointer(p)).Fdb, uint64((*TStrAccum)(unsafe.Pointer(p)).FnChar+uint32(1)))
if zText != 0 {
libc.Xmemcpy(tls, zText, (*TStrAccum)(unsafe.Pointer(p)).FzText, uint64((*TStrAccum)(unsafe.Pointer(p)).FnChar+uint32(1)))
p1 = p + 29
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(SQLITE_PRINTF_MALLOCED))
} else {
_sqlite3StrAccumSetError(tls, p, uint8(SQLITE_NOMEM))
}
(*TStrAccum)(unsafe.Pointer(p)).FzText = zText
return zText
}
func _sqlite3StrAccumFinish(tls *libc.TLS, p uintptr) (r uintptr) {
if (*TStrAccum)(unsafe.Pointer(p)).FzText != 0 {
*(*int8)(unsafe.Pointer((*TStrAccum)(unsafe.Pointer(p)).FzText + uintptr((*TStrAccum)(unsafe.Pointer(p)).FnChar))) = 0
if (*TStrAccum)(unsafe.Pointer(p)).FmxAlloc > uint32(0) && !(int32((*TStrAccum)(unsafe.Pointer(p)).FprintfFlags)&libc.Int32FromInt32(SQLITE_PRINTF_MALLOCED) != libc.Int32FromInt32(0)) {
return _strAccumFinishRealloc(tls, p)
}
}
return (*TStrAccum)(unsafe.Pointer(p)).FzText
}
// C documentation
//
// /*
// ** Use the content of the StrAccum passed as the second argument
// ** as the result of an SQL function.
// */
func _sqlite3ResultStrAccum(tls *libc.TLS, pCtx uintptr, p uintptr) {
if (*TStrAccum)(unsafe.Pointer(p)).FaccError != 0 {
Xsqlite3_result_error_code(tls, pCtx, int32((*TStrAccum)(unsafe.Pointer(p)).FaccError))
Xsqlite3_str_reset(tls, p)
} else {
if int32((*TStrAccum)(unsafe.Pointer(p)).FprintfFlags)&int32(SQLITE_PRINTF_MALLOCED) != 0 {
Xsqlite3_result_text(tls, pCtx, (*TStrAccum)(unsafe.Pointer(p)).FzText, int32((*TStrAccum)(unsafe.Pointer(p)).FnChar), __ccgo_fp(_sqlite3OomClear))
} else {
Xsqlite3_result_text(tls, pCtx, __ccgo_ts+1650, 0, libc.UintptrFromInt32(0))
Xsqlite3_str_reset(tls, p)
}
}
}
// C documentation
//
// /*
// ** This singleton is an sqlite3_str object that is returned if
// ** sqlite3_malloc() fails to provide space for a real one. This
// ** sqlite3_str object accepts no new text and always returns
// ** an SQLITE_NOMEM error.
// */
var _sqlite3OomStr = Tsqlite3_str{
FaccError: uint8(SQLITE_NOMEM),
}
// C documentation
//
// /* Finalize a string created using sqlite3_str_new().
// */
func Xsqlite3_str_finish(tls *libc.TLS, p uintptr) (r uintptr) {
var z uintptr
_ = z
if p != uintptr(0) && p != uintptr(unsafe.Pointer(&_sqlite3OomStr)) {
z = _sqlite3StrAccumFinish(tls, p)
Xsqlite3_free(tls, p)
} else {
z = uintptr(0)
}
return z
}
// C documentation
//
// /* Return any error code associated with p */
func Xsqlite3_str_errcode(tls *libc.TLS, p uintptr) (r int32) {
var v1 int32
_ = v1
if p != 0 {
v1 = int32((*Tsqlite3_str)(unsafe.Pointer(p)).FaccError)
} else {
v1 = int32(SQLITE_NOMEM)
}
return v1
}
// C documentation
//
// /* Return the current length of p in bytes */
func Xsqlite3_str_length(tls *libc.TLS, p uintptr) (r int32) {
var v1 uint32
_ = v1
if p != 0 {
v1 = (*Tsqlite3_str)(unsafe.Pointer(p)).FnChar
} else {
v1 = uint32(0)
}
return int32(v1)
}
// C documentation
//
// /* Return the current value for p */
func Xsqlite3_str_value(tls *libc.TLS, p uintptr) (r uintptr) {
if p == uintptr(0) || (*Tsqlite3_str)(unsafe.Pointer(p)).FnChar == uint32(0) {
return uintptr(0)
}
*(*int8)(unsafe.Pointer((*Tsqlite3_str)(unsafe.Pointer(p)).FzText + uintptr((*Tsqlite3_str)(unsafe.Pointer(p)).FnChar))) = 0
return (*Tsqlite3_str)(unsafe.Pointer(p)).FzText
}
// C documentation
//
// /*
// ** Reset an StrAccum string. Reclaim all malloced memory.
// */
func Xsqlite3_str_reset(tls *libc.TLS, p uintptr) {
var p1 uintptr
_ = p1
if int32((*TStrAccum)(unsafe.Pointer(p)).FprintfFlags)&int32(SQLITE_PRINTF_MALLOCED) != 0 {
_sqlite3DbFree(tls, (*TStrAccum)(unsafe.Pointer(p)).Fdb, (*TStrAccum)(unsafe.Pointer(p)).FzText)
p1 = p + 29
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(SQLITE_PRINTF_MALLOCED))
}
(*TStrAccum)(unsafe.Pointer(p)).FnAlloc = uint32(0)
(*TStrAccum)(unsafe.Pointer(p)).FnChar = uint32(0)
(*TStrAccum)(unsafe.Pointer(p)).FzText = uintptr(0)
}
// C documentation
//
// /*
// ** Initialize a string accumulator.
// **
// ** p: The accumulator to be initialized.
// ** db: Pointer to a database connection. May be NULL. Lookaside
// ** memory is used if not NULL. db->mallocFailed is set appropriately
// ** when not NULL.
// ** zBase: An initial buffer. May be NULL in which case the initial buffer
// ** is malloced.
// ** n: Size of zBase in bytes. If total space requirements never exceed
// ** n then no memory allocations ever occur.
// ** mx: Maximum number of bytes to accumulate. If mx==0 then no memory
// ** allocations will ever occur.
// */
func _sqlite3StrAccumInit(tls *libc.TLS, p uintptr, db uintptr, zBase uintptr, n int32, mx int32) {
(*TStrAccum)(unsafe.Pointer(p)).FzText = zBase
(*TStrAccum)(unsafe.Pointer(p)).Fdb = db
(*TStrAccum)(unsafe.Pointer(p)).FnAlloc = uint32(n)
(*TStrAccum)(unsafe.Pointer(p)).FmxAlloc = uint32(mx)
(*TStrAccum)(unsafe.Pointer(p)).FnChar = uint32(0)
(*TStrAccum)(unsafe.Pointer(p)).FaccError = uint8(0)
(*TStrAccum)(unsafe.Pointer(p)).FprintfFlags = uint8(0)
}
// C documentation
//
// /* Allocate and initialize a new dynamic string object */
func Xsqlite3_str_new(tls *libc.TLS, db uintptr) (r uintptr) {
var p uintptr
var v1 int32
_, _ = p, v1
p = Xsqlite3_malloc64(tls, uint64(32))
if p != 0 {
if db != 0 {
v1 = *(*int32)(unsafe.Pointer(db + 136))
} else {
v1 = int32(SQLITE_MAX_LENGTH)
}
_sqlite3StrAccumInit(tls, p, uintptr(0), uintptr(0), 0, v1)
} else {
p = uintptr(unsafe.Pointer(&_sqlite3OomStr))
}
return p
}
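// _exampleStrBuild is an illustrative sketch added for clarity and is not part
// of the generated amalgamation: it drives the sqlite3_str accumulator API
// defined above by allocating a string object, appending three characters,
// reading back the length, then finishing and freeing the buffer.
func _exampleStrBuild(tls *libc.TLS) (r int32) {
p := Xsqlite3_str_new(tls, uintptr(0)) /* db==NULL: growth capped at SQLITE_MAX_LENGTH */
Xsqlite3_str_appendchar(tls, p, int32(3), int8('x')) /* accumulate "xxx" */
r = Xsqlite3_str_length(tls, p) /* current length in bytes */
z := Xsqlite3_str_finish(tls, p) /* zero-terminate and take ownership of the buffer */
if z != 0 {
Xsqlite3_free(tls, z)
}
return r
}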
// C documentation
//
// /*
// ** Print into memory obtained from sqliteMalloc(). Use the internal
// ** %-conversion extensions.
// */
func _sqlite3VMPrintf(tls *libc.TLS, db uintptr, zFormat uintptr, ap Tva_list) (r uintptr) {
bp := tls.Alloc(112)
defer tls.Free(112)
var z uintptr
var _ /* acc at bp+72 */ TStrAccum
var _ /* zBase at bp+0 */ [70]int8
_ = z
_sqlite3StrAccumInit(tls, bp+72, db, bp, int32(70), *(*int32)(unsafe.Pointer(db + 136)))
(*(*TStrAccum)(unsafe.Pointer(bp + 72))).FprintfFlags = uint8(SQLITE_PRINTF_INTERNAL)
Xsqlite3_str_vappendf(tls, bp+72, zFormat, ap)
z = _sqlite3StrAccumFinish(tls, bp+72)
if int32((*(*TStrAccum)(unsafe.Pointer(bp + 72))).FaccError) == int32(SQLITE_NOMEM) {
_sqlite3OomFault(tls, db)
}
return z
}
// C documentation
//
// /*
// ** Print into memory obtained from sqliteMalloc(). Use the internal
// ** %-conversion extensions.
// */
func _sqlite3MPrintf(tls *libc.TLS, db uintptr, zFormat uintptr, va uintptr) (r uintptr) {
var ap Tva_list
var z uintptr
_, _ = ap, z
ap = va
z = _sqlite3VMPrintf(tls, db, zFormat, ap)
_ = ap
return z
}
// C documentation
//
// /*
// ** Print into memory obtained from sqlite3_malloc(). Omit the internal
// ** %-conversion extensions.
// */
func Xsqlite3_vmprintf(tls *libc.TLS, zFormat uintptr, ap Tva_list) (r uintptr) {
bp := tls.Alloc(112)
defer tls.Free(112)
var z uintptr
var _ /* acc at bp+72 */ TStrAccum
var _ /* zBase at bp+0 */ [70]int8
_ = z
if Xsqlite3_initialize(tls) != 0 {
return uintptr(0)
}
_sqlite3StrAccumInit(tls, bp+72, uintptr(0), bp, int32(70), int32(SQLITE_MAX_LENGTH))
Xsqlite3_str_vappendf(tls, bp+72, zFormat, ap)
z = _sqlite3StrAccumFinish(tls, bp+72)
return z
}
// C documentation
//
// /*
// ** Print into memory obtained from sqlite3_malloc(). Omit the internal
// ** %-conversion extensions.
// */
func Xsqlite3_mprintf(tls *libc.TLS, zFormat uintptr, va uintptr) (r uintptr) {
var ap Tva_list
var z uintptr
_, _ = ap, z
if Xsqlite3_initialize(tls) != 0 {
return uintptr(0)
}
ap = va
z = Xsqlite3_vmprintf(tls, zFormat, ap)
_ = ap
return z
}
// C documentation
//
// /*
// ** sqlite3_snprintf() works like snprintf() except that it ignores the
// ** current locale settings. This is important for SQLite because we
// ** are not able to use a "," as the decimal point in place of "." as
// ** specified by some locales.
// **
// ** Oops: The first two arguments of sqlite3_snprintf() are backwards
// ** from the snprintf() standard. Unfortunately, it is too late to change
// ** this without breaking compatibility, so we just have to live with the
// ** mistake.
// **
// ** sqlite3_vsnprintf() is the varargs version.
// */
func Xsqlite3_vsnprintf(tls *libc.TLS, n int32, zBuf uintptr, zFormat uintptr, ap Tva_list) (r uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var _ /* acc at bp+0 */ TStrAccum
if n <= 0 {
return zBuf
}
_sqlite3StrAccumInit(tls, bp, uintptr(0), zBuf, n, 0)
Xsqlite3_str_vappendf(tls, bp, zFormat, ap)
*(*int8)(unsafe.Pointer(zBuf + uintptr((*(*TStrAccum)(unsafe.Pointer(bp))).FnChar))) = 0
return zBuf
}
func Xsqlite3_snprintf(tls *libc.TLS, n int32, zBuf uintptr, zFormat uintptr, va uintptr) (r uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var ap Tva_list
var _ /* acc at bp+0 */ TStrAccum
_ = ap
if n <= 0 {
return zBuf
}
_sqlite3StrAccumInit(tls, bp, uintptr(0), zBuf, n, 0)
ap = va
Xsqlite3_str_vappendf(tls, bp, zFormat, ap)
_ = ap
*(*int8)(unsafe.Pointer(zBuf + uintptr((*(*TStrAccum)(unsafe.Pointer(bp))).FnChar))) = 0
return zBuf
}
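// _exampleSnprintfArgOrder is an illustrative sketch added for clarity and is
// not part of the generated amalgamation: it only highlights the argument
// order quirk described above, namely that the buffer size comes before the
// buffer, the reverse of standard snprintf(). zBuf and zFormat are assumed to
// be caller-supplied NUL-terminated strings in libc-managed memory, and the
// format is assumed to consume no variadic arguments, so va is passed as 0.
func _exampleSnprintfArgOrder(tls *libc.TLS, n int32, zBuf uintptr, zFormat uintptr) uintptr {
return Xsqlite3_snprintf(tls, n, zBuf, zFormat, 0) /* size first, then buffer */
}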
// C documentation
//
// /*
// ** This is the routine that actually formats the sqlite3_log() message.
// ** We house it in a separate routine from sqlite3_log() to avoid using
// ** stack space on small-stack systems when logging is disabled.
// **
// ** sqlite3_log() must render into a static buffer. It cannot dynamically
// ** allocate memory because it might be called while the memory allocator
// ** mutex is held.
// **
// ** sqlite3_str_vappendf() might ask for *temporary* memory allocations for
// ** certain format characters (%q) or for very large precisions or widths.
// ** Care must be taken that any sqlite3_log() calls that occur while the
// ** memory mutex is held do not use these mechanisms.
// */
func _renderLogMsg(tls *libc.TLS, iErrCode int32, zFormat uintptr, ap Tva_list) {
bp := tls.Alloc(256)
defer tls.Free(256)
var _ /* acc at bp+0 */ TStrAccum
var _ /* zMsg at bp+32 */ [210]int8 /* Complete log message */
_sqlite3StrAccumInit(tls, bp, uintptr(0), bp+32, int32(210), 0)
Xsqlite3_str_vappendf(tls, bp, zFormat, ap)
(*(*func(*libc.TLS, uintptr, int32, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.FxLog})))(tls, _sqlite3Config.FpLogArg, iErrCode, _sqlite3StrAccumFinish(tls, bp))
}
// C documentation
//
// /*
// ** Format and write a message to the log if logging is enabled.
// */
func Xsqlite3_log(tls *libc.TLS, iErrCode int32, zFormat uintptr, va uintptr) {
var ap Tva_list
_ = ap /* Vararg list */
if _sqlite3Config.FxLog != 0 {
ap = va
_renderLogMsg(tls, iErrCode, zFormat, ap)
_ = ap
}
}
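// _exampleLogNotice is an illustrative sketch added for clarity and is not
// part of the generated amalgamation. As the comments above explain, the log
// renderer formats into a fixed stack buffer, so a call like this is safe even
// while the memory allocator mutex is held, provided the format avoids %q and
// large widths. zFormat is assumed to be a caller-supplied NUL-terminated
// format that consumes no variadic arguments, so va is passed as 0.
func _exampleLogNotice(tls *libc.TLS, zFormat uintptr) {
Xsqlite3_log(tls, int32(SQLITE_NOTICE), zFormat, 0)
}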
// C documentation
//
// /*
// ** variable-argument wrapper around sqlite3_str_vappendf(). The bFlags argument
// ** can contain the bit SQLITE_PRINTF_INTERNAL to enable internal formats.
// */
func Xsqlite3_str_appendf(tls *libc.TLS, p uintptr, zFormat uintptr, va uintptr) {
var ap Tva_list
_ = ap
ap = va
Xsqlite3_str_vappendf(tls, p, zFormat, ap)
_ = ap
}
/*****************************************************************************
** Reference counted string/blob storage
*****************************************************************************/
// C documentation
//
// /*
// ** Increase the reference count of the string by one.
// **
// ** The input parameter is returned.
// */
func _sqlite3RCStrRef(tls *libc.TLS, z uintptr) (r uintptr) {
var p uintptr
_ = p
p = z
p -= 8
(*TRCStr)(unsafe.Pointer(p)).FnRCRef++
return z
}
// C documentation
//
// /*
// ** Decrease the reference count by one. Free the string when the
// ** reference count reaches zero.
// */
func _sqlite3RCStrUnref(tls *libc.TLS, z uintptr) {
var p uintptr
_ = p
p = z
p -= 8
if (*TRCStr)(unsafe.Pointer(p)).FnRCRef >= uint64(2) {
(*TRCStr)(unsafe.Pointer(p)).FnRCRef--
} else {
Xsqlite3_free(tls, p)
}
}
// C documentation
//
// /*
// ** Create a new string that is capable of holding N bytes of text, not counting
// ** the zero byte at the end. The string is uninitialized.
// **
// ** The reference count is initially 1. Call sqlite3RCStrUnref() to free the
// ** newly allocated string.
// **
// ** This routine returns 0 on an OOM.
// */
func _sqlite3RCStrNew(tls *libc.TLS, N Tu64) (r uintptr) {
var p uintptr
_ = p
p = Xsqlite3_malloc64(tls, N+uint64(8)+uint64(1))
if p == uintptr(0) {
return uintptr(0)
}
(*TRCStr)(unsafe.Pointer(p)).FnRCRef = uint64(1)
return p + 1*8
}
// C documentation
//
// /*
// ** Change the size of the string so that it is able to hold N bytes.
// ** The string might be reallocated, so return the new allocation.
// */
func _sqlite3RCStrResize(tls *libc.TLS, z uintptr, N Tu64) (r uintptr) {
var p, pNew uintptr
_, _ = p, pNew
p = z
p -= 8
pNew = Xsqlite3_realloc64(tls, p, N+uint64(8)+uint64(1))
if pNew == uintptr(0) {
Xsqlite3_free(tls, p)
return uintptr(0)
} else {
return pNew + 1*8
}
return r
}
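// _exampleRCStrLifecycle is an illustrative sketch added for clarity and is
// not part of the generated amalgamation. The helpers above keep a u64
// reference count in the 8 bytes immediately before the pointer they hand out,
// so the text itself starts at offset +8 of the underlying allocation.
func _exampleRCStrLifecycle(tls *libc.TLS) {
z := _sqlite3RCStrNew(tls, uint64(16)) /* room for 16 bytes of text plus the NUL */
if z == uintptr(0) {
return /* OOM */
}
z = _sqlite3RCStrRef(tls, z) /* reference count: 1 -> 2 */
_sqlite3RCStrUnref(tls, z) /* reference count: 2 -> 1 */
_sqlite3RCStrUnref(tls, z) /* reference count reaches 0; the allocation is freed */
}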
/************** End of printf.c **********************************************/
/************** Begin file treeview.c ****************************************/
/*
** 2015-06-08
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains C code to implement the TreeView debugging routines.
** These routines print a parse tree to standard output for debugging and
** analysis.
**
** The interfaces in this file are only available when compiling
** with SQLITE_DEBUG.
*/
/* #include "sqliteInt.h" */
/************** End of treeview.c ********************************************/
/************** Begin file random.c ******************************************/
/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains code to implement a pseudo-random number
** generator (PRNG) for SQLite.
**
** Random numbers are used by some of the database backends in order
** to generate random integer keys for tables or random filenames.
*/
/* #include "sqliteInt.h" */
// C documentation
//
// /* All threads share a single random number generator.
// ** This structure is the current state of the generator.
// */
type Tsqlite3PrngType = struct {
Fs [16]Tu32
Fout [64]Tu8
Fn Tu8
}
type sqlite3PrngType = Tsqlite3PrngType
var _sqlite3Prng Tsqlite3PrngType
// C documentation
//
// /* The RFC-7539 ChaCha20 block function
// */
func _chacha_block(tls *libc.TLS, out uintptr, in uintptr) {
bp := tls.Alloc(64)
defer tls.Free(64)
var i int32
var _ /* x at bp+0 */ [16]Tu32
_ = i
libc.Xmemcpy(tls, bp, in, uint64(64))
i = 0
for {
if !(i < int32(10)) {
break
}
*(*Tu32)(unsafe.Pointer(bp)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]
*(*Tu32)(unsafe.Pointer(bp + 12*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[0]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 8*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]
*(*Tu32)(unsafe.Pointer(bp + 4*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(8)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]
*(*Tu32)(unsafe.Pointer(bp + 12*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[0]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 8*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]
*(*Tu32)(unsafe.Pointer(bp + 4*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(8)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
*(*Tu32)(unsafe.Pointer(bp + 1*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]
*(*Tu32)(unsafe.Pointer(bp + 13*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(1)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 9*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]
*(*Tu32)(unsafe.Pointer(bp + 5*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(9)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp + 1*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]
*(*Tu32)(unsafe.Pointer(bp + 13*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(1)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 9*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]
*(*Tu32)(unsafe.Pointer(bp + 5*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(9)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
*(*Tu32)(unsafe.Pointer(bp + 2*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]
*(*Tu32)(unsafe.Pointer(bp + 14*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(2)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 10*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]
*(*Tu32)(unsafe.Pointer(bp + 6*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(10)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp + 2*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]
*(*Tu32)(unsafe.Pointer(bp + 14*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(2)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 10*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]
*(*Tu32)(unsafe.Pointer(bp + 6*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(10)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
*(*Tu32)(unsafe.Pointer(bp + 3*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]
*(*Tu32)(unsafe.Pointer(bp + 15*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(3)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 11*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]
*(*Tu32)(unsafe.Pointer(bp + 7*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(11)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp + 3*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]
*(*Tu32)(unsafe.Pointer(bp + 15*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(3)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 11*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]
*(*Tu32)(unsafe.Pointer(bp + 7*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(11)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
*(*Tu32)(unsafe.Pointer(bp)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]
*(*Tu32)(unsafe.Pointer(bp + 15*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[0]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 10*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]
*(*Tu32)(unsafe.Pointer(bp + 5*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(10)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]
*(*Tu32)(unsafe.Pointer(bp + 15*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[0]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 10*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(15)]
*(*Tu32)(unsafe.Pointer(bp + 5*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(10)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(5)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
*(*Tu32)(unsafe.Pointer(bp + 1*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]
*(*Tu32)(unsafe.Pointer(bp + 12*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(1)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 11*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]
*(*Tu32)(unsafe.Pointer(bp + 6*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(11)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp + 1*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]
*(*Tu32)(unsafe.Pointer(bp + 12*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(1)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 11*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(12)]
*(*Tu32)(unsafe.Pointer(bp + 6*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(11)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(6)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
*(*Tu32)(unsafe.Pointer(bp + 2*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]
*(*Tu32)(unsafe.Pointer(bp + 13*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(2)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 8*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]
*(*Tu32)(unsafe.Pointer(bp + 7*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(8)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp + 2*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]
*(*Tu32)(unsafe.Pointer(bp + 13*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(2)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 8*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(13)]
*(*Tu32)(unsafe.Pointer(bp + 7*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(8)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(7)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
*(*Tu32)(unsafe.Pointer(bp + 3*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]
*(*Tu32)(unsafe.Pointer(bp + 14*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(3)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]<<libc.Int32FromInt32(16) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(16))
*(*Tu32)(unsafe.Pointer(bp + 9*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]
*(*Tu32)(unsafe.Pointer(bp + 4*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(9)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]<<libc.Int32FromInt32(12) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(12))
*(*Tu32)(unsafe.Pointer(bp + 3*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]
*(*Tu32)(unsafe.Pointer(bp + 14*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(3)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]<<libc.Int32FromInt32(8) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(8))
*(*Tu32)(unsafe.Pointer(bp + 9*4)) += (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(14)]
*(*Tu32)(unsafe.Pointer(bp + 4*4)) ^= (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(9)]
(*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)] = (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]<<libc.Int32FromInt32(7) | (*(*[16]Tu32)(unsafe.Pointer(bp)))[int32(4)]>>(libc.Int32FromInt32(32)-libc.Int32FromInt32(7))
goto _1
_1:
;
i++
}
i = 0
for {
if !(i < int32(16)) {
break
}
*(*Tu32)(unsafe.Pointer(out + uintptr(i)*4)) = (*(*[16]Tu32)(unsafe.Pointer(bp)))[i] + *(*Tu32)(unsafe.Pointer(in + uintptr(i)*4))
goto _2
_2:
;
i++
}
}
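// _exampleQuarterRound is an illustrative sketch added for clarity and is not
// part of the generated amalgamation: it spells out the RFC-7539 quarter-round
// that the inlined statements in _chacha_block above implement. Each group of
// twelve statements in the round loop is one quarter-round applied to four of
// the sixteen state words, with left rotations by 16, 12, 8 and 7 bits.
func _exampleQuarterRound(a, b, c, d Tu32) (Tu32, Tu32, Tu32, Tu32) {
a += b
d ^= a
d = d<<16 | d>>16 /* rotate left by 16 */
c += d
b ^= c
b = b<<12 | b>>20 /* rotate left by 12 */
a += b
d ^= a
d = d<<8 | d>>24 /* rotate left by 8 */
c += d
b ^= c
b = b<<7 | b>>25 /* rotate left by 7 */
return a, b, c, d
}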
// C documentation
//
// /*
// ** Return N random bytes.
// */
func Xsqlite3_randomness(tls *libc.TLS, N int32, pBuf uintptr) {
var mutex, pVfs, zBuf, p1 uintptr
_, _, _, _ = mutex, pVfs, zBuf, p1
zBuf = pBuf
if Xsqlite3_initialize(tls) != 0 {
return
}
mutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_PRNG))
Xsqlite3_mutex_enter(tls, mutex)
if N <= 0 || pBuf == uintptr(0) {
*(*Tu32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Prng)))) = uint32(0)
Xsqlite3_mutex_leave(tls, mutex)
return
}
/* Initialize the state of the random number generator once,
** the first time this routine is called.
*/
if *(*Tu32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Prng)))) == uint32(0) {
pVfs = Xsqlite3_vfs_find(tls, uintptr(0))
libc.Xmemcpy(tls, uintptr(unsafe.Pointer(&_sqlite3Prng)), uintptr(unsafe.Pointer(&_chacha20_init)), uint64(16))
if pVfs == uintptr(0) {
libc.Xmemset(tls, uintptr(unsafe.Pointer(&_sqlite3Prng))+4*4, 0, uint64(44))
} else {
_sqlite3OsRandomness(tls, pVfs, int32(44), uintptr(unsafe.Pointer(&_sqlite3Prng))+4*4)
}
*(*Tu32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Prng)) + 15*4)) = *(*Tu32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Prng)) + 12*4))
*(*Tu32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Prng)) + 12*4)) = uint32(0)
_sqlite3Prng.Fn = uint8(0)
}
for int32(1) != 0 {
if N <= int32(_sqlite3Prng.Fn) {
libc.Xmemcpy(tls, zBuf, uintptr(unsafe.Pointer(&_sqlite3Prng))+64+uintptr(int32(_sqlite3Prng.Fn)-N), uint64(N))
p1 = uintptr(unsafe.Pointer(&_sqlite3Prng)) + 128
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) - N)
break
}
if int32(_sqlite3Prng.Fn) > 0 {
libc.Xmemcpy(tls, zBuf, uintptr(unsafe.Pointer(&_sqlite3Prng))+64, uint64(_sqlite3Prng.Fn))
N -= int32(_sqlite3Prng.Fn)
zBuf += uintptr(_sqlite3Prng.Fn)
}
*(*Tu32)(unsafe.Pointer(uintptr(unsafe.Pointer(&_sqlite3Prng)) + 12*4))++
_chacha_block(tls, uintptr(unsafe.Pointer(&_sqlite3Prng))+64, uintptr(unsafe.Pointer(&_sqlite3Prng)))
_sqlite3Prng.Fn = uint8(64)
}
Xsqlite3_mutex_leave(tls, mutex)
}
var _chacha20_init = [4]Tu32{
0: uint32(0x61707865),
1: uint32(0x3320646e),
2: uint32(0x79622d32),
3: uint32(0x6b206574),
}
// C documentation
//
// /*
// ** For testing purposes, we sometimes want to preserve the state of
// ** PRNG and restore the PRNG to its saved state at a later time, or
// ** to reset the PRNG to its initial state. These routines accomplish
// ** those tasks.
// **
// ** The sqlite3_test_control() interface calls these routines to
// ** control the PRNG.
// */
var _sqlite3SavedPrng Tsqlite3PrngType
func _sqlite3PrngSaveState(tls *libc.TLS) {
libc.Xmemcpy(tls, uintptr(unsafe.Pointer(&_sqlite3SavedPrng)), uintptr(unsafe.Pointer(&_sqlite3Prng)), uint64(132))
}
func _sqlite3PrngRestoreState(tls *libc.TLS) {
libc.Xmemcpy(tls, uintptr(unsafe.Pointer(&_sqlite3Prng)), uintptr(unsafe.Pointer(&_sqlite3SavedPrng)), uint64(132))
}
/************** End of random.c **********************************************/
/************** Begin file threads.c *****************************************/
/*
** 2012 July 21
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
** This file presents a simple cross-platform threading interface for
** use internally by SQLite.
**
** A "thread" can be created using sqlite3ThreadCreate(). This thread
** runs independently of its creator until it is joined using
** sqlite3ThreadJoin(), at which point it terminates.
**
** Threads do not have to be real. It could be that the work of the
** "thread" is done by the main thread at either the sqlite3ThreadCreate()
** or sqlite3ThreadJoin() call. This is, in fact, what happens in
** single threaded systems. Nothing in SQLite requires multiple threads.
** This interface exists so that applications that want to take advantage
** of multiple cores can do so, while also allowing applications to stay
** single-threaded if desired.
*/
/* #include "sqliteInt.h" */
/********************************* Unix Pthreads ****************************/
/******************************** End Unix Pthreads *************************/
/********************************* Win32 Threads ****************************/
/******************************** End Win32 Threads *************************/
/********************************* Single-Threaded **************************/
/*
** This implementation does not actually create a new thread. It does the
** work of the thread in the main thread, when either the thread is created
** or when it is joined
*/
/* A running thread */
type TSQLiteThread1 = struct {
FxTask uintptr
FpIn uintptr
FpResult uintptr
}
type SQLiteThread1 = TSQLiteThread1
// C documentation
//
// /* Create a new thread */
func _sqlite3ThreadCreate(tls *libc.TLS, ppThread uintptr, xTask uintptr, pIn uintptr) (r int32) {
var p uintptr
_ = p
*(*uintptr)(unsafe.Pointer(ppThread)) = uintptr(0)
p = _sqlite3Malloc(tls, uint64(24))
if p == uintptr(0) {
return int32(SQLITE_NOMEM)
}
if int32(int64(p))/int32(17)&int32(1) != 0 {
(*TSQLiteThread)(unsafe.Pointer(p)).FxTask = xTask
(*TSQLiteThread)(unsafe.Pointer(p)).FpIn = pIn
} else {
(*TSQLiteThread)(unsafe.Pointer(p)).FxTask = uintptr(0)
(*TSQLiteThread)(unsafe.Pointer(p)).FpResult = (*(*func(*libc.TLS, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{xTask})))(tls, pIn)
}
*(*uintptr)(unsafe.Pointer(ppThread)) = p
return SQLITE_OK
}
// C documentation
//
// /* Get the results of the thread */
func _sqlite3ThreadJoin(tls *libc.TLS, p uintptr, ppOut uintptr) (r int32) {
if p == uintptr(0) {
return int32(SQLITE_NOMEM)
}
if (*TSQLiteThread)(unsafe.Pointer(p)).FxTask != 0 {
*(*uintptr)(unsafe.Pointer(ppOut)) = (*(*func(*libc.TLS, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{(*TSQLiteThread)(unsafe.Pointer(p)).FxTask})))(tls, (*TSQLiteThread)(unsafe.Pointer(p)).FpIn)
} else {
*(*uintptr)(unsafe.Pointer(ppOut)) = (*TSQLiteThread)(unsafe.Pointer(p)).FpResult
}
Xsqlite3_free(tls, p)
return SQLITE_OK
}
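// Illustrative sketch (hypothetical names, not part of the generated code):
// the single-threaded backend above either runs the task eagerly at create
// time or defers it until join, and the caller cannot tell the difference;
// the generated code picks the branch pseudo-randomly from the pointer value.
type _exampleThread struct {
	task   func(in int) int // nil once the task has already run
	in     int
	result int
}
func _exampleThreadCreate(task func(in int) int, in int, eager bool) *_exampleThread {
	t := &_exampleThread{}
	if eager {
		t.result = task(in) // do the work now; join only reports it
	} else {
		t.task, t.in = task, in // defer the work until join
	}
	return t
}
func _exampleThreadJoin(t *_exampleThread) int {
	if t.task != nil {
		return t.task(t.in) // deferred case: run the task now
	}
	return t.result // eager case: result was computed at create time
}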
/****************************** End Single-Threaded *************************/
/************** End of threads.c *********************************************/
/************** Begin file utf.c *********************************************/
/*
** 2004 April 13
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains routines used to translate between UTF-8,
** UTF-16, UTF-16BE, and UTF-16LE.
**
** Notes on UTF-8:
**
** Byte-0 Byte-1 Byte-2 Byte-3 Value
** 0xxxxxxx 00000000 00000000 0xxxxxxx
** 110yyyyy 10xxxxxx 00000000 00000yyy yyxxxxxx
** 1110zzzz 10yyyyyy 10xxxxxx 00000000 zzzzyyyy yyxxxxxx
** 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx 000uuuuu zzzzyyyy yyxxxxxx
**
**
** Notes on UTF-16: (with wwww+1==uuuuu)
**
** Word-0 Word-1 Value
** 110110ww wwzzzzyy 110111yy yyxxxxxx 000uuuuu zzzzyyyy yyxxxxxx
** zzzzyyyy yyxxxxxx 00000000 zzzzyyyy yyxxxxxx
**
**
** BOM or Byte Order Mark:
** 0xff 0xfe little-endian utf-16 follows
** 0xfe 0xff big-endian utf-16 follows
**
*/
/* #include "sqliteInt.h" */
/* #include <assert.h> */
/* #include "vdbeInt.h" */
// C documentation
//
// /*
// ** This lookup table is used to help decode the first byte of
// ** a multi-byte UTF8 character.
// */
var _sqlite3Utf8Trans1 = [64]uint8{
1: uint8(0x01),
2: uint8(0x02),
3: uint8(0x03),
4: uint8(0x04),
5: uint8(0x05),
6: uint8(0x06),
7: uint8(0x07),
8: uint8(0x08),
9: uint8(0x09),
10: uint8(0x0a),
11: uint8(0x0b),
12: uint8(0x0c),
13: uint8(0x0d),
14: uint8(0x0e),
15: uint8(0x0f),
16: uint8(0x10),
17: uint8(0x11),
18: uint8(0x12),
19: uint8(0x13),
20: uint8(0x14),
21: uint8(0x15),
22: uint8(0x16),
23: uint8(0x17),
24: uint8(0x18),
25: uint8(0x19),
26: uint8(0x1a),
27: uint8(0x1b),
28: uint8(0x1c),
29: uint8(0x1d),
30: uint8(0x1e),
31: uint8(0x1f),
33: uint8(0x01),
34: uint8(0x02),
35: uint8(0x03),
36: uint8(0x04),
37: uint8(0x05),
38: uint8(0x06),
39: uint8(0x07),
40: uint8(0x08),
41: uint8(0x09),
42: uint8(0x0a),
43: uint8(0x0b),
44: uint8(0x0c),
45: uint8(0x0d),
46: uint8(0x0e),
47: uint8(0x0f),
49: uint8(0x01),
50: uint8(0x02),
51: uint8(0x03),
52: uint8(0x04),
53: uint8(0x05),
54: uint8(0x06),
55: uint8(0x07),
57: uint8(0x01),
58: uint8(0x02),
59: uint8(0x03),
61: uint8(0x01),
}
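// Illustrative sketch (hypothetical helper, assumes well-formed input): how a
// lead-byte table such as _sqlite3Utf8Trans1 is used. Bytes below 0xc0 stand
// for themselves; otherwise the table supplies the initial bits and every
// 10xxxxxx continuation byte contributes six more.
func _exampleUtf8Decode(z []byte) (c uint32, n int) {
	if len(z) == 0 {
		return 0, 0
	}
	c = uint32(z[0])
	n = 1
	if c >= 0xc0 {
		c = uint32(_sqlite3Utf8Trans1[c-0xc0])
		for n < len(z) && z[n]&0xc0 == 0x80 {
			c = c<<6 + uint32(0x3f&z[n]) // fold in six payload bits
			n++
		}
	}
	return c, n
}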
// C documentation
//
// /*
// ** Translate a single UTF-8 character. Return the unicode value.
// **
// ** During translation, assume that the byte that zTerm points
// ** to is a 0x00.
// **
// ** Write a pointer to the next unread byte back into *pzNext.
// **
// ** Notes On Invalid UTF-8:
// **
// ** * This routine never allows a 7-bit character (0x00 through 0x7f) to
// ** be encoded as a multi-byte character. Any multi-byte character that
// ** attempts to encode a value between 0x00 and 0x7f is rendered as 0xfffd.
// **
// ** * This routine never allows a UTF16 surrogate value to be encoded.
// ** If a multi-byte character attempts to encode a value between
// ** 0xd800 and 0xe000 then it is rendered as 0xfffd.
// **
// ** * Bytes in the range of 0x80 through 0xbf which occur as the first
// ** byte of a character are interpreted as single-byte characters
// ** and rendered as themselves even though they are technically
// ** invalid characters.
// **
// ** * This routine accepts over-length UTF8 encodings
// ** for unicode values 0x80 and greater. It does not change over-length
// ** encodings to 0xfffd as some systems recommend.
// */
func _sqlite3Utf8Read(tls *libc.TLS, pz uintptr) (r Tu32) {
var c uint32
var v1, v2, v3, v4 uintptr
_, _, _, _, _ = c, v1, v2, v3, v4
/* Same as READ_UTF8() above but without the zTerm parameter.
** For this routine, we assume the UTF8 string is always zero-terminated.
*/
v2 = pz
v1 = *(*uintptr)(unsafe.Pointer(v2))
*(*uintptr)(unsafe.Pointer(v2))++
c = uint32(*(*uint8)(unsafe.Pointer(v1)))
if c >= uint32(0xc0) {
c = uint32(_sqlite3Utf8Trans1[c-uint32(0xc0)])
for int32(*(*uint8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pz)))))&int32(0xc0) == int32(0x80) {
v4 = pz
v3 = *(*uintptr)(unsafe.Pointer(v4))
*(*uintptr)(unsafe.Pointer(v4))++
c = c<= uint32(0xc0) {
c = uint32(_sqlite3Utf8Trans1[c-uint32(0xc0)])
if n > int32(4) {
n = int32(4)
}
for i < n && int32(*(*Tu8)(unsafe.Pointer(z + uintptr(i))))&int32(0xc0) == int32(0x80) {
c = c< UTF-16 Little-endian */
for zIn < zTerm {
v2 = zIn
zIn++
c = uint32(*(*uint8)(unsafe.Pointer(v2)))
if c >= uint32(0xc0) {
c = uint32(_sqlite3Utf8Trans1[c-uint32(0xc0)])
for zIn != zTerm && int32(*(*uint8)(unsafe.Pointer(zIn)))&int32(0xc0) == int32(0x80) {
v3 = zIn
zIn++
c = c<> libc.Int32FromInt32(8) & libc.Uint32FromInt32(0x00FF))
} else {
v6 = z
z++
*(*uint8)(unsafe.Pointer(v6)) = uint8(c>>libc.Int32FromInt32(10)&libc.Uint32FromInt32(0x003F) + (c-libc.Uint32FromInt32(0x10000))>>libc.Int32FromInt32(10)&libc.Uint32FromInt32(0x00C0))
v7 = z
z++
*(*uint8)(unsafe.Pointer(v7)) = uint8(libc.Uint32FromInt32(0x00D8) + (c-libc.Uint32FromInt32(0x10000))>>libc.Int32FromInt32(18)&libc.Uint32FromInt32(0x03))
v8 = z
z++
*(*uint8)(unsafe.Pointer(v8)) = uint8(c & libc.Uint32FromInt32(0x00FF))
v9 = z
z++
*(*uint8)(unsafe.Pointer(v9)) = uint8(libc.Uint32FromInt32(0x00DC) + c>>libc.Int32FromInt32(8)&libc.Uint32FromInt32(0x03))
}
}
} else {
/* UTF-8 -> UTF-16 Big-endian */
for zIn < zTerm {
v10 = zIn
zIn++
c = uint32(*(*uint8)(unsafe.Pointer(v10)))
if c >= uint32(0xc0) {
c = uint32(_sqlite3Utf8Trans1[c-uint32(0xc0)])
for zIn != zTerm && int32(*(*uint8)(unsafe.Pointer(zIn)))&int32(0xc0) == int32(0x80) {
v11 = zIn
zIn++
c = c<> libc.Int32FromInt32(8) & libc.Uint32FromInt32(0x00FF))
v13 = z
z++
*(*uint8)(unsafe.Pointer(v13)) = uint8(c & libc.Uint32FromInt32(0x00FF))
} else {
v14 = z
z++
*(*uint8)(unsafe.Pointer(v14)) = uint8(libc.Uint32FromInt32(0x00D8) + (c-libc.Uint32FromInt32(0x10000))>>libc.Int32FromInt32(18)&libc.Uint32FromInt32(0x03))
v15 = z
z++
*(*uint8)(unsafe.Pointer(v15)) = uint8(c>>libc.Int32FromInt32(10)&libc.Uint32FromInt32(0x003F) + (c-libc.Uint32FromInt32(0x10000))>>libc.Int32FromInt32(10)&libc.Uint32FromInt32(0x00C0))
v16 = z
z++
*(*uint8)(unsafe.Pointer(v16)) = uint8(libc.Uint32FromInt32(0x00DC) + c>>libc.Int32FromInt32(8)&libc.Uint32FromInt32(0x03))
v17 = z
z++
*(*uint8)(unsafe.Pointer(v17)) = uint8(c & libc.Uint32FromInt32(0x00FF))
}
}
}
(*TMem)(unsafe.Pointer(pMem)).Fn = int32(int64(z) - int64(zOut))
v18 = z
z++
*(*uint8)(unsafe.Pointer(v18)) = uint8(0)
} else {
if int32((*TMem)(unsafe.Pointer(pMem)).Fenc) == int32(SQLITE_UTF16LE) {
/* UTF-16 Little-endian -> UTF-8 */
for zIn < zTerm {
v19 = zIn
zIn++
c = uint32(*(*uint8)(unsafe.Pointer(v19)))
v20 = zIn
zIn++
c += uint32(int32(*(*uint8)(unsafe.Pointer(v20))) << int32(8))
if c >= uint32(0xd800) && c < uint32(0xe000) {
if zIn < zTerm {
v21 = zIn
zIn++
c2 = int32(*(*uint8)(unsafe.Pointer(v21)))
v22 = zIn
zIn++
c2 += int32(*(*uint8)(unsafe.Pointer(v22))) << int32(8)
c = uint32(c2&libc.Int32FromInt32(0x03FF)) + c&uint32(0x003F)<>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x1F))))
v25 = z
z++
*(*uint8)(unsafe.Pointer(v25)) = uint8(int32(0x80) + int32(uint8(c&libc.Uint32FromInt32(0x3F))))
} else {
if c < uint32(0x10000) {
v26 = z
z++
*(*uint8)(unsafe.Pointer(v26)) = uint8(int32(0xE0) + int32(uint8(c>>libc.Int32FromInt32(12)&libc.Uint32FromInt32(0x0F))))
v27 = z
z++
*(*uint8)(unsafe.Pointer(v27)) = uint8(int32(0x80) + int32(uint8(c>>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x3F))))
v28 = z
z++
*(*uint8)(unsafe.Pointer(v28)) = uint8(int32(0x80) + int32(uint8(c&libc.Uint32FromInt32(0x3F))))
} else {
v29 = z
z++
*(*uint8)(unsafe.Pointer(v29)) = uint8(int32(0xF0) + int32(uint8(c>>libc.Int32FromInt32(18)&libc.Uint32FromInt32(0x07))))
v30 = z
z++
*(*uint8)(unsafe.Pointer(v30)) = uint8(int32(0x80) + int32(uint8(c>>libc.Int32FromInt32(12)&libc.Uint32FromInt32(0x3F))))
v31 = z
z++
*(*uint8)(unsafe.Pointer(v31)) = uint8(int32(0x80) + int32(uint8(c>>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x3F))))
v32 = z
z++
*(*uint8)(unsafe.Pointer(v32)) = uint8(int32(0x80) + int32(uint8(c&libc.Uint32FromInt32(0x3F))))
}
}
}
}
} else {
/* UTF-16 Big-endian -> UTF-8 */
for zIn < zTerm {
v33 = zIn
zIn++
c = uint32(int32(*(*uint8)(unsafe.Pointer(v33))) << int32(8))
v34 = zIn
zIn++
c += uint32(*(*uint8)(unsafe.Pointer(v34)))
if c >= uint32(0xd800) && c < uint32(0xe000) {
if zIn < zTerm {
v35 = zIn
zIn++
c21 = int32(*(*uint8)(unsafe.Pointer(v35))) << int32(8)
v36 = zIn
zIn++
c21 += int32(*(*uint8)(unsafe.Pointer(v36)))
c = uint32(c21&libc.Int32FromInt32(0x03FF)) + c&uint32(0x003F)<>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x1F))))
v39 = z
z++
*(*uint8)(unsafe.Pointer(v39)) = uint8(int32(0x80) + int32(uint8(c&libc.Uint32FromInt32(0x3F))))
} else {
if c < uint32(0x10000) {
v40 = z
z++
*(*uint8)(unsafe.Pointer(v40)) = uint8(int32(0xE0) + int32(uint8(c>>libc.Int32FromInt32(12)&libc.Uint32FromInt32(0x0F))))
v41 = z
z++
*(*uint8)(unsafe.Pointer(v41)) = uint8(int32(0x80) + int32(uint8(c>>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x3F))))
v42 = z
z++
*(*uint8)(unsafe.Pointer(v42)) = uint8(int32(0x80) + int32(uint8(c&libc.Uint32FromInt32(0x3F))))
} else {
v43 = z
z++
*(*uint8)(unsafe.Pointer(v43)) = uint8(int32(0xF0) + int32(uint8(c>>libc.Int32FromInt32(18)&libc.Uint32FromInt32(0x07))))
v44 = z
z++
*(*uint8)(unsafe.Pointer(v44)) = uint8(int32(0x80) + int32(uint8(c>>libc.Int32FromInt32(12)&libc.Uint32FromInt32(0x3F))))
v45 = z
z++
*(*uint8)(unsafe.Pointer(v45)) = uint8(int32(0x80) + int32(uint8(c>>libc.Int32FromInt32(6)&libc.Uint32FromInt32(0x3F))))
v46 = z
z++
*(*uint8)(unsafe.Pointer(v46)) = uint8(int32(0x80) + int32(uint8(c&libc.Uint32FromInt32(0x3F))))
}
}
}
}
}
(*TMem)(unsafe.Pointer(pMem)).Fn = int32(int64(z) - int64(zOut))
}
*(*uint8)(unsafe.Pointer(z)) = uint8(0)
c = uint32(libc.Int32FromInt32(MEM_Str) | libc.Int32FromInt32(MEM_Term) | int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_AffMask)|libc.Int32FromInt32(MEM_Subtype)))
_sqlite3VdbeMemRelease(tls, pMem)
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(c)
(*TMem)(unsafe.Pointer(pMem)).Fenc = desiredEnc
(*TMem)(unsafe.Pointer(pMem)).Fz = zOut
(*TMem)(unsafe.Pointer(pMem)).FzMalloc = (*TMem)(unsafe.Pointer(pMem)).Fz
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = _sqlite3DbMallocSize(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, (*TMem)(unsafe.Pointer(pMem)).Fz)
goto translate_out
translate_out:
;
return SQLITE_OK
return r
}
// C documentation
//
// /*
// ** This routine checks for a byte-order mark at the beginning of the
// ** UTF-16 string stored in *pMem. If one is present, it is removed and
// ** the encoding of the Mem adjusted. This routine does not do any
// ** byte-swapping, it just sets Mem.enc appropriately.
// **
// ** The allocation (static, dynamic etc.) and encoding of the Mem may be
// ** changed by this function.
// */
func _sqlite3VdbeMemHandleBom(tls *libc.TLS, pMem uintptr) (r int32) {
var b1, b2, bom Tu8
var rc int32
var p1 uintptr
_, _, _, _, _ = b1, b2, bom, rc, p1
rc = SQLITE_OK
bom = uint8(0)
if (*TMem)(unsafe.Pointer(pMem)).Fn > int32(1) {
b1 = *(*Tu8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz))
b2 = *(*Tu8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + libc.UintptrFromInt32(1)))
if int32(b1) == int32(0xFE) && int32(b2) == int32(0xFF) {
bom = uint8(SQLITE_UTF16BE)
}
if int32(b1) == int32(0xFF) && int32(b2) == int32(0xFE) {
bom = uint8(SQLITE_UTF16LE)
}
}
if bom != 0 {
rc = _sqlite3VdbeMemMakeWriteable(tls, pMem)
if rc == SQLITE_OK {
*(*int32)(unsafe.Pointer(pMem + 16)) -= int32(2)
libc.Xmemmove(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, (*TMem)(unsafe.Pointer(pMem)).Fz+2, uint64((*TMem)(unsafe.Pointer(pMem)).Fn))
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr((*TMem)(unsafe.Pointer(pMem)).Fn))) = int8('\000')
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr((*TMem)(unsafe.Pointer(pMem)).Fn+int32(1)))) = int8('\000')
p1 = pMem + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(MEM_Term))
(*TMem)(unsafe.Pointer(pMem)).Fenc = bom
}
}
return rc
}
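// Illustrative sketch (hypothetical helper): the BOM test above reduced to
// plain byte comparisons. It returns the detected UTF-16 encoding constant,
// or 0 when the buffer does not start with a byte-order mark.
func _exampleDetectBom(z []byte) int {
	if len(z) > 1 {
		if z[0] == 0xFE && z[1] == 0xFF {
			return SQLITE_UTF16BE // big-endian BOM
		}
		if z[0] == 0xFF && z[1] == 0xFE {
			return SQLITE_UTF16LE // little-endian BOM
		}
	}
	return 0
}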
// C documentation
//
// /*
// ** pZ is a UTF-8 encoded unicode string. If nByte is less than zero,
// ** return the number of unicode characters in pZ up to (but not including)
// ** the first 0x00 byte. If nByte is not less than zero, return the
// ** number of unicode characters in the first nByte of pZ (or up to
// ** the first 0x00, whichever comes first).
// */
func _sqlite3Utf8CharLen(tls *libc.TLS, zIn uintptr, nByte int32) (r1 int32) {
var r int32
var z, zTerm, v1 uintptr
_, _, _, _ = r, z, zTerm, v1
r = 0
z = zIn
if nByte >= 0 {
zTerm = z + uintptr(nByte)
} else {
zTerm = uintptr(-libc.Int32FromInt32(1))
}
for int32(*(*Tu8)(unsafe.Pointer(z))) != 0 && z < zTerm {
v1 = z
z++
if int32(*(*Tu8)(unsafe.Pointer(v1))) >= int32(0xc0) {
for int32(*(*Tu8)(unsafe.Pointer(z)))&int32(0xc0) == int32(0x80) {
z++
}
}
r++
}
return r
}
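// Illustrative sketch (hypothetical helper, assumes well-formed UTF-8): the
// character count above amounts to counting every byte that is not a
// 10xxxxxx continuation byte.
func _exampleUtf8CharCount(z []byte) (n int) {
	for i := 0; i < len(z); i++ {
		if z[i]&0xc0 != 0x80 {
			n++ // lead byte or 7-bit character
		}
	}
	return n
}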
/* This test function is not currently used by the automated test-suite.
** Hence it is only available in debug builds.
*/
// C documentation
//
// /*
// ** Convert a UTF-16 string in the native encoding into a UTF-8 string.
// ** Memory to hold the UTF-8 string is obtained from sqlite3_malloc and must
// ** be freed by the calling function.
// **
// ** NULL is returned if there is an allocation error.
// */
func _sqlite3Utf16to8(tls *libc.TLS, db uintptr, z uintptr, nByte int32, enc Tu8) (r uintptr) {
bp := tls.Alloc(64)
defer tls.Free(64)
var _ /* m at bp+0 */ TMem
libc.Xmemset(tls, bp, 0, uint64(56))
(*(*TMem)(unsafe.Pointer(bp))).Fdb = db
_sqlite3VdbeMemSetStr(tls, bp, z, int64(nByte), enc, libc.UintptrFromInt32(0))
_sqlite3VdbeChangeEncoding(tls, bp, int32(SQLITE_UTF8))
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
_sqlite3VdbeMemRelease(tls, bp)
(*(*TMem)(unsafe.Pointer(bp))).Fz = uintptr(0)
}
return (*(*TMem)(unsafe.Pointer(bp))).Fz
}
// C documentation
//
// /*
// ** zIn is a UTF-16 encoded unicode string at least nChar characters long.
// ** Return the number of bytes in the first nChar unicode characters
// ** in pZ. nChar must be non-negative.
// */
func _sqlite3Utf16ByteLen(tls *libc.TLS, zIn uintptr, nChar int32) (r int32) {
var c, n int32
var z uintptr
_, _, _ = c, n, z
z = zIn
n = 0
if true {
z++
}
for n < nChar {
c = int32(*(*uint8)(unsafe.Pointer(z)))
z += uintptr(2)
if c >= int32(0xd8) && c < int32(0xdc) && int32(*(*uint8)(unsafe.Pointer(z))) >= int32(0xdc) && int32(*(*uint8)(unsafe.Pointer(z))) < int32(0xe0) {
z += uintptr(2)
}
n++
}
return int32(int64(z)-int64(zIn)) - libc.BoolInt32(true)
}
type Tdouble_t = float64
type double_t = Tdouble_t
type Tfloat_t = float32
type float_t = Tfloat_t
// C documentation
//
// /*
// ** Calls to sqlite3FaultSim() are used to simulate a failure during testing,
// ** or to bypass normal error detection during testing in order to let
// ** execution proceed further downstream.
// **
// ** In deployment, sqlite3FaultSim() *always* returns SQLITE_OK (0). The
// ** sqlite3FaultSim() function only returns non-zero during testing.
// **
// ** During testing, if the test harness has set a fault-sim callback using
// ** a call to sqlite3_test_control(SQLITE_TESTCTRL_FAULT_INSTALL), then
// ** each call to sqlite3FaultSim() is relayed to that application-supplied
// ** callback and the integer return value from the application-supplied
// ** callback is returned by sqlite3FaultSim().
// **
// ** The integer argument to sqlite3FaultSim() is a code to identify which
// ** sqlite3FaultSim() instance is being invoked. Each call to sqlite3FaultSim()
// ** should have a unique code. To prevent legacy testing applications from
// ** breaking, the codes should not be changed or reused.
// */
func _sqlite3FaultSim(tls *libc.TLS, iTest int32) (r int32) {
var xCallback uintptr
var v1 int32
_, _ = xCallback, v1
xCallback = _sqlite3Config.FxTestCallback
if xCallback != 0 {
v1 = (*(*func(*libc.TLS, int32) int32)(unsafe.Pointer(&struct{ uintptr }{xCallback})))(tls, iTest)
} else {
v1 = SQLITE_OK
}
return v1
}
// C documentation
//
// /*
// ** Return true if the floating point value is Not a Number (NaN).
// **
// ** Use the math library isnan() function if compiled with SQLITE_HAVE_ISNAN.
// ** Otherwise, we have our own implementation that works on most systems.
// */
func _sqlite3IsNaN(tls *libc.TLS, _x float64) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
*(*float64)(unsafe.Pointer(bp)) = _x
var rc int32
var _ /* y at bp+8 */ Tu64
_ = rc
libc.Xmemcpy(tls, bp+8, bp, uint64(8))
rc = libc.BoolInt32(*(*Tu64)(unsafe.Pointer(bp + 8))&(libc.Uint64FromInt32(0x7ff)<>4)) != 0 {
return _sqlite3StdType[int32(uint32(*(*uint8)(unsafe.Pointer(pCol + 8))&0xf0>>4))-int32(1)]
} else {
return zDflt
}
}
return r
}
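// Illustrative sketch (hypothetical helper): the bit-pattern NaN test
// described in the comment above _sqlite3IsNaN. A double is NaN exactly when
// its exponent field is all ones and its mantissa is non-zero, so the test
// can be done on the raw IEEE-754 bits without calling isnan().
func _exampleIsNaN(x float64) bool {
	y := *(*uint64)(unsafe.Pointer(&x)) // raw bits of x
	return y&0x7ff0000000000000 == 0x7ff0000000000000 &&
		y&0x000fffffffffffff != 0
}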
// C documentation
//
// /*
// ** Helper function for sqlite3Error() - called rarely. Broken out into
// ** a separate routine to avoid unnecessary register saves on entry to
// ** sqlite3Error().
// */
func _sqlite3ErrorFinish(tls *libc.TLS, db uintptr, err_code int32) {
if (*Tsqlite3)(unsafe.Pointer(db)).FpErr != 0 {
_sqlite3ValueSetNull(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpErr)
}
_sqlite3SystemError(tls, db, err_code)
}
// C documentation
//
// /*
// ** Set the current error code to err_code and clear any prior error message.
// ** Also set iSysErrno (by calling sqlite3System) if the err_code indicates
// ** that would be appropriate.
// */
func _sqlite3Error(tls *libc.TLS, db uintptr, err_code int32) {
(*Tsqlite3)(unsafe.Pointer(db)).FerrCode = err_code
if err_code != 0 || (*Tsqlite3)(unsafe.Pointer(db)).FpErr != 0 {
_sqlite3ErrorFinish(tls, db, err_code)
} else {
(*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset = -int32(1)
}
}
// C documentation
//
// /*
// ** The equivalent of sqlite3Error(db, SQLITE_OK). Clear the error state
// ** and error message.
// */
func _sqlite3ErrorClear(tls *libc.TLS, db uintptr) {
(*Tsqlite3)(unsafe.Pointer(db)).FerrCode = SQLITE_OK
(*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset = -int32(1)
if (*Tsqlite3)(unsafe.Pointer(db)).FpErr != 0 {
_sqlite3ValueSetNull(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpErr)
}
}
// C documentation
//
// /*
// ** Load the sqlite3.iSysErrno field if that is an appropriate thing
// ** to do based on the SQLite error code in rc.
// */
func _sqlite3SystemError(tls *libc.TLS, db uintptr, rc int32) {
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(12)<= (*Tsqlite3)(unsafe.Pointer(db)).FnProgressOps {
if (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxProgress})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpProgressArg) != 0 {
(*TParse)(unsafe.Pointer(p)).FnErr++
(*TParse)(unsafe.Pointer(p)).Frc = int32(SQLITE_INTERRUPT)
}
(*TParse)(unsafe.Pointer(p)).FnProgressSteps = uint32(0)
}
}
}
}
// C documentation
//
// /*
// ** Add an error message to pParse->zErrMsg and increment pParse->nErr.
// **
// ** This function should be used to report any error that occurs while
// ** compiling an SQL statement (i.e. within sqlite3_prepare()). The
// ** last thing the sqlite3_prepare() function does is copy the error
// ** stored by this function into the database handle using sqlite3Error().
// ** Functions sqlite3Error() or sqlite3ErrorWithMsg() should be used
// ** during statement execution (sqlite3_step() etc.).
// */
func _sqlite3ErrorMsg(tls *libc.TLS, pParse uintptr, zFormat uintptr, va uintptr) {
var ap Tva_list
var db, zMsg uintptr
_, _, _ = ap, db, zMsg
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
(*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset = -int32(2)
ap = va
zMsg = _sqlite3VMPrintf(tls, db, zFormat, ap)
_ = ap
if (*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset < -int32(1) {
(*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset = -int32(1)
}
if (*Tsqlite3)(unsafe.Pointer(db)).FsuppressErr != 0 {
_sqlite3DbFree(tls, db, zMsg)
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
(*TParse)(unsafe.Pointer(pParse)).FnErr++
(*TParse)(unsafe.Pointer(pParse)).Frc = int32(SQLITE_NOMEM)
}
} else {
(*TParse)(unsafe.Pointer(pParse)).FnErr++
_sqlite3DbFree(tls, db, (*TParse)(unsafe.Pointer(pParse)).FzErrMsg)
(*TParse)(unsafe.Pointer(pParse)).FzErrMsg = zMsg
(*TParse)(unsafe.Pointer(pParse)).Frc = int32(SQLITE_ERROR)
(*TParse)(unsafe.Pointer(pParse)).FpWith = uintptr(0)
}
}
// C documentation
//
// /*
// ** If database connection db is currently parsing SQL, then transfer
// ** error code errCode to that parser if the parser has not already
// ** encountered some other kind of error.
// */
func _sqlite3ErrorToParser(tls *libc.TLS, db uintptr, errCode int32) (r int32) {
var pParse, v1 uintptr
var v2 bool
_, _, _ = pParse, v1, v2
if v2 = db == uintptr(0); !v2 {
v1 = (*Tsqlite3)(unsafe.Pointer(db)).FpParse
pParse = v1
}
if v2 || v1 == uintptr(0) {
return errCode
}
(*TParse)(unsafe.Pointer(pParse)).Frc = errCode
(*TParse)(unsafe.Pointer(pParse)).FnErr++
return errCode
}
// C documentation
//
// /*
// ** Convert an SQL-style quoted string into a normal string by removing
// ** the quote characters. The conversion is done in-place. If the
// ** input does not begin with a quote character, then this routine
// ** is a no-op.
// **
// ** The input string must be zero-terminated. A new zero-terminator
// ** is added to the dequoted string.
// **
// ** The return value is -1 if no dequoting occurs or the length of the
// ** dequoted string, exclusive of the zero terminator, if dequoting does
// ** occur.
// **
// ** 2002-02-14: This routine is extended to remove MS-Access style
// ** brackets from around identifiers. For example: "[a-b-c]" becomes
// ** "a-b-c".
// */
func _sqlite3Dequote(tls *libc.TLS, z uintptr) {
var i, j, v2, v3 int32
var quote int8
_, _, _, _, _ = i, j, quote, v2, v3
if z == uintptr(0) {
return
}
quote = *(*int8)(unsafe.Pointer(z))
if !(int32(_sqlite3CtypeMap[uint8(quote)])&libc.Int32FromInt32(0x80) != 0) {
return
}
if int32(quote) == int32('[') {
quote = int8(']')
}
i = int32(1)
j = libc.Int32FromInt32(0)
for {
if int32(*(*int8)(unsafe.Pointer(z + uintptr(i)))) == int32(quote) {
if int32(*(*int8)(unsafe.Pointer(z + uintptr(i+int32(1))))) == int32(quote) {
v2 = j
j++
*(*int8)(unsafe.Pointer(z + uintptr(v2))) = quote
i++
} else {
break
}
} else {
v3 = j
j++
*(*int8)(unsafe.Pointer(z + uintptr(v3))) = *(*int8)(unsafe.Pointer(z + uintptr(i)))
}
goto _1
_1:
;
i++
}
*(*int8)(unsafe.Pointer(z + uintptr(j))) = 0
}
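// Illustrative sketch (hypothetical helper, operating on a Go string rather
// than dequoting in place): the rules applied above. 'abc', "abc" and `abc`
// become abc, [abc] becomes abc, and a doubled quote character inside the
// body collapses to a single one.
func _exampleDequote(s string) string {
	if len(s) < 2 {
		return s
	}
	quote := s[0]
	if quote == '[' {
		quote = ']'
	} else if quote != '\'' && quote != '"' && quote != '`' {
		return s // not a quoted token; leave it alone
	}
	out := make([]byte, 0, len(s))
	for i := 1; i < len(s); i++ {
		if s[i] == quote {
			if i+1 < len(s) && s[i+1] == quote {
				out = append(out, quote) // doubled quote -> one literal quote
				i++
				continue
			}
			break // closing quote
		}
		out = append(out, s[i])
	}
	return string(out)
}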
func _sqlite3DequoteExpr(tls *libc.TLS, p uintptr) {
var v1 int32
_ = v1
if int32(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 8))))) == int32('"') {
v1 = libc.Int32FromInt32(EP_Quoted) | libc.Int32FromInt32(EP_DblQuoted)
} else {
v1 = int32(EP_Quoted)
}
*(*Tu32)(unsafe.Pointer(p + 4)) |= uint32(v1)
_sqlite3Dequote(tls, *(*uintptr)(unsafe.Pointer(p + 8)))
}
// C documentation
//
// /*
// ** If the input token p is quoted, try to adjust the token to remove
// ** the quotes. This is not always possible:
// **
// ** "abc" -> abc
// ** "ab""cd" -> (not possible because of the interior "")
// **
// ** Remove the quotes if possible. This is an optimization. The overall
// ** system should still return the correct answer even if this routine
// ** is always a no-op.
// */
func _sqlite3DequoteToken(tls *libc.TLS, p uintptr) {
var i uint32
_ = i
if (*TToken)(unsafe.Pointer(p)).Fn < uint32(2) {
return
}
if !(int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer((*TToken)(unsafe.Pointer(p)).Fz)))])&libc.Int32FromInt32(0x80) != 0) {
return
}
i = uint32(1)
for {
if !(i < (*TToken)(unsafe.Pointer(p)).Fn-uint32(1)) {
break
}
if int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer((*TToken)(unsafe.Pointer(p)).Fz + uintptr(i))))])&int32(0x80) != 0 {
return
}
goto _1
_1:
;
i++
}
*(*uint32)(unsafe.Pointer(p + 8)) -= uint32(2)
(*TToken)(unsafe.Pointer(p)).Fz++
}
// C documentation
//
// /*
// ** Generate a Token object from a string
// */
func _sqlite3TokenInit(tls *libc.TLS, p uintptr, z uintptr) {
(*TToken)(unsafe.Pointer(p)).Fz = z
(*TToken)(unsafe.Pointer(p)).Fn = uint32(_sqlite3Strlen30(tls, z))
}
/* Convenient short-hand */
// C documentation
//
// /*
// ** Some systems have stricmp(). Others have strcasecmp(). Because
// ** there is no consistency, we will define our own.
// **
// ** IMPLEMENTATION-OF: R-30243-02494 The sqlite3_stricmp() and
// ** sqlite3_strnicmp() APIs allow applications and extensions to compare
// ** the contents of two buffers containing UTF-8 strings in a
// ** case-independent fashion, using the same definition of "case
// ** independence" that SQLite uses internally when comparing identifiers.
// */
func Xsqlite3_stricmp(tls *libc.TLS, zLeft uintptr, zRight uintptr) (r int32) {
var v1 int32
_ = v1
if zLeft == uintptr(0) {
if zRight != 0 {
v1 = -int32(1)
} else {
v1 = 0
}
return v1
} else {
if zRight == uintptr(0) {
return int32(1)
}
}
return _sqlite3StrICmp(tls, zLeft, zRight)
}
func _sqlite3StrICmp(tls *libc.TLS, zLeft uintptr, zRight uintptr) (r int32) {
var a, b uintptr
var c, x int32
_, _, _, _ = a, b, c, x
a = zLeft
b = zRight
for {
c = int32(*(*uint8)(unsafe.Pointer(a)))
x = int32(*(*uint8)(unsafe.Pointer(b)))
if c == x {
if c == 0 {
break
}
} else {
c = int32(_sqlite3UpperToLower[c]) - int32(_sqlite3UpperToLower[x])
if c != 0 {
break
}
}
a++
b++
goto _1
_1:
}
return c
}
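// Illustrative sketch (hypothetical helper): the comparison above folds case
// through a 256-entry upper-to-lower table, so only ASCII letters are
// affected. A plain Go rendering of the same ASCII-only folding:
func _exampleStrICmp(a, b string) int {
	fold := func(c byte) byte {
		if c >= 'A' && c <= 'Z' {
			return c + ('a' - 'A') // fold ASCII upper case to lower case
		}
		return c
	}
	i := 0
	for i < len(a) && i < len(b) {
		ca, cb := fold(a[i]), fold(b[i])
		if ca != cb {
			return int(ca) - int(cb)
		}
		i++
	}
	return len(a) - len(b) // shorter string sorts first
}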
func Xsqlite3_strnicmp(tls *libc.TLS, zLeft uintptr, zRight uintptr, N int32) (r int32) {
var a, b uintptr
var v1, v2, v3 int32
_, _, _, _, _ = a, b, v1, v2, v3
if zLeft == uintptr(0) {
if zRight != 0 {
v1 = -int32(1)
} else {
v1 = 0
}
return v1
} else {
if zRight == uintptr(0) {
return int32(1)
}
}
a = zLeft
b = zRight
for {
v2 = N
N--
if !(v2 > 0 && int32(*(*uint8)(unsafe.Pointer(a))) != 0 && int32(_sqlite3UpperToLower[*(*uint8)(unsafe.Pointer(a))]) == int32(_sqlite3UpperToLower[*(*uint8)(unsafe.Pointer(b))])) {
break
}
a++
b++
}
if N < 0 {
v3 = 0
} else {
v3 = int32(_sqlite3UpperToLower[*(*uint8)(unsafe.Pointer(a))]) - int32(_sqlite3UpperToLower[*(*uint8)(unsafe.Pointer(b))])
}
return v3
}
// C documentation
//
// /*
// ** Compute an 8-bit hash on a string that is insensitive to case differences
// */
func _sqlite3StrIHash(tls *libc.TLS, z uintptr) (r Tu8) {
var h Tu8
_ = h
h = uint8(0)
if z == uintptr(0) {
return uint8(0)
}
for *(*int8)(unsafe.Pointer(z)) != 0 {
h = Tu8(int32(h) + int32(_sqlite3UpperToLower[uint8(*(*int8)(unsafe.Pointer(z)))]))
z++
}
return h
}
// C documentation
//
// /* Double-Double multiplication. (x[0],x[1]) *= (y,yy)
// **
// ** Reference:
// ** T. J. Dekker, "A Floating-Point Technique for Extending the
// ** Available Precision". 1971-07-26.
// */
func _dekkerMul2(tls *libc.TLS, x uintptr, _y float64, yy float64) {
bp := tls.Alloc(32)
defer tls.Free(32)
*(*float64)(unsafe.Pointer(bp)) = _y
var c, cc, p, q, tx, ty float64
var _ /* hx at bp+8 */ float64
var _ /* hy at bp+16 */ float64
var _ /* m at bp+24 */ Tu64
_, _, _, _, _, _ = c, cc, p, q, tx, ty
libc.Xmemcpy(tls, bp+24, x, uint64(8))
*(*Tu64)(unsafe.Pointer(bp + 24)) &= uint64(0xfffffffffc000000)
libc.Xmemcpy(tls, bp+8, bp+24, uint64(8))
tx = libc.AtomicLoadPFloat64(x) - *(*float64)(unsafe.Pointer(bp + 8))
libc.Xmemcpy(tls, bp+24, bp, uint64(8))
*(*Tu64)(unsafe.Pointer(bp + 24)) &= uint64(0xfffffffffc000000)
libc.Xmemcpy(tls, bp+16, bp+24, uint64(8))
ty = *(*float64)(unsafe.Pointer(bp)) - *(*float64)(unsafe.Pointer(bp + 16))
p = *(*float64)(unsafe.Pointer(bp + 8)) * *(*float64)(unsafe.Pointer(bp + 16))
q = *(*float64)(unsafe.Pointer(bp + 8))*ty + tx**(*float64)(unsafe.Pointer(bp + 16))
c = p + q
cc = p - c + q + tx*ty
cc = libc.AtomicLoadPFloat64(x)*yy + libc.AtomicLoadPFloat64(x+1*8)**(*float64)(unsafe.Pointer(bp)) + cc
libc.AtomicStorePFloat64(x, c+cc)
libc.AtomicStorePFloat64(x+1*8, c-libc.AtomicLoadPFloat64(x))
*(*float64)(unsafe.Pointer(x + 1*8)) += cc
}
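// Illustrative sketch (hypothetical helpers): the core of Dekker's technique
// as used above. Each factor is split into a "head" with the low 26 mantissa
// bits cleared and a small "tail"; the head product is then exact, and the
// rounding error of the full product can be recovered and carried along.
func _exampleDekkerSplit(x float64) (hi, lo float64) {
	u := *(*uint64)(unsafe.Pointer(&x)) // raw bits of x
	u &= 0xfffffffffc000000             // clear the low 26 mantissa bits
	hi = *(*float64)(unsafe.Pointer(&u))
	lo = x - hi
	return hi, lo
}
func _exampleDekkerMul(x, y float64) (prod, err float64) {
	hx, tx := _exampleDekkerSplit(x)
	hy, ty := _exampleDekkerSplit(y)
	p := hx * hy               // exact: both heads have short mantissas
	q := hx*ty + tx*hy         // cross terms
	prod = p + q               // rounded product
	err = p - prod + q + tx*ty // what rounding discarded
	return prod, err
}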
// C documentation
//
// /*
// ** The string z[] is a text representation of a real number.
// ** Convert this string to a double and write it into *pResult.
// **
// ** The string z[] is length bytes in length (bytes, not characters) and
// ** uses the encoding enc. The string is not necessarily zero-terminated.
// **
// ** Return TRUE if the result is a valid real number (or integer) and FALSE
// ** if the string is empty or contains extraneous text. More specifically
// ** return
// ** 1 => The input string is a pure integer
// ** 2 or more => The input has a decimal point or eNNN clause
// ** 0 or less => The input string is not a valid number
// ** -1 => Not a valid number, but has a valid prefix which
// ** includes a decimal point and/or an eNNN clause
// **
// ** Valid numbers are in one of these formats:
// **
// ** [+-]digits[E[+-]digits]
// ** [+-]digits.[digits][E[+-]digits]
// ** [+-].digits[E[+-]digits]
// **
// ** Leading and trailing whitespace is ignored for the purpose of determining
// ** validity.
// **
// ** If some prefix of the input string is a valid number, this routine
// ** returns FALSE but it still converts the prefix and writes the result
// ** into *pResult.
// */
func _sqlite3AtoF(tls *libc.TLS, z uintptr, pResult uintptr, length int32, enc Tu8) (r1 int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var d, e, eType, eValid, esign, i, incr, nDigit, sign, v2 int32
var r, v3, v4 float64
var s, s2 Tu64
var zEnd uintptr
var _ /* rr at bp+0 */ [2]float64
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = d, e, eType, eValid, esign, i, incr, nDigit, r, s, s2, sign, zEnd, v2, v3, v4
/* sign * significand * (10 ^ (esign * exponent)) */
sign = int32(1) /* sign of significand */
s = uint64(0) /* significand */
d = 0 /* adjust exponent for shifting decimal point */
esign = int32(1) /* sign of exponent */
e = 0 /* exponent */
eValid = int32(1) /* True exponent is either not used or is well-formed */
nDigit = 0 /* Number of digits processed */
eType = int32(1) /* 1: pure integer, 2+: fractional -1 or less: bad UTF16 */
*(*float64)(unsafe.Pointer(pResult)) = float64(0) /* Default return value, in case of an error */
if length == 0 {
return 0
}
if int32(enc) == int32(SQLITE_UTF8) {
incr = int32(1)
zEnd = z + uintptr(length)
} else {
incr = int32(2)
length &= ^libc.Int32FromInt32(1)
i = int32(3) - int32(enc)
for {
if !(i < length && int32(*(*int8)(unsafe.Pointer(z + uintptr(i)))) == 0) {
break
}
goto _1
_1:
;
i += int32(2)
}
if i < length {
eType = -int32(100)
}
zEnd = z + uintptr(i^int32(1))
z += uintptr(int32(enc) & libc.Int32FromInt32(1))
}
/* skip leading spaces */
for z < zEnd && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z)))])&int32(0x01) != 0 {
z += uintptr(incr)
}
if z >= zEnd {
return 0
}
/* get sign of significand */
if int32(*(*int8)(unsafe.Pointer(z))) == int32('-') {
sign = -int32(1)
z += uintptr(incr)
} else {
if int32(*(*int8)(unsafe.Pointer(z))) == int32('+') {
z += uintptr(incr)
}
}
/* copy max significant digits to significand */
for z < zEnd && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z)))])&int32(0x04) != 0 {
s = s*uint64(10) + uint64(int32(*(*int8)(unsafe.Pointer(z)))-libc.Int32FromUint8('0'))
z += uintptr(incr)
nDigit++
if s >= (libc.Uint64FromUint32(0xffffffff)|libc.Uint64FromUint32(0xffffffff)<= zEnd {
goto do_atof_calc
}
/* if decimal point is present */
if int32(*(*int8)(unsafe.Pointer(z))) == int32('.') {
z += uintptr(incr)
eType++
/* copy digits from after decimal to significand
** (decrease exponent by d to shift decimal right) */
for z < zEnd && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z)))])&int32(0x04) != 0 {
if s < (libc.Uint64FromUint32(0xffffffff)|libc.Uint64FromUint32(0xffffffff)<= zEnd {
goto do_atof_calc
}
/* if exponent is present */
if int32(*(*int8)(unsafe.Pointer(z))) == int32('e') || int32(*(*int8)(unsafe.Pointer(z))) == int32('E') {
z += uintptr(incr)
eValid = 0
eType++
/* This branch is needed to avoid a (harmless) buffer overread. The
** special comment alerts the mutation tester that the correct answer
** is obtained even if the branch is omitted */
if z >= zEnd {
goto do_atof_calc
} /*PREVENTS-HARMLESS-OVERREAD*/
/* get sign of exponent */
if int32(*(*int8)(unsafe.Pointer(z))) == int32('-') {
esign = -int32(1)
z += uintptr(incr)
} else {
if int32(*(*int8)(unsafe.Pointer(z))) == int32('+') {
z += uintptr(incr)
}
}
/* copy digits to exponent */
for z < zEnd && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z)))])&int32(0x04) != 0 {
if e < int32(10000) {
v2 = e*int32(10) + (int32(*(*int8)(unsafe.Pointer(z))) - int32('0'))
} else {
v2 = int32(10000)
}
e = v2
z += uintptr(incr)
eValid = int32(1)
}
}
/* skip trailing spaces */
for z < zEnd && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z)))])&int32(0x01) != 0 {
z += uintptr(incr)
}
goto do_atof_calc
do_atof_calc:
;
/* Zero is a special case */
if s == uint64(0) {
if sign < 0 {
v3 = -libc.Float64FromFloat64(0)
} else {
v3 = +libc.Float64FromFloat64(0)
}
*(*float64)(unsafe.Pointer(pResult)) = v3
goto atof_return
}
/* adjust exponent by d, and update sign */
e = e*esign + d
/* Try to adjust the exponent to make it smaller */
for e > 0 && s < (libc.Uint64FromUint32(0xffffffff)|libc.Uint64FromUint32(0xffffffff)< 0 {
for e >= int32(100) {
e -= int32(100)
r = r * libc.Float64FromFloat64(1e+100)
}
for e >= int32(10) {
e -= int32(10)
r = r * libc.Float64FromFloat64(1e+10)
}
for e >= int32(1) {
e -= int32(1)
r = r * libc.Float64FromFloat64(10)
}
} else {
for e <= -int32(100) {
e += int32(100)
r = r * libc.Float64FromFloat64(1e-100)
}
for e <= -int32(10) {
e += int32(10)
r = r * libc.Float64FromFloat64(1e-10)
}
for e <= -int32(1) {
e += int32(1)
r = r * libc.Float64FromFloat64(0.1)
}
}
if r > +libc.Float64FromFloat64(1.7976931348623157081452742373e+308) {
*(*float64)(unsafe.Pointer(pResult)) = float64(+libc.X__builtin_inff(tls))
} else {
*(*float64)(unsafe.Pointer(pResult)) = r
}
} else {
(*(*[2]float64)(unsafe.Pointer(bp)))[0] = float64(s)
s2 = uint64((*(*[2]float64)(unsafe.Pointer(bp)))[0])
if s >= s2 {
v4 = float64(s - s2)
} else {
v4 = -float64(s2 - s)
}
(*(*[2]float64)(unsafe.Pointer(bp)))[int32(1)] = v4
if e > 0 {
for e >= int32(100) {
e -= int32(100)
_dekkerMul2(tls, bp, float64(1e+100), -libc.Float64FromFloat64(1.5902891109759918e+83))
}
for e >= int32(10) {
e -= int32(10)
_dekkerMul2(tls, bp, float64(1e+10), float64(0))
}
for e >= int32(1) {
e -= int32(1)
_dekkerMul2(tls, bp, float64(10), float64(0))
}
} else {
for e <= -int32(100) {
e += int32(100)
_dekkerMul2(tls, bp, float64(1e-100), -libc.Float64FromFloat64(1.9991899802602883e-117))
}
for e <= -int32(10) {
e += int32(10)
_dekkerMul2(tls, bp, float64(1e-10), -libc.Float64FromFloat64(3.643219731549774e-27))
}
for e <= -int32(1) {
e += int32(1)
_dekkerMul2(tls, bp, float64(0.1), -libc.Float64FromFloat64(5.551115123125783e-18))
}
}
*(*float64)(unsafe.Pointer(pResult)) = (*(*[2]float64)(unsafe.Pointer(bp)))[0] + (*(*[2]float64)(unsafe.Pointer(bp)))[int32(1)]
if _sqlite3IsNaN(tls, *(*float64)(unsafe.Pointer(pResult))) != 0 {
*(*float64)(unsafe.Pointer(pResult)) = libc.Float64FromFloat64(1e+300) * libc.Float64FromFloat64(1e+300)
}
}
}
if sign < 0 {
*(*float64)(unsafe.Pointer(pResult)) = -*(*float64)(unsafe.Pointer(pResult))
}
goto atof_return
atof_return:
;
/* return true if number and no extra non-whitespace characters after */
if z == zEnd && nDigit > 0 && eValid != 0 && eType > 0 {
return eType
} else {
if eType >= int32(2) && (eType == int32(3) || eValid != 0) && nDigit > 0 {
return -int32(1)
} else {
return 0
}
}
return r1
}
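// Illustrative sketch (hypothetical helper, unsigned decimal ASCII only, no
// exponent clause, sign handling or overflow protection): the shape of the
// conversion above. Digits accumulate into an integer significand, a decimal
// point only adjusts the exponent, and the value is scaled by powers of ten
// at the end.
func _exampleAtoF(s string) float64 {
	var sig uint64
	exp := 0
	seenDot := false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '.' && !seenDot {
			seenDot = true
			continue
		}
		if c < '0' || c > '9' {
			break // stop at the first non-digit
		}
		sig = sig*10 + uint64(c-'0')
		if seenDot {
			exp-- // digits after the point shift the value right
		}
	}
	r := float64(sig)
	for ; exp < 0; exp++ {
		r *= 0.1
	}
	return r
}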
// C documentation
//
// /*
// ** Render a signed 64-bit integer as text. Store the result in zOut[] and
// ** return the length of the string that was stored, in bytes. The value
// ** returned does not include the zero terminator at the end of the output
// ** string.
// **
// ** The caller must ensure that zOut[] is at least 21 bytes in size.
// */
func _sqlite3Int64ToText(tls *libc.TLS, v Ti64, zOut uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var i, v2 int32
var x Tu64
var v1 uint64
var _ /* zTemp at bp+0 */ [22]int8
_, _, _, _ = i, x, v1, v2
if v < 0 {
if v == int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<= int32('0') && c <= int32('9')) {
break
}
u = u*uint64(10) + uint64(c) - uint64('0')
goto _2
_2:
;
i += incr
}
if u > uint64(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)< int32(19)*incr {
v6 = int32(1)
} else {
v6 = _compare2pow63(tls, zNum, incr)
}
c = v6
if c < 0 {
/* zNum is less than 9223372036854775808 so it fits */
return rc
} else {
if neg != 0 {
v7 = int64(-libc.Int32FromInt32(1)) - (libc.Int64FromUint32(0xffffffff) | libc.Int64FromInt32(0x7fffffff)< 0 {
/* zNum is greater than 9223372036854775808 so it overflows */
return int32(2)
} else {
/* zNum is exactly 9223372036854775808. Fits if negative. The
** special case 2 overflow if positive */
if neg != 0 {
v8 = rc
} else {
v8 = int32(3)
}
return v8
}
}
}
return r
}
// C documentation
//
// /*
// ** Transform a UTF-8 integer literal, in either decimal or hexadecimal,
// ** into a 64-bit signed integer. This routine accepts hexadecimal literals,
// ** whereas sqlite3Atoi64() does not.
// **
// ** Returns:
// **
// ** 0 Successful transformation. Fits in a 64-bit signed integer.
// ** 1 Excess text after the integer value
// ** 2 Integer too large for a 64-bit signed integer or is malformed
// ** 3 Special case of 9223372036854775808
// */
func _sqlite3DecOrHexToI64(tls *libc.TLS, z uintptr, pOut uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i, k, n int32
var _ /* u at bp+0 */ Tu64
_, _, _ = i, k, n
if int32(*(*int8)(unsafe.Pointer(z))) == int32('0') && (int32(*(*int8)(unsafe.Pointer(z + 1))) == int32('x') || int32(*(*int8)(unsafe.Pointer(z + 1))) == int32('X')) {
*(*Tu64)(unsafe.Pointer(bp)) = uint64(0)
i = int32(2)
for {
if !(int32(*(*int8)(unsafe.Pointer(z + uintptr(i)))) == int32('0')) {
break
}
goto _1
_1:
;
i++
}
k = i
for {
if !(int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(k))))])&int32(0x08) != 0) {
break
}
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))*uint64(16) + uint64(_sqlite3HexToInt(tls, int32(*(*int8)(unsafe.Pointer(z + uintptr(k))))))
goto _2
_2:
;
k++
}
libc.Xmemcpy(tls, pOut, bp, uint64(8))
if k-i > int32(16) {
return int32(2)
}
if int32(*(*int8)(unsafe.Pointer(z + uintptr(k)))) != 0 {
return int32(1)
}
return 0
} else {
n = int32(libc.Uint64FromInt32(0x3fffffff) & libc.Xstrspn(tls, z, __ccgo_ts+1708))
if *(*int8)(unsafe.Pointer(z + uintptr(n))) != 0 {
n++
}
return _sqlite3Atoi64(tls, z, pOut, n, uint8(SQLITE_UTF8))
}
return r
}
// C documentation
//
// /*
// ** If zNum represents an integer that will fit in 32-bits, then set
// ** *pValue to that integer and return true. Otherwise return false.
// **
// ** This routine accepts both decimal and hexadecimal notation for integers.
// **
// ** Any non-numeric characters that follow zNum are ignored.
// ** This is different from sqlite3Atoi64() which requires the
// ** input number to be zero-terminated.
// */
func _sqlite3GetInt32(tls *libc.TLS, zNum uintptr, pValue uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var c, i, neg, v3 int32
var v Tsqlite_int64
var v4 bool
var _ /* u at bp+0 */ Tu32
_, _, _, _, _, _ = c, i, neg, v, v3, v4
v = 0
neg = 0
if int32(*(*int8)(unsafe.Pointer(zNum))) == int32('-') {
neg = int32(1)
zNum++
} else {
if int32(*(*int8)(unsafe.Pointer(zNum))) == int32('+') {
zNum++
} else {
if int32(*(*int8)(unsafe.Pointer(zNum))) == int32('0') && (int32(*(*int8)(unsafe.Pointer(zNum + 1))) == int32('x') || int32(*(*int8)(unsafe.Pointer(zNum + 1))) == int32('X')) && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zNum + 2)))])&int32(0x08) != 0 {
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0)
zNum += uintptr(2)
for int32(*(*int8)(unsafe.Pointer(zNum))) == int32('0') {
zNum++
}
i = 0
for {
if !(i < int32(8) && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zNum + uintptr(i))))])&int32(0x08) != 0) {
break
}
*(*Tu32)(unsafe.Pointer(bp)) = *(*Tu32)(unsafe.Pointer(bp))*uint32(16) + uint32(_sqlite3HexToInt(tls, int32(*(*int8)(unsafe.Pointer(zNum + uintptr(i))))))
goto _1
_1:
;
i++
}
if *(*Tu32)(unsafe.Pointer(bp))&uint32(0x80000000) == uint32(0) && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zNum + uintptr(i))))])&int32(0x08) == 0 {
libc.Xmemcpy(tls, pValue, bp, uint64(4))
return int32(1)
} else {
return 0
}
}
}
}
if !(int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(zNum)))])&libc.Int32FromInt32(0x04) != 0) {
return 0
}
for int32(*(*int8)(unsafe.Pointer(zNum))) == int32('0') {
zNum++
}
i = 0
for {
if v4 = i < int32(11); v4 {
v3 = int32(*(*int8)(unsafe.Pointer(zNum + uintptr(i)))) - libc.Int32FromUint8('0')
c = v3
}
if !(v4 && v3 >= 0 && c <= int32(9)) {
break
}
v = v*int64(10) + int64(c)
goto _2
_2:
;
i++
}
/* The longest decimal representation of a 32 bit integer is 10 digits:
**
** 1234567890
** 2^31 -> 2147483648
*/
if i > int32(10) {
return 0
}
if v-int64(neg) > int64(2147483647) {
return 0
}
if neg != 0 {
v = -v
}
*(*int32)(unsafe.Pointer(pValue)) = int32(v)
return int32(1)
}
// C documentation
//
// /*
// ** Return a 32-bit integer value extracted from a string. If the
// ** string is not an integer, just return 0.
// */
func _sqlite3Atoi(tls *libc.TLS, z uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* x at bp+0 */ int32
*(*int32)(unsafe.Pointer(bp)) = 0
_sqlite3GetInt32(tls, z, bp)
return *(*int32)(unsafe.Pointer(bp))
}
// C documentation
//
// /*
// ** Decode a floating-point value into an approximate decimal
// ** representation.
// **
// ** Round the decimal representation to n significant digits if
// ** n is positive. Or round to -n significant digits after the
// ** decimal point if n is negative. No rounding is performed if
// ** n is zero.
// **
// ** The significant digits of the decimal representation are
// ** stored in p->z[] which is often (but not always) a pointer
// ** into the middle of p->zBuf[]. There are p->n significant digits.
// ** The p->z[] array is *not* zero-terminated.
// */
func _sqlite3FpDecode(tls *libc.TLS, p uintptr, _r float64, iRound int32, mxRound int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
*(*float64)(unsafe.Pointer(bp)) = _r
var e, exp, i, j, v2, v3, v4 int32
var rr float64
var z uintptr
var v1 uint64
var _ /* rr at bp+16 */ [2]float64
var _ /* v at bp+8 */ Tu64
_, _, _, _, _, _, _, _, _, _ = e, exp, i, j, rr, z, v1, v2, v3, v4
exp = 0
(*TFpDecode)(unsafe.Pointer(p)).FisSpecial = 0
(*TFpDecode)(unsafe.Pointer(p)).Fz = p + 24
/* Convert negative numbers to positive. Deal with Infinity, 0.0, and
** NaN. */
if *(*float64)(unsafe.Pointer(bp)) < float64(0) {
(*TFpDecode)(unsafe.Pointer(p)).Fsign = int8('-')
*(*float64)(unsafe.Pointer(bp)) = -*(*float64)(unsafe.Pointer(bp))
} else {
if *(*float64)(unsafe.Pointer(bp)) == float64(0) {
(*TFpDecode)(unsafe.Pointer(p)).Fsign = int8('+')
(*TFpDecode)(unsafe.Pointer(p)).Fn = int32(1)
(*TFpDecode)(unsafe.Pointer(p)).FiDP = int32(1)
(*TFpDecode)(unsafe.Pointer(p)).Fz = __ccgo_ts + 1724
return
} else {
(*TFpDecode)(unsafe.Pointer(p)).Fsign = int8('+')
}
}
libc.Xmemcpy(tls, bp+8, bp, uint64(8))
e = int32(*(*Tu64)(unsafe.Pointer(bp + 8)) >> int32(52))
if e&int32(0x7ff) == int32(0x7ff) {
(*TFpDecode)(unsafe.Pointer(p)).FisSpecial = int8(int32(1) + libc.BoolInt32(*(*Tu64)(unsafe.Pointer(bp + 8)) != uint64(0x7ff0000000000000)))
(*TFpDecode)(unsafe.Pointer(p)).Fn = 0
(*TFpDecode)(unsafe.Pointer(p)).FiDP = 0
return
}
/* Multiply r by powers of ten until it lands somewhere in between
** 1.0e+19 and 1.0e+17.
*/
if _sqlite3Config.FbUseLongDouble != 0 {
rr = *(*float64)(unsafe.Pointer(bp))
if rr >= float64(1e+19) {
for rr >= libc.Float64FromFloat64(1e+119) {
exp += int32(100)
rr = rr * libc.Float64FromFloat64(1e-100)
}
for rr >= libc.Float64FromFloat64(1e+29) {
exp += int32(10)
rr = rr * libc.Float64FromFloat64(1e-10)
}
for rr >= libc.Float64FromFloat64(1e+19) {
exp++
rr = rr * libc.Float64FromFloat64(0.1)
}
} else {
for rr < libc.Float64FromFloat64(1e-97) {
exp -= int32(100)
rr = rr * libc.Float64FromFloat64(1e+100)
}
for rr < libc.Float64FromFloat64(1e+07) {
exp -= int32(10)
rr = rr * libc.Float64FromFloat64(1e+10)
}
for rr < libc.Float64FromFloat64(1e+17) {
exp--
rr = rr * libc.Float64FromFloat64(10)
}
}
*(*Tu64)(unsafe.Pointer(bp + 8)) = uint64(rr)
} else {
(*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] = *(*float64)(unsafe.Pointer(bp))
(*(*[2]float64)(unsafe.Pointer(bp + 16)))[int32(1)] = float64(0)
if (*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] > float64(9.223372036854775e+18) {
for (*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] > float64(9.223372036854774e+118) {
exp += int32(100)
_dekkerMul2(tls, bp+16, float64(1e-100), -libc.Float64FromFloat64(1.9991899802602883e-117))
}
for (*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] > float64(9.223372036854774e+28) {
exp += int32(10)
_dekkerMul2(tls, bp+16, float64(1e-10), -libc.Float64FromFloat64(3.643219731549774e-27))
}
for (*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] > float64(9.223372036854775e+18) {
exp += int32(1)
_dekkerMul2(tls, bp+16, float64(0.1), -libc.Float64FromFloat64(5.551115123125783e-18))
}
} else {
for (*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] < float64(9.223372036854775e-83) {
exp -= int32(100)
_dekkerMul2(tls, bp+16, float64(1e+100), -libc.Float64FromFloat64(1.5902891109759918e+83))
}
for (*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] < float64(9.223372036854775e+07) {
exp -= int32(10)
_dekkerMul2(tls, bp+16, float64(1e+10), float64(0))
}
for (*(*[2]float64)(unsafe.Pointer(bp + 16)))[0] < float64(9.223372036854775e+17) {
exp -= int32(1)
_dekkerMul2(tls, bp+16, float64(10), float64(0))
}
}
if (*(*[2]float64)(unsafe.Pointer(bp + 16)))[int32(1)] < float64(0) {
v1 = uint64((*(*[2]float64)(unsafe.Pointer(bp + 16)))[0]) - uint64(-(*(*[2]float64)(unsafe.Pointer(bp + 16)))[int32(1)])
} else {
v1 = uint64((*(*[2]float64)(unsafe.Pointer(bp + 16)))[0]) + uint64((*(*[2]float64)(unsafe.Pointer(bp + 16)))[int32(1)])
}
*(*Tu64)(unsafe.Pointer(bp + 8)) = v1
}
/* Extract significant digits. */
i = int32(libc.Uint64FromInt64(24) - libc.Uint64FromInt32(1))
for *(*Tu64)(unsafe.Pointer(bp + 8)) != 0 {
v2 = i
i--
*(*int8)(unsafe.Pointer(p + 24 + uintptr(v2))) = int8(*(*Tu64)(unsafe.Pointer(bp + 8))%uint64(10) + uint64('0'))
*(*Tu64)(unsafe.Pointer(bp + 8)) /= uint64(10)
}
(*TFpDecode)(unsafe.Pointer(p)).Fn = int32(libc.Uint64FromInt64(24) - libc.Uint64FromInt32(1) - uint64(i))
(*TFpDecode)(unsafe.Pointer(p)).FiDP = (*TFpDecode)(unsafe.Pointer(p)).Fn + exp
if iRound <= 0 {
iRound = (*TFpDecode)(unsafe.Pointer(p)).FiDP - iRound
if iRound == 0 && int32(*(*int8)(unsafe.Pointer(p + 24 + uintptr(i+int32(1))))) >= int32('5') {
iRound = int32(1)
v3 = i
i--
*(*int8)(unsafe.Pointer(p + 24 + uintptr(v3))) = int8('0')
(*TFpDecode)(unsafe.Pointer(p)).Fn++
(*TFpDecode)(unsafe.Pointer(p)).FiDP++
}
}
if iRound > 0 && (iRound < (*TFpDecode)(unsafe.Pointer(p)).Fn || (*TFpDecode)(unsafe.Pointer(p)).Fn > mxRound) {
z = p + 24 + uintptr(i+int32(1))
if iRound > mxRound {
iRound = mxRound
}
(*TFpDecode)(unsafe.Pointer(p)).Fn = iRound
if int32(*(*int8)(unsafe.Pointer(z + uintptr(iRound)))) >= int32('5') {
j = iRound - int32(1)
for int32(1) != 0 {
*(*int8)(unsafe.Pointer(z + uintptr(j)))++
if int32(*(*int8)(unsafe.Pointer(z + uintptr(j)))) <= int32('9') {
break
}
*(*int8)(unsafe.Pointer(z + uintptr(j))) = int8('0')
if j == 0 {
v4 = i
i--
*(*int8)(unsafe.Pointer((*TFpDecode)(unsafe.Pointer(p)).Fz + uintptr(v4))) = int8('1')
(*TFpDecode)(unsafe.Pointer(p)).Fn++
(*TFpDecode)(unsafe.Pointer(p)).FiDP++
break
} else {
j--
}
}
}
}
(*TFpDecode)(unsafe.Pointer(p)).Fz = p + 24 + uintptr(i+int32(1))
for (*TFpDecode)(unsafe.Pointer(p)).Fn > 0 && int32(*(*int8)(unsafe.Pointer((*TFpDecode)(unsafe.Pointer(p)).Fz + uintptr((*TFpDecode)(unsafe.Pointer(p)).Fn-int32(1))))) == int32('0') {
(*TFpDecode)(unsafe.Pointer(p)).Fn--
}
}
// C documentation
//
// /*
// ** Try to convert z into an unsigned 32-bit integer. Return true on
// ** success and false if there is an error.
// **
// ** Only decimal notation is accepted.
// */
func _sqlite3GetUInt32(tls *libc.TLS, z uintptr, pI uintptr) (r int32) {
var i int32
var v Tu64
_, _ = i, v
v = uint64(0)
i = 0
for {
if !(int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(z + uintptr(i))))])&int32(0x04) != 0) {
break
}
v = v*uint64(10) + uint64(*(*int8)(unsafe.Pointer(z + uintptr(i)))) - uint64('0')
if v > uint64(4294967296) {
*(*Tu32)(unsafe.Pointer(pI)) = uint32(0)
return 0
}
goto _1
_1:
;
i++
}
if i == 0 || int32(*(*int8)(unsafe.Pointer(z + uintptr(i)))) != 0 {
*(*Tu32)(unsafe.Pointer(pI)) = uint32(0)
return 0
}
*(*Tu32)(unsafe.Pointer(pI)) = uint32(v)
return int32(1)
}
/*
** The variable-length integer encoding is as follows:
**
** KEY:
** A = 0xxxxxxx 7 bits of data and one flag bit
** B = 1xxxxxxx 7 bits of data and one flag bit
** C = xxxxxxxx 8 bits of data
**
** 7 bits - A
** 14 bits - BA
** 21 bits - BBA
** 28 bits - BBBA
** 35 bits - BBBBA
** 42 bits - BBBBBA
** 49 bits - BBBBBBA
** 56 bits - BBBBBBBA
** 64 bits - BBBBBBBBC
*/
// C documentation
//
// /*
// ** Write a 64-bit variable-length integer to memory starting at p[0].
// ** The length of the data written will be between 1 and 9 bytes. The number
// ** of bytes written is returned.
// **
// ** A variable-length integer consists of the lower 7 bits of each byte
// ** for all bytes that have the 8th bit set and one byte with the 8th
// ** bit clear. Except, if we get to the 9th byte, it stores the full
// ** 8 bits and is the last byte.
// */
func _putVarint64(tls *libc.TLS, p uintptr, v Tu64) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i, j, n, v2 int32
var p3 uintptr
var _ /* buf at bp+0 */ [10]Tu8
_, _, _, _, _ = i, j, n, v2, p3
if v&(libc.Uint64FromUint32(0xff000000)<<libc.Int32FromInt32(32)) != 0 {
*(*uint8)(unsafe.Pointer(p + 8)) = uint8(v)
v >>= uint64(8)
i = int32(7)
for {
if !(i >= 0) {
break
}
*(*uint8)(unsafe.Pointer(p + uintptr(i))) = uint8(v&libc.Uint64FromInt32(0x7f) | libc.Uint64FromInt32(0x80))
v >>= uint64(7)
goto _1
_1:
;
i--
}
return int32(9)
}
n = 0
for cond := true; cond; cond = v != uint64(0) {
v2 = n
n++
(*(*[10]Tu8)(unsafe.Pointer(bp)))[v2] = uint8(v&libc.Uint64FromInt32(0x7f) | libc.Uint64FromInt32(0x80))
v >>= uint64(7)
}
p3 = bp
*(*Tu8)(unsafe.Pointer(p3)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p3))) & libc.Int32FromInt32(0x7f))
i = 0
j = n - libc.Int32FromInt32(1)
for {
if !(j >= 0) {
break
}
*(*uint8)(unsafe.Pointer(p + uintptr(i))) = (*(*[10]Tu8)(unsafe.Pointer(bp)))[j]
goto _4
_4:
;
j--
i++
}
return n
}
func _sqlite3PutVarint(tls *libc.TLS, p uintptr, v Tu64) (r int32) {
if v <= uint64(0x7f) {
*(*uint8)(unsafe.Pointer(p)) = uint8(v & uint64(0x7f))
return int32(1)
}
if v <= uint64(0x3fff) {
*(*uint8)(unsafe.Pointer(p)) = uint8(v>>libc.Int32FromInt32(7)&uint64(0x7f) | uint64(0x80))
*(*uint8)(unsafe.Pointer(p + 1)) = uint8(v & uint64(0x7f))
return int32(2)
}
return _putVarint64(tls, p, v)
}
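// Illustrative sketch (hypothetical helper; buf must have room for 9 bytes):
// the variable-length integer format described above. The first one to eight
// bytes each carry seven payload bits, most significant group first, with the
// 0x80 bit meaning "more bytes follow"; a ninth byte, when needed, carries a
// full eight bits. Decoding simply reverses this.
func _exampleVarintPut(buf []byte, v uint64) int {
	if v <= 0x7f {
		buf[0] = byte(v) // single byte, high bit clear
		return 1
	}
	if v&(0xff000000<<32) != 0 { // top 8 bits set: full 9-byte form
		buf[8] = byte(v)
		v >>= 8
		for i := 7; i >= 0; i-- {
			buf[i] = byte(v&0x7f | 0x80)
			v >>= 7
		}
		return 9
	}
	var tmp [9]byte
	n := 0
	for ; v != 0; v >>= 7 {
		tmp[n] = byte(v&0x7f | 0x80) // least significant group first
		n++
	}
	tmp[0] &= 0x7f // this group ends up last, so its high bit is clear
	for i := 0; i < n; i++ {
		buf[i] = tmp[n-1-i] // reverse into most-significant-first order
	}
	return n
}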
/*
** Bitmasks used by sqlite3GetVarint(). These precomputed constants
** are defined here rather than simply putting the constant expressions
** inline in order to work around bugs in the RVT compiler.
**
** SLOT_2_0 A mask for (0x7f<<14) | 0x7f
**
** SLOT_4_2_0 A mask for (0x7f<<28) | SLOT_2_0
*/
// C documentation
//
// /*
// ** Read a 64-bit variable-length integer from memory starting at p[0].
// ** Return the number of bytes read. The value is stored in *v.
// */
func _sqlite3GetVarint(tls *libc.TLS, p uintptr, v uintptr) (r Tu8) {
var a, b, s Tu32
_, _, _ = a, b, s
if int32(*(*int8)(unsafe.Pointer(p))) >= 0 {
*(*Tu64)(unsafe.Pointer(v)) = uint64(*(*uint8)(unsafe.Pointer(p)))
return uint8(1)
}
if int32(*(*int8)(unsafe.Pointer(p + 1))) >= 0 {
*(*Tu64)(unsafe.Pointer(v)) = uint64(uint32(int32(*(*uint8)(unsafe.Pointer(p)))&libc.Int32FromInt32(0x7f))<> int32(18)
*(*Tu64)(unsafe.Pointer(v)) = uint64(s)<> int32(18)
*(*Tu64)(unsafe.Pointer(v)) = uint64(s)<> int32(11)
*(*Tu64)(unsafe.Pointer(v)) = uint64(s)<> int32(4)
*(*Tu64)(unsafe.Pointer(v)) = uint64(s)<> int32(3)
s |= b
*(*Tu64)(unsafe.Pointer(v)) = uint64(s)<>= uint64(7)
if !(v != uint64(0)) {
break
}
goto _1
_1:
;
i++
}
return i
}
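// Illustrative sketch (not part of the generated amalgamation): a plain-Go
// reader for the same variable-length integer format, the inverse of the
// encoder sketched earlier. Assuming buf holds a complete varint, it
// accumulates 7 bits per byte until it sees a byte with the high bit clear,
// or consumes a full 8-bit ninth byte.
func exampleVarintGet(buf []byte) (v uint64, n int) {
for n = 0; n < 8; n++ {
v = v<<7 | uint64(buf[n]&0x7f)
if buf[n]&0x80 == 0 {
return v, n + 1
}
}
// Ninth byte: all 8 bits are data.
v = v<<8 | uint64(buf[8])
return v, 9
}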
// C documentation
//
// /*
// ** Read or write a four-byte big-endian integer value.
// */
func _sqlite3Get4byte(tls *libc.TLS, p uintptr) (r Tu32) {
return uint32(*(*Tu8)(unsafe.Pointer(p)))<<libc.Int32FromInt32(24) | uint32(*(*Tu8)(unsafe.Pointer(p + 1)))<<libc.Int32FromInt32(16) | uint32(*(*Tu8)(unsafe.Pointer(p + 2)))<<libc.Int32FromInt32(8) | uint32(*(*Tu8)(unsafe.Pointer(p + 3)))
}

func _sqlite3Put4byte(tls *libc.TLS, p uintptr, v Tu32) {
*(*uint8)(unsafe.Pointer(p)) = uint8(v >> libc.Int32FromInt32(24))
*(*uint8)(unsafe.Pointer(p + 1)) = uint8(v >> libc.Int32FromInt32(16))
*(*uint8)(unsafe.Pointer(p + 2)) = uint8(v >> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(p + 3)) = uint8(v)
}
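// Illustrative sketch (not part of the generated amalgamation): the same
// big-endian four-byte load and store expressed in plain Go, assuming p has
// at least four bytes.
func exampleGet4byte(p []byte) uint32 {
return uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])
}

func examplePut4byte(p []byte, v uint32) {
p[0] = byte(v >> 24)
p[1] = byte(v >> 16)
p[2] = byte(v >> 8)
p[3] = byte(v)
}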
// C documentation
//
// /*
// ** Translate a single byte of Hex into an integer.
// ** This routine only works if h really is a valid hexadecimal
// ** character: 0..9a..fA..F
// */
func _sqlite3HexToInt(tls *libc.TLS, h int32) (r Tu8) {
h += int32(9) * (int32(1) & (h >> int32(6)))
return uint8(h & libc.Int32FromInt32(0xf))
}
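// Illustrative sketch (not part of the generated amalgamation): the same
// branch-free hex-digit trick in plain Go. For '0'..'9' (0x30..0x39) bit 6 is
// clear, so the value is just the low nibble; for 'a'..'f' and 'A'..'F'
// (0x61.. and 0x41..) bit 6 is set, so 9 is added before masking.
func exampleHexDigitValue(h byte) byte {
v := int32(h)
v += 9 * (1 & (v >> 6))
return byte(v & 0xf)
}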
// C documentation
//
// /*
// ** Convert a BLOB literal of the form "x'hhhhhh'" into its binary
// ** value. Return a pointer to its binary value. Space to hold the
// ** binary value has been obtained from malloc and must be freed by
// ** the calling routine.
// */
func _sqlite3HexToBlob(tls *libc.TLS, db uintptr, z uintptr, n int32) (r uintptr) {
var i int32
var zBlob uintptr
_, _ = i, zBlob
zBlob = _sqlite3DbMallocRawNN(tls, db, uint64(n/int32(2)+int32(1)))
n--
if zBlob != 0 {
i = 0
for {
if !(i < n) {
break
}
*(*int8)(unsafe.Pointer(zBlob + uintptr(i/int32(2)))) = int8(int32(_sqlite3HexToInt(tls, int32(*(*int8)(unsafe.Pointer(z + uintptr(i))))))<<libc.Int32FromInt32(4) | int32(_sqlite3HexToInt(tls, int32(*(*int8)(unsafe.Pointer(z + uintptr(i+int32(1))))))))
goto _1
_1:
;
i += int32(2)
}
*(*int8)(unsafe.Pointer(zBlob + uintptr(i/int32(2)))) = 0
}
return zBlob
}

// C documentation
//
// /*
// ** Attempt to add, subtract, or multiply the 64-bit signed value iB against
// ** the other 64-bit signed integer at *pA and store the result in *pA.
// ** Return 0 on success.  Or if the operation would have resulted in an
// ** overflow, leave *pA unchanged and return 1.
// */
func _sqlite3AddInt64(tls *libc.TLS, pA uintptr, iB Ti64) (r int32) {
var iA Ti64
_ = iA
iA = *(*Ti64)(unsafe.Pointer(pA))
if iB >= 0 {
if iA > 0 && libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)-iA < iB {
return int32(1)
}
} else {
if iA < 0 && -(iA+(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32))) > iB+int64(1) {
return int32(1)
}
}
*(*Ti64)(unsafe.Pointer(pA)) += iB
return 0
}
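// Illustrative sketch (not part of the generated amalgamation): the same kind
// of overflow guard for signed 64-bit addition in plain Go. The helper name
// and the (result, ok) return shape are the sketch's own choices.
func exampleAddInt64(a, b int64) (int64, bool) {
const largest int64 = 1<<63 - 1
if b >= 0 {
if a > 0 && largest-a < b {
return a, false // would overflow upward
}
} else {
if a < 0 && -(a+largest) > b+1 {
return a, false // would overflow downward
}
}
return a + b, true
}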
func _sqlite3SubInt64(tls *libc.TLS, pA uintptr, iB Ti64) (r int32) {
if iB == int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)) {
if *(*Ti64)(unsafe.Pointer(pA)) >= 0 {
return int32(1)
}
*(*Ti64)(unsafe.Pointer(pA)) -= iB
return 0
} else {
return _sqlite3AddInt64(tls, pA, -iB)
}
return r
}
func _sqlite3MulInt64(tls *libc.TLS, pA uintptr, iB Ti64) (r int32) {
var iA Ti64
_ = iA
iA = *(*Ti64)(unsafe.Pointer(pA))
if iB > 0 {
if iA > (libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32))/iB {
return int32(1)
}
if iA < (int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)))/iB {
return int32(1)
}
} else {
if iB < 0 {
if iA > 0 {
if iB < (int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)))/iA {
return int32(1)
}
} else {
if iA < 0 {
if iB == int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)) {
return int32(1)
}
if iA == int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)) {
return int32(1)
}
if -iA > (libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32))/-iB {
return int32(1)
}
}
}
}
}
*(*Ti64)(unsafe.Pointer(pA)) = iA * iB
return 0
}

// C documentation
//
// /*
// ** Compute the absolute value of a 32-bit signed integer, if possible.  Or
// ** if the value of x is its minimum value, return the maximum positive value.
// */
func _sqlite3AbsInt32(tls *libc.TLS, x int32) (r int32) {
if x >= 0 {
return x
}
if x == libc.Int32FromUint32(0x80000000) {
return int32(0x7fffffff)
}
return -x
}
// C documentation
//
// /*
// ** Find (an approximate) sum of two LogEst values. This computation is
// ** not a simple "+" operator because LogEst is stored as a logarithmic
// ** value.
// **
// */
func _sqlite3LogEstAdd(tls *libc.TLS, a TLogEst, b TLogEst) (r TLogEst) {
if int32(a) >= int32(b) {
if int32(a) > int32(b)+int32(49) {
return a
}
if int32(a) > int32(b)+int32(31) {
return int16(int32(a) + int32(1))
}
return int16(int32(a) + int32(_x[int32(a)-int32(b)]))
} else {
if int32(b) > int32(a)+int32(49) {
return b
}
if int32(b) > int32(a)+int32(31) {
return int16(int32(b) + int32(1))
}
return int16(int32(b) + int32(_x[int32(b)-int32(a)]))
}
return r
}
var _x = [32]uint8{
0: uint8(10),
1: uint8(10),
2: uint8(9),
3: uint8(9),
4: uint8(8),
5: uint8(8),
6: uint8(7),
7: uint8(7),
8: uint8(7),
9: uint8(6),
10: uint8(6),
11: uint8(6),
12: uint8(5),
13: uint8(5),
14: uint8(5),
15: uint8(4),
16: uint8(4),
17: uint8(4),
18: uint8(4),
19: uint8(3),
20: uint8(3),
21: uint8(3),
22: uint8(3),
23: uint8(3),
24: uint8(3),
25: uint8(2),
26: uint8(2),
27: uint8(2),
28: uint8(2),
29: uint8(2),
30: uint8(2),
31: uint8(2),
}
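// Illustrative sketch (not part of the generated amalgamation): why the table
// above works. For a >= b, 10*log2(2^(a/10) + 2^(b/10)) equals
// a + 10*log2(1 + 2^((b-a)/10)), so the correction depends only on a-b. The
// table stores that correction (rounded) for differences 0..31, differences
// of 32..49 round to +1, and anything larger adds nothing.
func exampleLogEstAdd(a, b int16) int16 {
corr := [32]int16{10, 10, 9, 9, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2}
if a < b {
a, b = b, a
}
switch {
case a > b+49:
return a
case a > b+31:
return a + 1
default:
return a + corr[a-b]
}
}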
// C documentation
//
// /*
// ** Convert an integer into a LogEst. In other words, compute an
// ** approximation for 10*log2(x).
// */
func _sqlite3LogEst(tls *libc.TLS, x Tu64) (r TLogEst) {
var y TLogEst
_ = y
y = int16(40)
if x < uint64(8) {
if x < uint64(2) {
return 0
}
for x < uint64(8) {
y = TLogEst(int32(y) - libc.Int32FromInt32(10))
x <<= uint64(1)
}
} else {
for x > uint64(255) {
y = TLogEst(int32(y) + libc.Int32FromInt32(40))
x >>= uint64(4)
} /*OPTIMIZATION-IF-TRUE*/
for x > uint64(15) {
y = TLogEst(int32(y) + libc.Int32FromInt32(10))
x >>= uint64(1)
}
}
return int16(int32(_a[x&uint64(7)]) + int32(y) - int32(10))
}
var _a = [8]TLogEst{
1: int16(2),
2: int16(3),
3: int16(5),
4: int16(6),
5: int16(7),
6: int16(8),
7: int16(9),
}
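// Illustrative sketch (not part of the generated amalgamation): the same
// integer approximation of 10*log2(x) in plain Go. For example, x=1000 gives
// 99, close to 10*log2(1000) ~= 99.66: one right-shift by 4 adds 40, two
// right-shifts by 1 add 20, the residue table contributes 9, and the final
// -10 bias adjustment yields 99.
func exampleLogEst(x uint64) int16 {
a := [8]int16{0, 2, 3, 5, 6, 7, 8, 9} // ~10*log2(n/8) for n = 8..15
y := int16(40)                        // 10*log2(16), the starting bias
if x < 8 {
if x < 2 {
return 0
}
for x < 8 {
y -= 10
x <<= 1
}
} else {
for x > 255 {
y += 40 // each nibble shifted off adds 10*log2(16)
x >>= 4
}
for x > 15 {
y += 10 // each bit shifted off adds 10*log2(2)
x >>= 1
}
}
return a[x&7] + y - 10
}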
// C documentation
//
// /*
// ** Convert a double into a LogEst
// ** In other words, compute an approximation for 10*log2(x).
// */
func _sqlite3LogEstFromDouble(tls *libc.TLS, _x float64) (r TLogEst) {
bp := tls.Alloc(16)
defer tls.Free(16)
*(*float64)(unsafe.Pointer(bp)) = _x
var e TLogEst
var _ /* a at bp+8 */ Tu64
_ = e
if *(*float64)(unsafe.Pointer(bp)) <= libc.Float64FromInt32(1) {
return 0
}
if *(*float64)(unsafe.Pointer(bp)) <= libc.Float64FromInt32(2000000000) {
return _sqlite3LogEst(tls, uint64(*(*float64)(unsafe.Pointer(bp))))
}
libc.Xmemcpy(tls, bp+8, bp, uint64(8))
e = int16(*(*Tu64)(unsafe.Pointer(bp + 8))>>libc.Int32FromInt32(52) - uint64(1022))
return int16(int32(e) * int32(10))
}
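// Illustrative sketch (not part of the generated amalgamation): for large
// doubles the routine above simply reads the IEEE-754 exponent field. Bits
// 52..62 hold exponent+1023, so (bits>>52)-1022 is roughly log2(x)+1, and
// multiplying by 10 gives a LogEst without floating-point math. The helper
// assumes a positive, finite, large input.
func exampleLogEstFromLargeDouble(x float64) int16 {
bits := *(*uint64)(unsafe.Pointer(&x)) // reinterpret the float's bit pattern
e := int16(bits>>52) - 1022
return e * 10
}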
// C documentation
//
// /*
// ** Convert a LogEst into an integer.
// */
func _sqlite3LogEstToInt(tls *libc.TLS, x TLogEst) (r Tu64) {
var n Tu64
var v1 uint64
_, _ = n, v1
n = uint64(int32(x) % int32(10))
x = TLogEst(int32(x) / libc.Int32FromInt32(10))
if n >= uint64(5) {
n -= uint64(2)
} else {
if n >= uint64(1) {
n -= uint64(1)
}
}
if int32(x) > int32(60) {
return uint64(libc.Int64FromUint32(0xffffffff) | libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32))
}
if int32(x) >= int32(3) {
v1 = (n + uint64(8)) << (int32(x) - int32(3))
} else {
v1 = (n + uint64(8)) >> (int32(3) - int32(x))
}
return v1
}
// C documentation
//
// /*
// ** Add a new name/number pair to a VList. This might require that the
// ** VList object be reallocated, so return the new VList. If an OOM
// ** error occurs, the original VList is returned and the
// ** db->mallocFailed flag is set.
// **
// ** A VList is really just an array of integers. To destroy a VList,
// ** simply pass it to sqlite3DbFree().
// **
// ** The first integer is the number of integers allocated for the whole
// ** VList. The second integer is the number of integers actually used.
// ** Each name/number pair is encoded by subsequent groups of 3 or more
// ** integers.
// **
// ** Each name/number pair starts with two integers which are the numeric
// ** value for the pair and the size of the name/number pair, respectively.
// ** The text name overlays one or more following integers. The text name
// ** is always zero-terminated.
// **
// ** Conceptually:
// **
// ** struct VList {
// ** int nAlloc; // Number of allocated slots
// ** int nUsed; // Number of used slots
// ** struct VListEntry {
// ** int iValue; // Value for this entry
// ** int nSlot; // Slots used by this entry
// ** // ... variable name goes here
// ** } a[0];
// ** }
// **
// ** During code generation, pointers to the variable names within the
// ** VList are taken. When that happens, nAlloc is set to zero as an
// ** indication that the VList may never again be enlarged, since the
// ** accompanying realloc() would invalidate the pointers.
// */
func _sqlite3VListAdd(tls *libc.TLS, db uintptr, pIn uintptr, zName uintptr, nName int32, iVal int32) (r uintptr) {
var i, nInt int32
var nAlloc Tsqlite3_int64
var pOut, z uintptr
var v1 int64
_, _, _, _, _, _ = i, nAlloc, nInt, pOut, z, v1 /* Index in pIn[] where zName is stored */
nInt = nName/int32(4) + int32(3)
/* Verify ok to add new elements */
if pIn == uintptr(0) || *(*TVList)(unsafe.Pointer(pIn + 1*4))+nInt > *(*TVList)(unsafe.Pointer(pIn)) {
if pIn != 0 {
v1 = int64(2) * int64(*(*TVList)(unsafe.Pointer(pIn)))
} else {
v1 = int64(10)
}
/* Enlarge the allocation */
nAlloc = v1 + int64(nInt)
pOut = _sqlite3DbRealloc(tls, db, pIn, uint64(nAlloc)*uint64(4))
if pOut == uintptr(0) {
return pIn
}
if pIn == uintptr(0) {
*(*TVList)(unsafe.Pointer(pOut + 1*4)) = int32(2)
}
pIn = pOut
*(*TVList)(unsafe.Pointer(pIn)) = int32(nAlloc)
}
i = *(*TVList)(unsafe.Pointer(pIn + 1*4))
*(*TVList)(unsafe.Pointer(pIn + uintptr(i)*4)) = iVal
*(*TVList)(unsafe.Pointer(pIn + uintptr(i+int32(1))*4)) = nInt
z = pIn + uintptr(i+int32(2))*4
*(*TVList)(unsafe.Pointer(pIn + 1*4)) = i + nInt
libc.Xmemcpy(tls, z, zName, uint64(nName))
*(*int8)(unsafe.Pointer(z + uintptr(nName))) = 0
return pIn
}
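// Illustrative sketch (not part of the generated amalgamation): the VList
// layout described above, modeled as a plain []int32. Slot 0 holds nAlloc,
// slot 1 holds nUsed, and each entry is (iValue, nSlot) followed by the
// zero-terminated name packed into the remaining int32 slots (little-endian
// byte packing is an assumption of this sketch, standing in for the memcpy
// in the real code).
func exampleVListAdd(list []int32, name string, val int32) []int32 {
nInt := int32(len(name))/4 + 3 // slots needed: value, size, and the name bytes
if list == nil {
list = make([]int32, 2, 2+nInt)
list[1] = 2 // the first two slots are the header
}
i := list[1]
for int32(len(list)) < i+nInt {
list = append(list, 0) // grow; appended zeros also supply the terminator
}
list[i] = val
list[i+1] = nInt
for k, c := range []byte(name) { // overlay the name bytes onto the int32 slots
list[i+2+int32(k)/4] |= int32(c) << (8 * (uint(k) % 4))
}
list[1] = i + nInt
list[0] = int32(len(list))
return list
}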
// C documentation
//
// /*
// ** Return a pointer to the name of a variable in the given VList that
// ** has the value iVal. Or return NULL if there is no such variable in
// ** the list.
// */
func _sqlite3VListNumToName(tls *libc.TLS, pIn uintptr, iVal int32) (r uintptr) {
var i, mx int32
_, _ = i, mx
if pIn == uintptr(0) {
return uintptr(0)
}
mx = *(*TVList)(unsafe.Pointer(pIn + 1*4))
i = int32(2)
for cond := true; cond; cond = i < mx {
if *(*TVList)(unsafe.Pointer(pIn + uintptr(i)*4)) == iVal {
return pIn + uintptr(i+int32(2))*4
}
i += *(*TVList)(unsafe.Pointer(pIn + uintptr(i+int32(1))*4))
}
return uintptr(0)
}
// C documentation
//
// /*
// ** Return the number of the variable named zName, if it is in the VList,
// ** or return 0 if there is no such variable.
// */
func _sqlite3VListNameToNum(tls *libc.TLS, pIn uintptr, zName uintptr, nName int32) (r int32) {
var i, mx int32
var z uintptr
_, _, _ = i, mx, z
if pIn == uintptr(0) {
return 0
}
mx = *(*TVList)(unsafe.Pointer(pIn + 1*4))
i = int32(2)
for cond := true; cond; cond = i < mx {
z = pIn + uintptr(i+int32(2))*4
if libc.Xstrncmp(tls, z, zName, uint64(nName)) == 0 && int32(*(*int8)(unsafe.Pointer(z + uintptr(nName)))) == 0 {
return *(*TVList)(unsafe.Pointer(pIn + uintptr(i)*4))
}
i += *(*TVList)(unsafe.Pointer(pIn + uintptr(i+int32(1))*4))
}
return 0
}
/*
** High-resolution hardware timer used for debugging and testing only.
*/
/************** End of util.c ************************************************/
/************** Begin file hash.c ********************************************/
/*
** 2001 September 22
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This is the implementation of generic hash-tables
** used in SQLite.
*/
/* #include "sqliteInt.h" */
/* #include <assert.h> */
// C documentation
//
// /* Turn bulk memory into a hash table object by initializing the
// ** fields of the Hash structure.
// **
// ** "pNew" is a pointer to the hash table that is to be initialized.
// */
func _sqlite3HashInit(tls *libc.TLS, pNew uintptr) {
(*THash)(unsafe.Pointer(pNew)).Ffirst = uintptr(0)
(*THash)(unsafe.Pointer(pNew)).Fcount = uint32(0)
(*THash)(unsafe.Pointer(pNew)).Fhtsize = uint32(0)
(*THash)(unsafe.Pointer(pNew)).Fht = uintptr(0)
}
// C documentation
//
// /* Remove all entries from a hash table. Reclaim all memory.
// ** Call this routine to delete a hash table or to reset a hash table
// ** to the empty state.
// */
func _sqlite3HashClear(tls *libc.TLS, pH uintptr) {
var elem, next_elem uintptr
_, _ = elem, next_elem /* For looping over all elements of the table */
elem = (*THash)(unsafe.Pointer(pH)).Ffirst
(*THash)(unsafe.Pointer(pH)).Ffirst = uintptr(0)
Xsqlite3_free(tls, (*THash)(unsafe.Pointer(pH)).Fht)
(*THash)(unsafe.Pointer(pH)).Fht = uintptr(0)
(*THash)(unsafe.Pointer(pH)).Fhtsize = uint32(0)
for elem != 0 {
next_elem = (*THashElem)(unsafe.Pointer(elem)).Fnext
Xsqlite3_free(tls, elem)
elem = next_elem
}
(*THash)(unsafe.Pointer(pH)).Fcount = uint32(0)
}
// C documentation
//
// /*
// ** The hashing function.
// */
func _strHash(tls *libc.TLS, z uintptr) (r uint32) {
var c, v1 uint8
var h uint32
var v2 uintptr
_, _, _, _ = c, h, v1, v2
h = uint32(0)
for {
v2 = z
z++
v1 = uint8(*(*int8)(unsafe.Pointer(v2)))
c = v1
if !(int32(v1) != 0) {
break
} /*OPTIMIZATION-IF-TRUE*/
/* Knuth multiplicative hashing. (Sorting & Searching, p. 510).
** 0x9e3779b1 is 2654435761 which is the closest prime number to
** (2**32)*golden_ratio, where golden_ratio = (sqrt(5) - 1)/2. */
h += uint32(_sqlite3UpperToLower[c])
h *= uint32(0x9e3779b1)
}
return h
}
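// Illustrative sketch (not part of the generated amalgamation): the same
// case-insensitive Knuth multiplicative hash in plain Go, folding each ASCII
// character to lower case before mixing with the prime 0x9e3779b1
// (2654435761, the prime closest to 2^32 times the golden ratio).
func exampleStrHash(z string) uint32 {
var h uint32
for i := 0; i < len(z); i++ {
c := z[i]
if c >= 'A' && c <= 'Z' {
c += 'a' - 'A' // same effect as the sqlite3UpperToLower table for ASCII
}
h += uint32(c)
h *= 0x9e3779b1
}
return h
}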
// C documentation
//
// /* Link pNew element into the hash table pH. If pEntry!=0 then also
// ** insert pNew into the pEntry hash bucket.
// */
func _insertElement(tls *libc.TLS, pH uintptr, pEntry uintptr, pNew uintptr) {
var pHead, v1 uintptr
_, _ = pHead, v1 /* First element already in pEntry */
if pEntry != 0 {
if (*T_ht)(unsafe.Pointer(pEntry)).Fcount != 0 {
v1 = (*T_ht)(unsafe.Pointer(pEntry)).Fchain
} else {
v1 = uintptr(0)
}
pHead = v1
(*T_ht)(unsafe.Pointer(pEntry)).Fcount++
(*T_ht)(unsafe.Pointer(pEntry)).Fchain = pNew
} else {
pHead = uintptr(0)
}
if pHead != 0 {
(*THashElem)(unsafe.Pointer(pNew)).Fnext = pHead
(*THashElem)(unsafe.Pointer(pNew)).Fprev = (*THashElem)(unsafe.Pointer(pHead)).Fprev
if (*THashElem)(unsafe.Pointer(pHead)).Fprev != 0 {
(*THashElem)(unsafe.Pointer((*THashElem)(unsafe.Pointer(pHead)).Fprev)).Fnext = pNew
} else {
(*THash)(unsafe.Pointer(pH)).Ffirst = pNew
}
(*THashElem)(unsafe.Pointer(pHead)).Fprev = pNew
} else {
(*THashElem)(unsafe.Pointer(pNew)).Fnext = (*THash)(unsafe.Pointer(pH)).Ffirst
if (*THash)(unsafe.Pointer(pH)).Ffirst != 0 {
(*THashElem)(unsafe.Pointer((*THash)(unsafe.Pointer(pH)).Ffirst)).Fprev = pNew
}
(*THashElem)(unsafe.Pointer(pNew)).Fprev = uintptr(0)
(*THash)(unsafe.Pointer(pH)).Ffirst = pNew
}
}
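// Illustrative sketch (not part of the generated amalgamation) of the design
// used here: every element lives on one doubly-linked list for the whole
// table, and each bucket stores only a count plus a pointer to the first
// element of its contiguous run within that list. Inserting into a bucket
// therefore means splicing the new element in just before the bucket's
// current head. The type and field names below are the sketch's own, loosely
// mirroring Hash, HashElem, and the per-bucket _ht structure.
type exampleHashElem struct {
next, prev *exampleHashElem
key        string
data       interface{}
}

type exampleHashBucket struct {
count int
chain *exampleHashElem // first element of this bucket's run
}

type exampleHash struct {
first *exampleHashElem // head of the single all-elements list
ht    []exampleHashBucket
}

func (h *exampleHash) insertIntoBucket(b *exampleHashBucket, e *exampleHashElem) {
var head *exampleHashElem
if b != nil {
if b.count > 0 {
head = b.chain
}
b.count++
b.chain = e
}
if head != nil { // splice e in just before the bucket's old head
e.next = head
e.prev = head.prev
if head.prev != nil {
head.prev.next = e
} else {
h.first = e
}
head.prev = e
} else { // empty bucket (or no bucket array): push onto the front of the list
e.next = h.first
if h.first != nil {
h.first.prev = e
}
e.prev = nil
h.first = e
}
}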
// C documentation
//
// /* Resize the hash table so that it contains "new_size" buckets.
// **
// ** The hash table might fail to resize if sqlite3_malloc() fails or
// ** if the new size is the same as the prior size.
// ** Return TRUE if the resize occurs and false if not.
// */
func _rehash(tls *libc.TLS, pH uintptr, new_size uint32) (r int32) {
var elem, new_ht, next_elem uintptr
var h, v1 uint32
_, _, _, _, _ = elem, h, new_ht, next_elem, v1 /* For looping over existing elements */
if uint64(new_size)*uint64(16) > uint64(SQLITE_MALLOC_SOFT_LIMIT) {
new_size = uint32(libc.Uint64FromInt32(SQLITE_MALLOC_SOFT_LIMIT) / libc.Uint64FromInt64(16))
}
if new_size == (*THash)(unsafe.Pointer(pH)).Fhtsize {
return 0
}
/* The inability to allocate space for a larger hash table is
** a performance hit but it is not a fatal error. So mark the
** allocation as benign. Use sqlite3Malloc()/memset(0) instead of
** sqlite3MallocZero() to make the allocation, as sqlite3MallocZero()
** only zeroes the requested number of bytes whereas this module will
** use the actual amount of space allocated for the hash table (which
** may be larger than the requested amount).
*/
_sqlite3BeginBenignMalloc(tls)
new_ht = _sqlite3Malloc(tls, uint64(new_size)*uint64(16))
_sqlite3EndBenignMalloc(tls)
if new_ht == uintptr(0) {
return 0
}
Xsqlite3_free(tls, (*THash)(unsafe.Pointer(pH)).Fht)
(*THash)(unsafe.Pointer(pH)).Fht = new_ht
v1 = uint32(uint64(_sqlite3MallocSize(tls, new_ht)) / libc.Uint64FromInt64(16))
new_size = v1
(*THash)(unsafe.Pointer(pH)).Fhtsize = v1
libc.Xmemset(tls, new_ht, 0, uint64(new_size)*uint64(16))
elem = (*THash)(unsafe.Pointer(pH)).Ffirst
(*THash)(unsafe.Pointer(pH)).Ffirst = libc.UintptrFromInt32(0)
for {
if !(elem != 0) {
break
}
h = _strHash(tls, (*THashElem)(unsafe.Pointer(elem)).FpKey) % new_size
next_elem = (*THashElem)(unsafe.Pointer(elem)).Fnext
_insertElement(tls, pH, new_ht+uintptr(h)*16, elem)
goto _2
_2:
;
elem = next_elem
}
return int32(1)
}
// C documentation
//
// /* This function (for internal use only) locates an element in a
// ** hash table that matches the given key. If no element is found,
// ** a pointer to a static null element with HashElem.data==0 is returned.
// ** If pHash is not NULL, then the hash for this key is written to *pHash.
// */
func _findElementWithHash(tls *libc.TLS, pH uintptr, pKey uintptr, pHash uintptr) (r uintptr) {
var count, h uint32
var elem, pEntry uintptr
_, _, _, _ = count, elem, h, pEntry /* The computed hash */
if (*THash)(unsafe.Pointer(pH)).Fht != 0 {
h = _strHash(tls, pKey) % (*THash)(unsafe.Pointer(pH)).Fhtsize
pEntry = (*THash)(unsafe.Pointer(pH)).Fht + uintptr(h)*16
elem = (*T_ht)(unsafe.Pointer(pEntry)).Fchain
count = (*T_ht)(unsafe.Pointer(pEntry)).Fcount
} else {
h = uint32(0)
elem = (*THash)(unsafe.Pointer(pH)).Ffirst
count = (*THash)(unsafe.Pointer(pH)).Fcount
}
if pHash != 0 {
*(*uint32)(unsafe.Pointer(pHash)) = h
}
for count != 0 {
if _sqlite3StrICmp(tls, (*THashElem)(unsafe.Pointer(elem)).FpKey, pKey) == 0 {
return elem
}
elem = (*THashElem)(unsafe.Pointer(elem)).Fnext
count--
}
return uintptr(unsafe.Pointer(&_nullElement))
}
var _nullElement = THashElem{}
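// Illustrative sketch (not part of the generated amalgamation), reusing the
// sketch types defined above: lookup walks only the bucket's run of the
// global list, bounded by the bucket's count; with no bucket array it falls
// back to scanning the whole list. The real code compares keys
// case-insensitively via sqlite3StrICmp.
func exampleHashFind(first *exampleHashElem, total int, buckets []exampleHashBucket, hash uint32, key string) *exampleHashElem {
elem, count := first, total
if len(buckets) > 0 {
b := &buckets[int(hash)%len(buckets)]
elem, count = b.chain, b.count
}
for ; count > 0 && elem != nil; count-- {
if elem.key == key {
return elem
}
elem = elem.next
}
return nil
}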
// C documentation
//
// /* Remove a single entry from the hash table given a pointer to that
// ** element and a hash on the element's key.
// */
func _removeElementGivenHash(tls *libc.TLS, pH uintptr, elem uintptr, h uint32) {
var pEntry uintptr
_ = pEntry
if (*THashElem)(unsafe.Pointer(elem)).Fprev != 0 {
(*THashElem)(unsafe.Pointer((*THashElem)(unsafe.Pointer(elem)).Fprev)).Fnext = (*THashElem)(unsafe.Pointer(elem)).Fnext
} else {
(*THash)(unsafe.Pointer(pH)).Ffirst = (*THashElem)(unsafe.Pointer(elem)).Fnext
}
if (*THashElem)(unsafe.Pointer(elem)).Fnext != 0 {
(*THashElem)(unsafe.Pointer((*THashElem)(unsafe.Pointer(elem)).Fnext)).Fprev = (*THashElem)(unsafe.Pointer(elem)).Fprev
}
if (*THash)(unsafe.Pointer(pH)).Fht != 0 {
pEntry = (*THash)(unsafe.Pointer(pH)).Fht + uintptr(h)*16
if (*T_ht)(unsafe.Pointer(pEntry)).Fchain == elem {
(*T_ht)(unsafe.Pointer(pEntry)).Fchain = (*THashElem)(unsafe.Pointer(elem)).Fnext
}
(*T_ht)(unsafe.Pointer(pEntry)).Fcount--
}
Xsqlite3_free(tls, elem)
(*THash)(unsafe.Pointer(pH)).Fcount--
if (*THash)(unsafe.Pointer(pH)).Fcount == uint32(0) {
_sqlite3HashClear(tls, pH)
}
}
// C documentation
//
// /* Attempt to locate an element of the hash table pH with a key
// ** that matches pKey. Return the data for this element if it is
// ** found, or NULL if there is no match.
// */
func _sqlite3HashFind(tls *libc.TLS, pH uintptr, pKey uintptr) (r uintptr) {
return (*THashElem)(unsafe.Pointer(_findElementWithHash(tls, pH, pKey, uintptr(0)))).Fdata
}
// C documentation
//
// /* Insert an element into the hash table pH. The key is pKey
// ** and the data is "data".
// **
// ** If no element exists with a matching key, then a new
// ** element is created and NULL is returned.
// **
// ** If another element already exists with the same key, then the
// ** new data replaces the old data and the old data is returned.
// ** The key is not copied in this instance. If a malloc fails, then
// ** the new data is returned and the hash table is unchanged.
// **
// ** If the "data" parameter to this function is NULL, then the
// ** element corresponding to "key" is removed from the hash table.
// */
func _sqlite3HashInsert(tls *libc.TLS, pH uintptr, pKey uintptr, data uintptr) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var elem, new_elem, old_data, v1 uintptr
var _ /* h at bp+0 */ uint32
_, _, _, _ = elem, new_elem, old_data, v1 /* New element added to the pH */
elem = _findElementWithHash(tls, pH, pKey, bp)
if (*THashElem)(unsafe.Pointer(elem)).Fdata != 0 {
old_data = (*THashElem)(unsafe.Pointer(elem)).Fdata
if data == uintptr(0) {
_removeElementGivenHash(tls, pH, elem, *(*uint32)(unsafe.Pointer(bp)))
} else {
(*THashElem)(unsafe.Pointer(elem)).Fdata = data
(*THashElem)(unsafe.Pointer(elem)).FpKey = pKey
}
return old_data
}
if data == uintptr(0) {
return uintptr(0)
}
new_elem = _sqlite3Malloc(tls, uint64(32))
if new_elem == uintptr(0) {
return data
}
(*THashElem)(unsafe.Pointer(new_elem)).FpKey = pKey
(*THashElem)(unsafe.Pointer(new_elem)).Fdata = data
(*THash)(unsafe.Pointer(pH)).Fcount++
if (*THash)(unsafe.Pointer(pH)).Fcount >= uint32(10) && (*THash)(unsafe.Pointer(pH)).Fcount > uint32(2)*(*THash)(unsafe.Pointer(pH)).Fhtsize {
if _rehash(tls, pH, (*THash)(unsafe.Pointer(pH)).Fcount*uint32(2)) != 0 {
*(*uint32)(unsafe.Pointer(bp)) = _strHash(tls, pKey) % (*THash)(unsafe.Pointer(pH)).Fhtsize
}
}
if (*THash)(unsafe.Pointer(pH)).Fht != 0 {
v1 = (*THash)(unsafe.Pointer(pH)).Fht + uintptr(*(*uint32)(unsafe.Pointer(bp)))*16
} else {
v1 = uintptr(0)
}
_insertElement(tls, pH, v1, new_elem)
return uintptr(0)
}
// C documentation
//
// /************** End of hash.c ************************************************/
// /************** Begin file opcodes.c *****************************************/
// /* Automatically generated. Do not edit */
// /* See the tool/mkopcodec.tcl script for details. */
func _sqlite3OpcodeName(tls *libc.TLS, i int32) (r uintptr) {
return _azName[i]
}
var _azName = [190]uintptr{
0: __ccgo_ts + 1788,
1: __ccgo_ts + 1798,
2: __ccgo_ts + 1809,
3: __ccgo_ts + 1821,
4: __ccgo_ts + 1832,
5: __ccgo_ts + 1844,
6: __ccgo_ts + 1851,
7: __ccgo_ts + 1859,
8: __ccgo_ts + 1867,
9: __ccgo_ts + 1872,
10: __ccgo_ts + 1877,
11: __ccgo_ts + 1883,
12: __ccgo_ts + 1897,
13: __ccgo_ts + 1903,
14: __ccgo_ts + 1913,
15: __ccgo_ts + 1918,
16: __ccgo_ts + 1923,
17: __ccgo_ts + 1926,
18: __ccgo_ts + 1932,
19: __ccgo_ts + 1939,
20: __ccgo_ts + 1943,
21: __ccgo_ts + 1953,
22: __ccgo_ts + 1960,
23: __ccgo_ts + 1967,
24: __ccgo_ts + 1974,
25: __ccgo_ts + 1981,
26: __ccgo_ts + 1991,
27: __ccgo_ts + 2000,
28: __ccgo_ts + 2011,
29: __ccgo_ts + 2020,
30: __ccgo_ts + 2026,
31: __ccgo_ts + 2036,
32: __ccgo_ts + 2046,
33: __ccgo_ts + 2051,
34: __ccgo_ts + 2061,
35: __ccgo_ts + 2072,
36: __ccgo_ts + 2077,
37: __ccgo_ts + 2084,
38: __ccgo_ts + 2095,
39: __ccgo_ts + 2100,
40: __ccgo_ts + 2105,
41: __ccgo_ts + 2111,
42: __ccgo_ts + 2117,
43: __ccgo_ts + 2123,
44: __ccgo_ts + 2126,
45: __ccgo_ts + 2130,
46: __ccgo_ts + 2136,
47: __ccgo_ts + 2147,
48: __ccgo_ts + 2158,
49: __ccgo_ts + 2166,
50: __ccgo_ts + 2175,
51: __ccgo_ts + 2182,
52: __ccgo_ts + 2190,
53: __ccgo_ts + 2193,
54: __ccgo_ts + 2196,
55: __ccgo_ts + 2199,
56: __ccgo_ts + 2202,
57: __ccgo_ts + 2205,
58: __ccgo_ts + 2208,
59: __ccgo_ts + 2215,
60: __ccgo_ts + 2221,
61: __ccgo_ts + 2231,
62: __ccgo_ts + 2244,
63: __ccgo_ts + 2255,
64: __ccgo_ts + 2261,
65: __ccgo_ts + 2268,
66: __ccgo_ts + 2277,
67: __ccgo_ts + 2286,
68: __ccgo_ts + 2293,
69: __ccgo_ts + 2306,
70: __ccgo_ts + 2317,
71: __ccgo_ts + 2322,
72: __ccgo_ts + 2330,
73: __ccgo_ts + 2336,
74: __ccgo_ts + 2343,
75: __ccgo_ts + 2355,
76: __ccgo_ts + 2360,
77: __ccgo_ts + 2369,
78: __ccgo_ts + 2374,
79: __ccgo_ts + 2383,
80: __ccgo_ts + 2388,
81: __ccgo_ts + 2393,
82: __ccgo_ts + 2399,
83: __ccgo_ts + 2407,
84: __ccgo_ts + 2415,
85: __ccgo_ts + 2425,
86: __ccgo_ts + 2433,
87: __ccgo_ts + 2440,
88: __ccgo_ts + 2453,
89: __ccgo_ts + 2458,
90: __ccgo_ts + 2470,
91: __ccgo_ts + 2478,
92: __ccgo_ts + 2485,
93: __ccgo_ts + 2496,
94: __ccgo_ts + 2503,
95: __ccgo_ts + 2510,
96: __ccgo_ts + 2520,
97: __ccgo_ts + 2529,
98: __ccgo_ts + 2540,
99: __ccgo_ts + 2546,
100: __ccgo_ts + 2557,
101: __ccgo_ts + 2567,
102: __ccgo_ts + 2577,
103: __ccgo_ts + 2584,
104: __ccgo_ts + 2590,
105: __ccgo_ts + 2600,
106: __ccgo_ts + 2611,
107: __ccgo_ts + 2615,
108: __ccgo_ts + 2624,
109: __ccgo_ts + 2633,
110: __ccgo_ts + 2640,
111: __ccgo_ts + 2650,
112: __ccgo_ts + 2657,
113: __ccgo_ts + 2666,
114: __ccgo_ts + 2676,
115: __ccgo_ts + 2683,
116: __ccgo_ts + 2691,
117: __ccgo_ts + 2705,
118: __ccgo_ts + 2713,
119: __ccgo_ts + 2727,
120: __ccgo_ts + 2738,
121: __ccgo_ts + 2751,
122: __ccgo_ts + 2762,
123: __ccgo_ts + 2768,
124: __ccgo_ts + 2780,
125: __ccgo_ts + 2789,
126: __ccgo_ts + 2797,
127: __ccgo_ts + 2806,
128: __ccgo_ts + 2815,
129: __ccgo_ts + 2822,
130: __ccgo_ts + 2830,
131: __ccgo_ts + 2837,
132: __ccgo_ts + 2848,
133: __ccgo_ts + 2862,
134: __ccgo_ts + 2873,
135: __ccgo_ts + 2881,
136: __ccgo_ts + 2887,
137: __ccgo_ts + 2895,
138: __ccgo_ts + 2903,
139: __ccgo_ts + 2913,
140: __ccgo_ts + 2926,
141: __ccgo_ts + 2936,
142: __ccgo_ts + 2949,
143: __ccgo_ts + 2958,
144: __ccgo_ts + 2969,
145: __ccgo_ts + 2977,
146: __ccgo_ts + 2983,
147: __ccgo_ts + 2995,
148: __ccgo_ts + 3007,
149: __ccgo_ts + 3015,
150: __ccgo_ts + 3027,
151: __ccgo_ts + 3040,
152: __ccgo_ts + 3050,
153: __ccgo_ts + 3060,
154: __ccgo_ts + 3065,
155: __ccgo_ts + 3077,
156: __ccgo_ts + 3089,
157: __ccgo_ts + 3099,
158: __ccgo_ts + 3105,
159: __ccgo_ts + 3115,
160: __ccgo_ts + 3122,
161: __ccgo_ts + 3134,
162: __ccgo_ts + 3145,
163: __ccgo_ts + 3153,
164: __ccgo_ts + 3162,
165: __ccgo_ts + 3171,
166: __ccgo_ts + 3180,
167: __ccgo_ts + 3187,
168: __ccgo_ts + 3198,
169: __ccgo_ts + 3211,
170: __ccgo_ts + 3221,
171: __ccgo_ts + 3228,
172: __ccgo_ts + 3236,
173: __ccgo_ts + 3245,
174: __ccgo_ts + 3251,
175: __ccgo_ts + 3258,
176: __ccgo_ts + 3266,
177: __ccgo_ts + 3274,
178: __ccgo_ts + 3282,
179: __ccgo_ts + 3292,
180: __ccgo_ts + 3301,
181: __ccgo_ts + 3312,
182: __ccgo_ts + 3323,
183: __ccgo_ts + 3334,
184: __ccgo_ts + 3344,
185: __ccgo_ts + 3350,
186: __ccgo_ts + 3361,
187: __ccgo_ts + 3372,
188: __ccgo_ts + 3377,
189: __ccgo_ts + 3385,
}
type Tpthread_once = struct {
Fstate int32
Fmutex Tpthread_mutex_t
}
type pthread_once = Tpthread_once
type Tpthread_t = uintptr
type pthread_t = Tpthread_t
type Tpthread_attr_t = uintptr
type pthread_attr_t = Tpthread_attr_t
type Tpthread_mutex_t = uintptr
type pthread_mutex_t = Tpthread_mutex_t
type Tpthread_mutexattr_t = uintptr
type pthread_mutexattr_t = Tpthread_mutexattr_t
type Tpthread_cond_t = uintptr
type pthread_cond_t = Tpthread_cond_t
type Tpthread_condattr_t = uintptr
type pthread_condattr_t = Tpthread_condattr_t
type Tpthread_key_t = int32
type pthread_key_t = Tpthread_key_t
type Tpthread_once_t = struct {
Fstate int32
Fmutex Tpthread_mutex_t
}
type pthread_once_t = Tpthread_once_t
type Tpthread_rwlock_t = uintptr
type pthread_rwlock_t = Tpthread_rwlock_t
type Tpthread_rwlockattr_t = uintptr
type pthread_rwlockattr_t = Tpthread_rwlockattr_t
type Tpthread_barrier_t = uintptr
type pthread_barrier_t = Tpthread_barrier_t
type Tpthread_barrierattr_t = uintptr
type pthread_barrierattr_t = Tpthread_barrierattr_t
type Tpthread_spinlock_t = uintptr
type pthread_spinlock_t = Tpthread_spinlock_t
type Tpthread_addr_t = uintptr
type pthread_addr_t = Tpthread_addr_t
type Tpthread_startroutine_t = uintptr
type pthread_startroutine_t = Tpthread_startroutine_t
type Tu_char = uint8
type u_char = Tu_char
type Tu_short = uint16
type u_short = Tu_short
type Tu_int = uint32
type u_int = Tu_int
type Tu_long = uint64
type u_long = Tu_long
type Tushort = uint16
type ushort = Tushort
type Tuint = uint32
type uint = Tuint
type Tint8_t = int8
type int8_t = Tint8_t
type Tint16_t = int16
type int16_t = Tint16_t
type Tint32_t = int32
type int32_t = Tint32_t
type Tint64_t = int64
type int64_t = Tint64_t
type Tuint8_t = uint8
type uint8_t = Tuint8_t
type Tuint16_t = uint16
type uint16_t = Tuint16_t
type Tuint32_t = uint32
type uint32_t = Tuint32_t
type Tuint64_t = uint64
type uint64_t = Tuint64_t
type Tintptr_t = int64
type intptr_t = Tintptr_t
type Tuintptr_t = uint64
type uintptr_t = Tuintptr_t
type Tintmax_t = int64
type intmax_t = Tintmax_t
type Tuintmax_t = uint64
type uintmax_t = Tuintmax_t
type Tu_int8_t = uint8
type u_int8_t = Tu_int8_t
type Tu_int16_t = uint16
type u_int16_t = Tu_int16_t
type Tu_int32_t = uint32
type u_int32_t = Tu_int32_t
type Tu_int64_t = uint64
type u_int64_t = Tu_int64_t
type Tu_quad_t = uint64
type u_quad_t = Tu_quad_t
type Tquad_t = int64
type quad_t = Tquad_t
type Tqaddr_t = uintptr
type qaddr_t = Tqaddr_t
type Tcaddr_t = uintptr
type caddr_t = Tcaddr_t
type Tc_caddr_t = uintptr
type c_caddr_t = Tc_caddr_t
type Tblksize_t = int32
type blksize_t = Tblksize_t
type Tcpuwhich_t = int32
type cpuwhich_t = Tcpuwhich_t
type Tcpulevel_t = int32
type cpulevel_t = Tcpulevel_t
type Tcpusetid_t = int32
type cpusetid_t = Tcpusetid_t
type Tblkcnt_t = int64
type blkcnt_t = Tblkcnt_t
type Tcritical_t = int64
type critical_t = Tcritical_t
type Tdaddr_t = int64
type daddr_t = Tdaddr_t
type Tdev_t = uint64
type dev_t = Tdev_t
type Tfflags_t = uint32
type fflags_t = Tfflags_t
type Tfixpt_t = uint32
type fixpt_t = Tfixpt_t
type Tfsblkcnt_t = uint64
type fsblkcnt_t = Tfsblkcnt_t
type Tfsfilcnt_t = uint64
type fsfilcnt_t = Tfsfilcnt_t
type Tgid_t = uint32
type gid_t = Tgid_t
type Tin_addr_t = uint32
type in_addr_t = Tin_addr_t
type Tin_port_t = uint16
type in_port_t = Tin_port_t
type Tid_t = int64
type id_t = Tid_t
type Tino_t = uint64
type ino_t = Tino_t
type Tkey_t = int64
type key_t = Tkey_t
type Tlwpid_t = int32
type lwpid_t = Tlwpid_t
type Taccmode_t = int32
type accmode_t = Taccmode_t
type Tnlink_t = uint64
type nlink_t = Tnlink_t
type Tregister_t = int64
type register_t = Tregister_t
type Trlim_t = int64
type rlim_t = Trlim_t
type Tsbintime_t = int64
type sbintime_t = Tsbintime_t
type Tsegsz_t = int64
type segsz_t = Tsegsz_t
type Tsuseconds_t = int64
type suseconds_t = Tsuseconds_t
type Tmqd_t = uintptr
type mqd_t = Tmqd_t
type Tu_register_t = uint64
type u_register_t = Tu_register_t
type Tuid_t = uint32
type uid_t = Tuid_t
type Tuseconds_t = uint32
type useconds_t = Tuseconds_t
type Tcap_ioctl_t = uint64
type cap_ioctl_t = Tcap_ioctl_t
type Tkpaddr_t = uint64
type kpaddr_t = Tkpaddr_t
type Tkvaddr_t = uint64
type kvaddr_t = Tkvaddr_t
type Tksize_t = uint64
type ksize_t = Tksize_t
type Tkssize_t = int64
type kssize_t = Tkssize_t
type Tvm_offset_t = uint64
type vm_offset_t = Tvm_offset_t
type Tvm_ooffset_t = uint64
type vm_ooffset_t = Tvm_ooffset_t
type Tvm_paddr_t = uint64
type vm_paddr_t = Tvm_paddr_t
type Tvm_pindex_t = uint64
type vm_pindex_t = Tvm_pindex_t
type Tvm_size_t = uint64
type vm_size_t = Tvm_size_t
type Trman_res_t = uint64
type rman_res_t = Trman_res_t
type Tsyscallarg_t = int64
type syscallarg_t = Tsyscallarg_t
type t__sigset_t = struct {
F__bits [4]t__uint32_t
}
type t__sigset = t__sigset_t
type Ttimeval = struct {
Ftv_sec Ttime_t
Ftv_usec Tsuseconds_t
}
type timeval = Ttimeval
type t__fd_mask = uint64
type Tfd_mask = uint64
type fd_mask = Tfd_mask
type Tsigset_t = struct {
F__bits [4]t__uint32_t
}
type sigset_t = Tsigset_t
type Tfd_set = struct {
F__fds_bits [16]t__fd_mask
}
type fd_set = Tfd_set
type Ttimezone = struct {
Ftz_minuteswest int32
Ftz_dsttime int32
}
type timezone = Ttimezone
type Tbintime = struct {
Fsec Ttime_t
Ffrac Tuint64_t
}
type bintime = Tbintime
type Titimerval = struct {
Fit_interval Ttimeval
Fit_value Ttimeval
}
type itimerval = Titimerval
type Tclockinfo = struct {
Fhz int32
Ftick int32
Fspare int32
Fstathz int32
Fprofhz int32
}
type clockinfo = Tclockinfo
type Tstat = struct {
Fst_dev Tdev_t
Fst_ino Tino_t
Fst_nlink Tnlink_t
Fst_mode Tmode_t
Fst_padding0 t__int16_t
Fst_uid Tuid_t
Fst_gid Tgid_t
Fst_padding1 t__int32_t
Fst_rdev Tdev_t
Fst_atim Ttimespec
Fst_mtim Ttimespec
Fst_ctim Ttimespec
Fst_birthtim Ttimespec
Fst_size Toff_t
Fst_blocks Tblkcnt_t
Fst_blksize Tblksize_t
Fst_flags Tfflags_t
Fst_gen t__uint64_t
Fst_spare [10]t__uint64_t
}
type stat = Tstat
type Tflock = struct {
Fl_start Toff_t
Fl_len Toff_t
Fl_pid Tpid_t
Fl_type int16
Fl_whence int16
Fl_sysid int32
}
type flock = Tflock
type t__oflock = struct {
Fl_start Toff_t
Fl_len Toff_t
Fl_pid Tpid_t
Fl_type int16
Fl_whence int16
}
type Tspacectl_range = struct {
Fr_offset Toff_t
Fr_len Toff_t
}
type spacectl_range = Tspacectl_range
type Tfiodgname_arg = struct {
Flen1 int32
Fbuf uintptr
}
type fiodgname_arg = Tfiodgname_arg
type Tfiobmap2_arg = struct {
Fbn t__daddr_t
Frunp int32
Frunb int32
}
type fiobmap2_arg = Tfiobmap2_arg
type Twinsize = struct {
Fws_row uint16
Fws_col uint16
Fws_xpixel uint16
Fws_ypixel uint16
}
type winsize = Twinsize
type Tcrypt_data = struct {
Finitialized int32
F__buf [256]int8
}
type crypt_data = Tcrypt_data
type Tshm_largepage_conf = struct {
Fpsind int32
Falloc_policy int32
Fpad [10]int32
}
type shm_largepage_conf = Tshm_largepage_conf
/*
** Try to determine if gethostuuid() is available based on standard
** macros. This might sometimes compute the wrong value for some
** obscure platforms. For those cases, simply compile with one of
** the following:
**
** -DHAVE_GETHOSTUUID=0
** -DHAVE_GETHOSTUUID=1
**
** None of this matters except when building on Apple products with
** -DSQLITE_ENABLE_LOCKING_STYLE.
*/
/*
** Allowed values of unixFile.fsFlags
*/
/*
** If we are to be thread-safe, include the pthreads header.
*/
/* # include <pthread.h> */
/*
** Default permissions when creating a new file
*/
/*
** Default permissions when creating auto proxy dir
*/
/*
** Maximum supported path-length.
*/
/*
** Maximum supported symbolic links
*/
/*
** Remove and stub certain info for WASI (WebAssembly System
** Interface) builds.
*/
/* Always cast the getpid() return type for compatibility with
** kernel modules in VxWorks. */
/*
** Only set the lastErrno if the error code is a real error and not
** a normal expected return code of SQLITE_BUSY or SQLITE_OK
*/
// C documentation
//
// /* Forward references */
type TunixShm = struct {
FpShmNode uintptr
FpNext uintptr
FhasMutex Tu8
Fid Tu8
FsharedMask Tu16
FexclMask Tu16
}
type unixShm = TunixShm
/* Connection shared memory */
type TunixShmNode = struct {
FpInode uintptr
FpShmMutex uintptr
FzFilename uintptr
FhShm int32
FszRegion int32
FnRegion Tu16
FisReadonly Tu8
FisUnlocked Tu8
FapRegion uintptr
FnRef int32
FpFirst uintptr
FaLock [8]int32
}
type unixShmNode = TunixShmNode
/* Shared memory instance */
type TunixInodeInfo = struct {
FfileId TunixFileId
FpLockMutex uintptr
FnShared int32
FnLock int32
FeFileLock uint8
FbProcessLock uint8
FpUnused uintptr
FnRef int32
FpShmNode uintptr
FpNext uintptr
FpPrev uintptr
}
type unixInodeInfo = TunixInodeInfo
/* An i-node */
type TUnixUnusedFd = struct {
Ffd int32
Fflags int32
FpNext uintptr
}
type UnixUnusedFd = TUnixUnusedFd
/* An unused file descriptor */
/*
** Sometimes, after a file handle is closed by SQLite, the file descriptor
** cannot be closed immediately. In these cases, instances of the following
** structure are used to store the file descriptor while waiting for an
** opportunity to either close or reuse it.
*/
type TUnixUnusedFd1 = struct {
Ffd int32
Fflags int32
FpNext uintptr
}
type UnixUnusedFd1 = TUnixUnusedFd1
// C documentation
//
// /*
// ** The unixFile structure is a subclass of sqlite3_file specific to the unix
// ** VFS implementations.
// */
type TunixFile = struct {
FpMethod uintptr
FpVfs uintptr
FpInode uintptr
Fh int32
FeFileLock uint8
FctrlFlags uint16
FlastErrno int32
FlockingContext uintptr
FpPreallocatedUnused uintptr
FzPath uintptr
FpShm uintptr
FszChunk int32
FnFetchOut int32
FmmapSize Tsqlite3_int64
FmmapSizeActual Tsqlite3_int64
FmmapSizeMax Tsqlite3_int64
FpMapRegion uintptr
FsectorSize int32
FdeviceCharacteristics int32
}
type unixFile = TunixFile
type TunixFile1 = struct {
FpMethod uintptr
FpVfs uintptr
FpInode uintptr
Fh int32
FeFileLock uint8
FctrlFlags uint16
FlastErrno int32
FlockingContext uintptr
FpPreallocatedUnused uintptr
FzPath uintptr
FpShm uintptr
FszChunk int32
FnFetchOut int32
FmmapSize Tsqlite3_int64
FmmapSizeActual Tsqlite3_int64
FmmapSizeMax Tsqlite3_int64
FpMapRegion uintptr
FsectorSize int32
FdeviceCharacteristics int32
}
type unixFile1 = TunixFile1
// C documentation
//
// /* This variable holds the process id (pid) from when the xRandomness()
// ** method was called. If xOpen() is called from a different process id,
// ** indicating that a fork() has occurred, the PRNG will be reset.
// */
var _randomnessPid = int32(0)
/*
** Allowed values for the unixFile.ctrlFlags bitmask:
*/
/*
** Include code that is common to all os_*.c files
*/
/* #include "os_common.h" */
/*
** Define various macros that are missing from some systems.
*/
/*
** The threadid macro resolves to the thread-id or to 0. Used for
** testing and debugging only.
*/
/*
** HAVE_MREMAP defaults to true on Linux and false everywhere else.
*/
/*
** Explicitly call the 64-bit version of lseek() on Android. Otherwise, lseek()
** is the 32-bit version, even if _FILE_OFFSET_BITS=64 is defined.
*/
// C documentation
//
// /*
// ** Different Unix systems declare open() in different ways. Some use
// ** open(const char*,int,mode_t). Others use open(const char*,int,...).
// ** The difference is important when using a pointer to the function.
// **
// ** The safest way to deal with the problem is to always use this wrapper
// ** which always has the same well-defined interface.
// */
func _posixOpen(tls *libc.TLS, zFile uintptr, flags int32, mode int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
return libc.Xopen(tls, zFile, flags, libc.VaList(bp+8, mode))
}
// C documentation
//
// /*
// ** Many system calls are accessed through pointer-to-functions so that
// ** they may be overridden at runtime to facilitate fault injection during
// ** testing and sandboxing. The following array holds the names and pointers
// ** to all overrideable system calls.
// */
type Tunix_syscall = struct {
FzName uintptr
FpCurrent Tsqlite3_syscall_ptr
FpDefault Tsqlite3_syscall_ptr
}
type unix_syscall = Tunix_syscall
// C documentation
//
// /*
// ** Many system calls are accessed through pointer-to-functions so that
// ** they may be overridden at runtime to facilitate fault injection during
// ** testing and sandboxing. The following array holds the names and pointers
// ** to all overrideable system calls.
// */
var _aSyscall = [29]Tunix_syscall{
0: {
FzName: __ccgo_ts + 3395,
},
1: {
FzName: __ccgo_ts + 3400,
},
2: {
FzName: __ccgo_ts + 3406,
},
3: {
FzName: __ccgo_ts + 3413,
},
4: {
FzName: __ccgo_ts + 3420,
},
5: {
FzName: __ccgo_ts + 3425,
},
6: {
FzName: __ccgo_ts + 3431,
},
7: {
FzName: __ccgo_ts + 3441,
},
8: {
FzName: __ccgo_ts + 3447,
},
9: {
FzName: __ccgo_ts + 3452,
},
10: {
FzName: __ccgo_ts + 3458,
},
11: {
FzName: __ccgo_ts + 3466,
},
12: {
FzName: __ccgo_ts + 3472,
},
13: {
FzName: __ccgo_ts + 3479,
},
14: {
FzName: __ccgo_ts + 3488,
},
15: {
FzName: __ccgo_ts + 3495,
},
16: {
FzName: __ccgo_ts + 3505,
},
17: {
FzName: __ccgo_ts + 3512,
},
18: {
FzName: __ccgo_ts + 3526,
},
19: {
FzName: __ccgo_ts + 3532,
},
20: {
FzName: __ccgo_ts + 3538,
},
21: {
FzName: __ccgo_ts + 3545,
},
22: {
FzName: __ccgo_ts + 3553,
},
23: {
FzName: __ccgo_ts + 3558,
},
24: {
FzName: __ccgo_ts + 3565,
},
25: {
FzName: __ccgo_ts + 3572,
},
26: {
FzName: __ccgo_ts + 3584,
},
27: {
FzName: __ccgo_ts + 3593,
},
28: {
FzName: __ccgo_ts + 3599,
},
}
func init() {
p := unsafe.Pointer(&_aSyscall)
*(*uintptr)(unsafe.Add(p, 8)) = __ccgo_fp(_posixOpen)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(libc.Xclose)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(libc.Xaccess)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(libc.Xgetcwd)
*(*uintptr)(unsafe.Add(p, 104)) = __ccgo_fp(libc.Xstat)
*(*uintptr)(unsafe.Add(p, 128)) = __ccgo_fp(libc.Xfstat)
*(*uintptr)(unsafe.Add(p, 152)) = __ccgo_fp(libc.Xftruncate)
*(*uintptr)(unsafe.Add(p, 176)) = __ccgo_fp(libc.Xfcntl)
*(*uintptr)(unsafe.Add(p, 200)) = __ccgo_fp(libc.Xread)
*(*uintptr)(unsafe.Add(p, 272)) = __ccgo_fp(libc.Xwrite)
*(*uintptr)(unsafe.Add(p, 344)) = __ccgo_fp(libc.Xfchmod)
*(*uintptr)(unsafe.Add(p, 392)) = __ccgo_fp(libc.Xunlink)
*(*uintptr)(unsafe.Add(p, 416)) = __ccgo_fp(_openDirectory)
*(*uintptr)(unsafe.Add(p, 440)) = __ccgo_fp(libc.Xmkdir)
*(*uintptr)(unsafe.Add(p, 464)) = __ccgo_fp(libc.Xrmdir)
*(*uintptr)(unsafe.Add(p, 488)) = __ccgo_fp(libc.Xfchown)
*(*uintptr)(unsafe.Add(p, 512)) = __ccgo_fp(libc.Xgeteuid)
*(*uintptr)(unsafe.Add(p, 536)) = __ccgo_fp(libc.Xmmap)
*(*uintptr)(unsafe.Add(p, 560)) = __ccgo_fp(libc.Xmunmap)
*(*uintptr)(unsafe.Add(p, 608)) = __ccgo_fp(_unixGetpagesize)
*(*uintptr)(unsafe.Add(p, 632)) = __ccgo_fp(libc.Xreadlink)
*(*uintptr)(unsafe.Add(p, 656)) = __ccgo_fp(libc.Xlstat)
}
/* End of the overrideable system calls */
// C documentation
//
// /*
// ** On some systems, calls to fchown() will trigger a message in a security
// ** log if they come from non-root processes. So avoid calling fchown() if
// ** we are not running as root.
// */
func _robustFchown(tls *libc.TLS, fd int32, uid Tuid_t, gid Tgid_t) (r int32) {
var v1 int32
_ = v1
if (*(*func(*libc.TLS) Tuid_t)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(21)].FpCurrent})))(tls) != 0 {
v1 = 0
} else {
v1 = (*(*func(*libc.TLS, int32, Tuid_t, Tgid_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(20)].FpCurrent})))(tls, fd, uid, gid)
}
return v1
}
// C documentation
//
// /*
// ** This is the xSetSystemCall() method of sqlite3_vfs for all of the
// ** "unix" VFSes. Return SQLITE_OK upon successfully updating the
// ** system call pointer, or SQLITE_NOTFOUND if there is no configurable
// ** system call named zName.
// */
func _unixSetSystemCall(tls *libc.TLS, pNotUsed uintptr, zName uintptr, pNewFunc Tsqlite3_syscall_ptr) (r int32) {
var i uint32
var rc int32
_, _ = i, rc
rc = int32(SQLITE_NOTFOUND)
_ = pNotUsed
if zName == uintptr(0) {
/* If no zName is given, restore all system calls to their default
** settings and return NULL
*/
rc = SQLITE_OK
i = uint32(0)
for {
if !(uint64(i) < libc.Uint64FromInt64(696)/libc.Uint64FromInt64(24)) {
break
}
if _aSyscall[i].FpDefault != 0 {
_aSyscall[i].FpCurrent = _aSyscall[i].FpDefault
}
goto _1
_1:
;
i++
}
} else {
/* If zName is specified, operate on only the one system call
** specified.
*/
i = uint32(0)
for {
if !(uint64(i) < libc.Uint64FromInt64(696)/libc.Uint64FromInt64(24)) {
break
}
if libc.Xstrcmp(tls, zName, _aSyscall[i].FzName) == 0 {
if _aSyscall[i].FpDefault == uintptr(0) {
_aSyscall[i].FpDefault = _aSyscall[i].FpCurrent
}
rc = SQLITE_OK
if pNewFunc == uintptr(0) {
pNewFunc = _aSyscall[i].FpDefault
}
_aSyscall[i].FpCurrent = pNewFunc
break
}
goto _2
_2:
;
i++
}
}
return rc
}
// C documentation
//
// /*
// ** Return the value of a system call. Return NULL if zName is not a
// ** recognized system call name. NULL is also returned if the system call
// ** is currently undefined.
// */
func _unixGetSystemCall(tls *libc.TLS, pNotUsed uintptr, zName uintptr) (r Tsqlite3_syscall_ptr) {
var i uint32
_ = i
_ = pNotUsed
i = uint32(0)
for {
if !(uint64(i) < libc.Uint64FromInt64(696)/libc.Uint64FromInt64(24)) {
break
}
if libc.Xstrcmp(tls, zName, _aSyscall[i].FzName) == 0 {
return _aSyscall[i].FpCurrent
}
goto _1
_1:
;
i++
}
return uintptr(0)
}
// C documentation
//
// /*
// ** Return the name of the first system call after zName. If zName==NULL
// ** then return the name of the first system call. Return NULL if zName
// ** is the last system call or if zName is not the name of a valid
// ** system call.
// */
func _unixNextSystemCall(tls *libc.TLS, p uintptr, zName uintptr) (r uintptr) {
var i int32
_ = i
i = -int32(1)
_ = p
if zName != 0 {
i = 0
for {
if !(i < int32(libc.Uint64FromInt64(696)/libc.Uint64FromInt64(24))-libc.Int32FromInt32(1)) {
break
}
if libc.Xstrcmp(tls, zName, _aSyscall[i].FzName) == 0 {
break
}
goto _1
_1:
;
i++
}
}
i++
for {
if !(i < int32(libc.Uint64FromInt64(696)/libc.Uint64FromInt64(24))) {
break
}
if _aSyscall[i].FpCurrent != uintptr(0) {
return _aSyscall[i].FzName
}
goto _2
_2:
;
i++
}
return uintptr(0)
}
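// Illustrative sketch (not part of the generated amalgamation) of the
// override mechanism implemented above: a table of named entries holding a
// current and a default function pointer, where installing nil restores the
// default and an empty name restores every entry. The entry type and helper
// name are the sketch's own.
type exampleSyscallEntry struct {
name         string
current, def func()
}

func exampleSetSyscall(table []exampleSyscallEntry, name string, fn func()) bool {
if name == "" {
for i := range table {
if table[i].def != nil {
table[i].current = table[i].def
}
}
return true
}
for i := range table {
if table[i].name == name {
if table[i].def == nil {
table[i].def = table[i].current // remember the original on first override
}
if fn == nil {
fn = table[i].def
}
table[i].current = fn
return true
}
}
return false // the real code reports SQLITE_NOTFOUND here
}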
/*
** Do not accept any file descriptor less than this value, in order to avoid
** opening database files using file descriptors that are commonly used for
** standard input, output, and error.
*/
// C documentation
//
// /*
// ** Invoke open(). Do so multiple times, until it either succeeds or
// ** fails for some reason other than EINTR.
// **
// ** If the file creation mode "m" is 0 then set it to the default for
// ** SQLite. The default is SQLITE_DEFAULT_FILE_PERMISSIONS (normally
// ** 0644) as modified by the system umask. If m is not 0, then
// ** make the file creation mode be exactly m ignoring the umask.
// **
// ** The m parameter will be non-zero only when creating -wal, -journal,
// ** and -shm files. We want those files to have *exactly* the same
// ** permissions as their original database, unadulterated by the umask.
// ** In that way, if a database file is -rw-rw-rw or -rw-rw-r-, and a
// ** transaction crashes and leaves behind hot journals, then any
// ** process that is able to write to the database will also be able to
// ** recover the hot journals.
// */
func _robust_open(tls *libc.TLS, z uintptr, f int32, m Tmode_t) (r int32) {
bp := tls.Alloc(256)
defer tls.Free(256)
var fd, v1 int32
var m2 Tmode_t
var _ /* statbuf at bp+0 */ Tstat
_, _, _ = fd, m2, v1
if m != 0 {
v1 = int32(m)
} else {
v1 = int32(SQLITE_DEFAULT_FILE_PERMISSIONS)
}
m2 = uint16(v1)
for int32(1) != 0 {
fd = (*(*func(*libc.TLS, uintptr, int32, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[0].FpCurrent})))(tls, z, f|int32(O_CLOEXEC), int32(m2))
if fd < 0 {
if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == int32(EINTR) {
continue
}
break
}
if fd >= int32(SQLITE_MINIMUM_FILE_DESCRIPTOR) {
break
}
if f&(libc.Int32FromInt32(O_EXCL)|libc.Int32FromInt32(O_CREAT)) == libc.Int32FromInt32(O_EXCL)|libc.Int32FromInt32(O_CREAT) {
(*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(16)].FpCurrent})))(tls, z)
}
(*(*func(*libc.TLS, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(1)].FpCurrent})))(tls, fd)
Xsqlite3_log(tls, int32(SQLITE_WARNING), __ccgo_ts+3605, libc.VaList(bp+232, z, fd))
fd = -int32(1)
if (*(*func(*libc.TLS, uintptr, int32, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[0].FpCurrent})))(tls, __ccgo_ts+3648, O_RDONLY, int32(m)) < 0 {
break
}
}
if fd >= 0 {
if int32(m) != 0 {
if (*(*func(*libc.TLS, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(5)].FpCurrent})))(tls, fd, bp) == 0 && (*(*Tstat)(unsafe.Pointer(bp))).Fst_size == 0 && int32((*(*Tstat)(unsafe.Pointer(bp))).Fst_mode)&int32(0777) != int32(m) {
(*(*func(*libc.TLS, int32, Tmode_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(14)].FpCurrent})))(tls, fd, m)
}
}
}
return fd
}
// C documentation
//
// /*
// ** Helper functions to obtain and relinquish the global mutex. The
// ** global mutex is used to protect the unixInodeInfo and
// ** vxworksFileId objects used by this file, all of which may be
// ** shared by multiple threads.
// **
// ** Function unixMutexHeld() is used to assert() that the global mutex
// ** is held when required. This function is only used as part of assert()
// ** statements. e.g.
// **
// ** unixEnterMutex()
// ** assert( unixMutexHeld() );
// ** unixLeaveMutex()
// **
// ** To prevent deadlock, the global unixBigLock must be acquired
// ** before the unixInodeInfo.pLockMutex mutex, if both are held. It is
// ** OK to get the pLockMutex without holding unixBigLock first, but if
// ** that happens, the unixBigLock mutex must not be acquired until after
// ** pLockMutex is released.
// **
// ** OK: enter(unixBigLock), enter(pLockInfo)
// ** OK: enter(unixBigLock)
// ** OK: enter(pLockInfo)
// ** ERROR: enter(pLockInfo), enter(unixBigLock)
// */
var _unixBigLock = uintptr(0)
func _unixEnterMutex(tls *libc.TLS) {
/* Not a recursive mutex */
Xsqlite3_mutex_enter(tls, _unixBigLock)
}
func _unixLeaveMutex(tls *libc.TLS) {
Xsqlite3_mutex_leave(tls, _unixBigLock)
}
// C documentation
//
// /*
// ** Retry ftruncate() calls that fail due to EINTR
// **
// ** All calls to ftruncate() within this file should be made through
// ** this wrapper. On the Android platform, bypassing the logic below
// ** could lead to a corrupt database.
// */
func _robust_ftruncate(tls *libc.TLS, h int32, sz Tsqlite3_int64) (r int32) {
var rc int32
_ = rc
for cond := true; cond; cond = rc < 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == int32(EINTR) {
rc = (*(*func(*libc.TLS, int32, Toff_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(6)].FpCurrent})))(tls, h, sz)
}
return rc
}
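// Illustrative sketch (not part of the generated amalgamation): the EINTR
// retry pattern used by the wrapper above, assuming a callback that reports
// success or an errno value. The constant 4 is EINTR on FreeBSD.
func exampleRetryOnEINTR(call func() (ok bool, errno int)) bool {
const eintr = 4
for {
ok, errno := call()
if ok || errno != eintr {
return ok
}
}
}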
// C documentation
//
// /*
// ** This routine translates a standard POSIX errno code into something
// ** useful to the clients of the sqlite3 functions. Specifically, it is
// ** intended to translate a variety of "try again" errors into SQLITE_BUSY
// ** and a variety of "please close the file descriptor NOW" errors into
// ** SQLITE_IOERR
// **
// ** Errors during initialization of locks, or file system support for locks,
// ** should handle ENOLCK, ENOTSUP, EOPNOTSUPP separately.
// */
func _sqliteErrorFromPosixError(tls *libc.TLS, posixError int32, sqliteIOErr int32) (r int32) {
switch posixError {
case int32(EACCES):
fallthrough
case int32(EAGAIN):
fallthrough
case int32(ETIMEDOUT):
fallthrough
case int32(EBUSY):
fallthrough
case int32(EINTR):
fallthrough
case int32(ENOLCK):
/* random NFS retry error, unless during file system support
* introspection, in which case it actually means what it says */
return int32(SQLITE_BUSY)
case int32(EPERM):
return int32(SQLITE_PERM)
default:
return sqliteIOErr
}
return r
}
/******************************************************************************
****************** Begin Unique File ID Utility Used By VxWorks ***************
**
** On most versions of unix, we can get a unique ID for a file by concatenating
** the device number and the inode number. But this does not work on VxWorks.
** On VxWorks, a unique file id must be based on the canonical filename.
**
** A pointer to an instance of the following structure can be used as a
** unique file ID in VxWorks. Each instance of this structure contains
** a copy of the canonical filename. There is also a reference count.
** The structure is reclaimed when the number of pointers to it drops to
** zero.
**
** There are never very many files open at one time and lookups are not
** a performance-critical path, so it is sufficient to put these
** structures on a linked list.
*/
type TvxworksFileId = struct {
FpNext uintptr
FnRef int32
FnName int32
FzCanonicalName uintptr
}
type vxworksFileId = TvxworksFileId
/*************** End of Unique File ID Utility Used By VxWorks ****************
******************************************************************************/
/******************************************************************************
*************************** Posix Advisory Locking ****************************
**
** POSIX advisory locks are broken by design. ANSI STD 1003.1 (1996)
** section 6.5.2.2 lines 483 through 490 specify that when a process
** sets or clears a lock, that operation overrides any prior locks set
** by the same process. It does not explicitly say so, but this implies
** that it overrides locks set by the same process using a different
** file descriptor. Consider this test case:
**
** int fd1 = open("./file1", O_RDWR|O_CREAT, 0644);
** int fd2 = open("./file2", O_RDWR|O_CREAT, 0644);
**
** Suppose ./file1 and ./file2 are really the same file (because
** one is a hard or symbolic link to the other) then if you set
** an exclusive lock on fd1, then try to get an exclusive lock
** on fd2, it works. I would have expected the second lock to
** fail since there was already a lock on the file due to fd1.
** But not so. Since both locks came from the same process, the
** second overrides the first, even though they were on different
** file descriptors opened on different file names.
**
** This means that we cannot use POSIX locks to synchronize file access
** among competing threads of the same process. POSIX locks will work fine
** to synchronize access for threads in separate processes, but not
** threads within the same process.
**
** To work around the problem, SQLite has to manage file locks internally
** on its own. Whenever a new database is opened, we have to find the
** specific inode of the database file (the inode is determined by the
** st_dev and st_ino fields of the stat structure that fstat() fills in)
** and check for locks already existing on that inode. When locks are
** created or removed, we have to look at our own internal record of the
** locks to see if another thread has previously set a lock on that same
** inode.
**
** (Aside: The use of inode numbers as unique IDs does not work on VxWorks.
** For VxWorks, we have to use the alternative unique ID system based on
** canonical filename and implemented in the previous division.)
**
** The sqlite3_file structure for POSIX is no longer just an integer file
** descriptor. It is now a structure that holds the integer file
** descriptor and a pointer to a structure that describes the internal
** locks on the corresponding inode. There is one locking structure
** per inode, so if the same inode is opened twice, both unixFile structures
** point to the same locking structure. The locking structure keeps
** a reference count (so we will know when to delete it) and a "cnt"
** field that tells us its internal lock status. cnt==0 means the
** file is unlocked. cnt==-1 means the file has an exclusive lock.
** cnt>0 means there are cnt shared locks on the file.
**
** Any attempt to lock or unlock a file first checks the locking
** structure. The fcntl() system call is only invoked to set a
** POSIX lock if the internal lock structure transitions between
** a locked and an unlocked state.
**
** But wait: there are yet more problems with POSIX advisory locks.
**
** If you close a file descriptor that points to a file that has locks,
** all locks on that file that are owned by the current process are
** released. To work around this problem, each unixInodeInfo object
** maintains a count of the number of pending locks on the inode.
** When an attempt is made to close an unixFile, if there are
** other unixFile open on the same inode that are holding locks, the call
** to close() the file descriptor is deferred until all of the locks clear.
** The unixInodeInfo structure keeps a list of file descriptors that need to
** be closed and that list is walked (and cleared) when the last lock
** clears.
**
** Yet another problem: LinuxThreads do not play well with posix locks.
**
** Many older versions of linux use the LinuxThreads library which is
** not posix compliant. Under LinuxThreads, a lock created by thread
** A cannot be modified or overridden by a different thread B.
** Only thread A can modify the lock. Locking behavior is correct
** if the application uses the newer Native Posix Thread Library (NPTL)
** on linux - with NPTL a lock created by thread A can override locks
** in thread B. But there is no way to know at compile-time which
** threading library is being used. So there is no way to know at
** compile-time whether or not thread A can override locks on thread B.
** One has to do a run-time check to discover the behavior of the
** current process.
**
** SQLite used to support LinuxThreads. But support for LinuxThreads
** was dropped beginning with version 3.7.0. SQLite will still work with
** LinuxThreads provided that (1) there is no more than one connection
** per database file in the same process and (2) database connections
** do not move across threads.
*/
/*
** An instance of the following structure serves as the key used
** to locate a particular unixInodeInfo object.
*/
type TunixFileId = struct {
Fdev Tdev_t
Fino Tu64
}
type unixFileId = TunixFileId
/*
** An instance of the following structure is allocated for each open
** inode.
**
** A single inode can have multiple file descriptors, so each unixFile
** structure contains a pointer to an instance of this object and this
** object keeps a count of the number of unixFile pointing to it.
**
** Mutex rules:
**
** (1) Only the pLockMutex mutex must be held in order to read or write
** any of the locking fields:
** nShared, nLock, eFileLock, bProcessLock, pUnused
**
** (2) When nRef>0, then the following fields are unchanging and can
** be read (but not written) without holding any mutex:
** fileId, pLockMutex
**
** (3) With the exceptions above, all the fields may only be read
** or written while holding the global unixBigLock mutex.
**
** Deadlock prevention: The global unixBigLock mutex may not
** be acquired while holding the pLockMutex mutex. If both unixBigLock
** and pLockMutex are needed, then unixBigLock must be acquired first.
*/
type TunixInodeInfo1 = struct {
FfileId TunixFileId
FpLockMutex uintptr
FnShared int32
FnLock int32
FeFileLock uint8
FbProcessLock uint8
FpUnused uintptr
FnRef int32
FpShmNode uintptr
FpNext uintptr
FpPrev uintptr
}
type unixInodeInfo1 = TunixInodeInfo1
// C documentation
//
// /*
// ** A lists of all unixInodeInfo objects.
// **
// ** Must hold unixBigLock in order to read or write this variable.
// */
var _inodeList = uintptr(0)
// C documentation
//
// /*
// **
// ** This function, unixLogErrorAtLine(), is only ever called via the macro
// ** unixLogError().
// **
// ** It is invoked after an error occurs in an OS function and errno has been
// ** set. It logs a message using sqlite3_log() containing the current value of
// ** errno and, if possible, the human-readable equivalent from strerror() or
// ** strerror_r().
// **
// ** The first argument passed to the macro should be the error code that
// ** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN).
// ** The two subsequent arguments should be the name of the OS function that
// ** failed (e.g. "unlink", "open") and the associated file-system path,
// ** if any.
// */
func _unixLogErrorAtLine(tls *libc.TLS, errcode int32, zFunc uintptr, zPath uintptr, iLine int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var iErrno int32
var zErr uintptr
_, _ = iErrno, zErr /* Message from strerror() or equivalent */
iErrno = *(*int32)(unsafe.Pointer(libc.X__error(tls))) /* Saved syscall error number */
/* If this is not a threadsafe build (SQLITE_THREADSAFE==0), then use
** the strerror() function to obtain the human-readable error message
** equivalent to errno. Otherwise, use strerror_r().
*/
/* This is a threadsafe build, but strerror_r() is not available. */
zErr = __ccgo_ts + 1650
if zPath == uintptr(0) {
zPath = __ccgo_ts + 1650
}
Xsqlite3_log(tls, errcode, __ccgo_ts+3658, libc.VaList(bp+8, iLine, iErrno, zFunc, zPath, zErr))
return errcode
}
// C documentation
//
// /*
// ** Close a file descriptor.
// **
// ** We assume that close() almost always works, since it is only in a
// ** very sick application or on a very sick platform that it might fail.
// ** If it does fail, simply leak the file descriptor, but do log the
// ** error.
// **
// ** Note that it is not safe to retry close() after EINTR since the
// ** file descriptor might have already been reused by another thread.
// ** So we don't even try to recover from an EINTR. Just log the error
// ** and move on.
// */
func _robust_close(tls *libc.TLS, pFile uintptr, h int32, lineno int32) {
var v1 uintptr
_ = v1
if (*(*func(*libc.TLS, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(1)].FpCurrent})))(tls, h) != 0 {
if pFile != 0 {
v1 = (*TunixFile)(unsafe.Pointer(pFile)).FzPath
} else {
v1 = uintptr(0)
}
_unixLogErrorAtLine(tls, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(16)<<libc.Int32FromInt32(8),
}
}
// C documentation
//
// /*
// ** Set the pFile->lastErrno. Do this in a subroutine as that provides
// ** a convenient place to set a breakpoint.
// */
func _storeLastErrno(tls *libc.TLS, pFile uintptr, error1 int32) {
(*TunixFile)(unsafe.Pointer(pFile)).FlastErrno = error1
}
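// Illustrative sketch, not part of the generated translation: the
// close-without-retry policy described above for robust_close(), written
// against golang.org/x/sys/unix. The helper name is hypothetical.
//
//	import (
//		"log"
//
//		"golang.org/x/sys/unix"
//	)
//
//	// closeNoRetry closes fd exactly once. Even if close() fails, including
//	// with EINTR, the descriptor may already have been reused by another
//	// thread, so the call is never retried; the error is only logged and the
//	// descriptor is treated as leaked.
//	func closeNoRetry(fd int, path string) {
//		if err := unix.Close(fd); err != nil {
//			log.Printf("close(%d) failed for %q: %v", fd, path, err)
//		}
//	}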
// C documentation
//
// /*
// ** Close all file descriptors accumulated in the unixInodeInfo->pUnused list.
// */
func _closePendingFds(tls *libc.TLS, pFile uintptr) {
var p, pInode, pNext uintptr
_, _, _ = p, pInode, pNext
pInode = (*TunixFile)(unsafe.Pointer(pFile)).FpInode
p = (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpUnused
for {
if !(p != 0) {
break
}
pNext = (*TUnixUnusedFd)(unsafe.Pointer(p)).FpNext
_robust_close(tls, pFile, (*TUnixUnusedFd)(unsafe.Pointer(p)).Ffd, int32(39388))
Xsqlite3_free(tls, p)
goto _1
_1:
;
p = pNext
}
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FpUnused = uintptr(0)
}
// C documentation
//
// /*
// ** Release a unixInodeInfo structure previously allocated by findInodeInfo().
// **
// ** The global mutex must be held when this routine is called, but the mutex
// ** on the inode being deleted must NOT be held.
// */
func _releaseInodeInfo(tls *libc.TLS, pFile uintptr) {
var pInode uintptr
_ = pInode
pInode = (*TunixFile)(unsafe.Pointer(pFile)).FpInode
if pInode != 0 {
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FnRef--
if (*TunixInodeInfo)(unsafe.Pointer(pInode)).FnRef == 0 {
Xsqlite3_mutex_enter(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
_closePendingFds(tls, pFile)
Xsqlite3_mutex_leave(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
if (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpPrev != 0 {
(*TunixInodeInfo)(unsafe.Pointer((*TunixInodeInfo)(unsafe.Pointer(pInode)).FpPrev)).FpNext = (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpNext
} else {
_inodeList = (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpNext
}
if (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpNext != 0 {
(*TunixInodeInfo)(unsafe.Pointer((*TunixInodeInfo)(unsafe.Pointer(pInode)).FpNext)).FpPrev = (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpPrev
}
Xsqlite3_mutex_free(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
Xsqlite3_free(tls, pInode)
}
}
}
// C documentation
//
// /*
// ** Given a file descriptor, locate the unixInodeInfo object that
// ** describes that file descriptor. Create a new one if necessary. The
// ** return value might be uninitialized if an error occurs.
// **
// ** The global mutex must be held when calling this routine.
// **
// ** Return an appropriate error code.
// */
func _findInodeInfo(tls *libc.TLS, pFile uintptr, ppInode uintptr) (r int32) {
bp := tls.Alloc(240)
defer tls.Free(240)
var fd, rc int32
var pInode uintptr
var _ /* fileId at bp+0 */ TunixFileId
var _ /* statbuf at bp+16 */ Tstat
_, _, _ = fd, pInode, rc /* Low-level file information */
pInode = uintptr(0) /* Candidate unixInodeInfo object */
/* Get low-level information about the file that we can use to
** create a unique name for the file.
*/
fd = (*TunixFile)(unsafe.Pointer(pFile)).Fh
rc = (*(*func(*libc.TLS, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(5)].FpCurrent})))(tls, fd, bp+16)
if rc != 0 {
_storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls))))
return int32(SQLITE_IOERR)
}
libc.Xmemset(tls, bp, 0, uint64(16))
(*(*TunixFileId)(unsafe.Pointer(bp))).Fdev = (*(*Tstat)(unsafe.Pointer(bp + 16))).Fst_dev
(*(*TunixFileId)(unsafe.Pointer(bp))).Fino = (*(*Tstat)(unsafe.Pointer(bp + 16))).Fst_ino
pInode = _inodeList
for pInode != 0 && libc.Xmemcmp(tls, bp, pInode, uint64(16)) != 0 {
pInode = (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpNext
}
if pInode == uintptr(0) {
pInode = Xsqlite3_malloc64(tls, uint64(80))
if pInode == uintptr(0) {
return int32(SQLITE_NOMEM)
}
libc.Xmemset(tls, pInode, 0, uint64(80))
libc.Xmemcpy(tls, pInode, bp, uint64(16))
if _sqlite3Config.FbCoreMutex != 0 {
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex = Xsqlite3_mutex_alloc(tls, SQLITE_MUTEX_FAST)
if (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex == uintptr(0) {
Xsqlite3_free(tls, pInode)
return int32(SQLITE_NOMEM)
}
}
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FnRef = int32(1)
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FpNext = _inodeList
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FpPrev = uintptr(0)
if _inodeList != 0 {
(*TunixInodeInfo)(unsafe.Pointer(_inodeList)).FpPrev = pInode
}
_inodeList = pInode
} else {
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FnRef++
}
*(*uintptr)(unsafe.Pointer(ppInode)) = pInode
return SQLITE_OK
}
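// Illustrative sketch, not part of the generated translation: building the
// same (st_dev, st_ino) identity key that findInodeInfo() derives with
// fstat(), using golang.org/x/sys/unix. The map, types, and helper name are
// hypothetical stand-ins for the inodeList bookkeeping.
//
//	import "golang.org/x/sys/unix"
//
//	type fileID struct{ dev, ino uint64 }
//
//	type inodeInfo struct{ nRef int }
//
//	var inodes = map[fileID]*inodeInfo{} // one shared record per open inode
//
//	// lookupInode returns the per-inode record for an open descriptor,
//	// creating it on first use. Two descriptors for the same file resolve to
//	// the same record even if they were opened through different paths.
//	func lookupInode(fd int) (*inodeInfo, error) {
//		var st unix.Stat_t
//		if err := unix.Fstat(fd, &st); err != nil {
//			return nil, err
//		}
//		key := fileID{dev: uint64(st.Dev), ino: uint64(st.Ino)}
//		p := inodes[key]
//		if p == nil {
//			p = &inodeInfo{}
//			inodes[key] = p
//		}
//		p.nRef++
//		return p, nil
//	}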
// C documentation
//
// /*
// ** Return TRUE if pFile has been renamed or unlinked since it was first opened.
// */
func _fileHasMoved(tls *libc.TLS, pFile uintptr) (r int32) {
bp := tls.Alloc(224)
defer tls.Free(224)
var _ /* buf at bp+0 */ Tstat
return libc.BoolInt32((*TunixFile)(unsafe.Pointer(pFile)).FpInode != uintptr(0) && ((*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(4)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFile)).FzPath, bp) != 0 || (*(*Tstat)(unsafe.Pointer(bp))).Fst_ino != (*TunixInodeInfo)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpInode)).FfileId.Fino))
}
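// Illustrative sketch, not part of the generated translation: the same
// rename/unlink test as fileHasMoved(), phrased with golang.org/x/sys/unix.
// openIno is a hypothetical parameter holding the inode number recorded when
// the file was first opened.
//
//	func hasMoved(path string, openIno uint64) bool {
//		var st unix.Stat_t
//		if err := unix.Stat(path, &st); err != nil {
//			return true // the name is gone: the file was unlinked or renamed away
//		}
//		return uint64(st.Ino) != openIno // a different inode now owns the name
//	}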
// C documentation
//
// /*
// ** Check a unixFile that is a database. Verify the following:
// **
// ** (1) There is exactly one hard link on the file
// ** (2) The file is not a symbolic link
// ** (3) The file has not been renamed or unlinked
// **
// ** Issue sqlite3_log(SQLITE_WARNING,...) messages if anything is not right.
// */
func _verifyDbFile(tls *libc.TLS, pFile uintptr) {
bp := tls.Alloc(240)
defer tls.Free(240)
var rc int32
var _ /* buf at bp+0 */ Tstat
_ = rc
/* These verifications occur for the main database only */
if int32((*TunixFile)(unsafe.Pointer(pFile)).FctrlFlags)&int32(UNIXFILE_NOLOCK) != 0 {
return
}
rc = (*(*func(*libc.TLS, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(5)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, bp)
if rc != 0 {
Xsqlite3_log(tls, int32(SQLITE_WARNING), __ccgo_ts+3689, libc.VaList(bp+232, (*TunixFile)(unsafe.Pointer(pFile)).FzPath))
return
}
if (*(*Tstat)(unsafe.Pointer(bp))).Fst_nlink == uint64(0) {
Xsqlite3_log(tls, int32(SQLITE_WARNING), __ccgo_ts+3713, libc.VaList(bp+232, (*TunixFile)(unsafe.Pointer(pFile)).FzPath))
return
}
if (*(*Tstat)(unsafe.Pointer(bp))).Fst_nlink > uint64(1) {
Xsqlite3_log(tls, int32(SQLITE_WARNING), __ccgo_ts+3742, libc.VaList(bp+232, (*TunixFile)(unsafe.Pointer(pFile)).FzPath))
return
}
if _fileHasMoved(tls, pFile) != 0 {
Xsqlite3_log(tls, int32(SQLITE_WARNING), __ccgo_ts+3769, libc.VaList(bp+232, (*TunixFile)(unsafe.Pointer(pFile)).FzPath))
return
}
}
// C documentation
//
// /*
// ** This routine checks if there is a RESERVED lock held on the specified
// ** file by this or any other process. If such a lock is held, set *pResOut
// ** to a non-zero value otherwise *pResOut is set to zero. The return value
// ** is set to SQLITE_OK unless an I/O error occurs during lock checking.
// */
func _unixCheckReservedLock(tls *libc.TLS, id uintptr, pResOut uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var pFile uintptr
var rc, reserved int32
var _ /* lock at bp+0 */ Tflock
_, _, _ = pFile, rc, reserved
rc = SQLITE_OK
reserved = 0
pFile = id
Xsqlite3_mutex_enter(tls, (*TunixInodeInfo)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpInode)).FpLockMutex)
/* Check if a thread in this process holds such a lock */
if int32((*TunixInodeInfo)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpInode)).FeFileLock) > int32(SHARED_LOCK) {
reserved = int32(1)
}
/* Otherwise see if some other process holds it.
*/
if !(reserved != 0) && !((*TunixInodeInfo)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpInode)).FbProcessLock != 0) {
(*(*Tflock)(unsafe.Pointer(bp))).Fl_whence = SEEK_SET
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64(_sqlite3PendingByte + libc.Int32FromInt32(1))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(1)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(F_WRLCK)
if (*(*func(*libc.TLS, int32, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(7)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, int32(F_GETLK), libc.VaList(bp+40, bp)) != 0 {
rc = libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(14)<<libc.Int32FromInt32(8)
/*
** Set a posix-advisory-lock.
**
** There are two versions of this routine. If compiled with
** SQLITE_ENABLE_SETLK_TIMEOUT then the routine has an extra parameter
** which is a pointer to a unixFile. If the unixFile->iBusyTimeout
** value is set, then it is the number of milliseconds to wait before
** failing the lock. The iBusyTimeout value is always reset back to
** zero on each call.
**
** If SQLITE_ENABLE_SETLK_TIMEOUT is not defined, then do a non-blocking
** attempt to set the lock.
*/
// C documentation
//
// /*
// ** Attempt to set a system-lock on the file pFile. The lock is
// ** described by pLock.
// **
// ** If the pFile was opened read/write from unix-excl, then the only lock
// ** ever obtained is an exclusive lock, and it is obtained exactly once
// ** the first time any lock is attempted. All subsequent system locking
// ** operations become no-ops. Locking operations still happen internally,
// ** in order to coordinate access between separate database connections
// ** within this process, but all of that is handled in memory and the
// ** operating system does not participate.
// **
// ** This function is a pass-through to fcntl(F_SETLK) if pFile is using
// ** any VFS other than "unix-excl" or if pFile is opened on "unix-excl"
// ** and is read-only.
// **
// ** Zero is returned if the call completes successfully, or -1 if a call
// ** to fcntl() fails. In this case, errno is set appropriately (by fcntl()).
// */
func _unixFileLock(tls *libc.TLS, pFile uintptr, pLock uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var pInode uintptr
var rc int32
var _ /* lock at bp+0 */ Tflock
_, _ = pInode, rc
pInode = (*TunixFile)(unsafe.Pointer(pFile)).FpInode
if int32((*TunixFile)(unsafe.Pointer(pFile)).FctrlFlags)&(libc.Int32FromInt32(UNIXFILE_EXCL)|libc.Int32FromInt32(UNIXFILE_RDONLY)) == int32(UNIXFILE_EXCL) {
if int32((*TunixInodeInfo)(unsafe.Pointer(pInode)).FbProcessLock) == 0 {
(*(*Tflock)(unsafe.Pointer(bp))).Fl_whence = SEEK_SET
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64(_sqlite3PendingByte + libc.Int32FromInt32(2))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(SHARED_SIZE)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(F_WRLCK)
rc = (*(*func(*libc.TLS, int32, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(7)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, int32(F_SETLK), libc.VaList(bp+40, bp))
if rc < 0 {
return rc
}
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FbProcessLock = uint8(1)
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FnLock++
} else {
rc = 0
}
} else {
rc = (*(*func(*libc.TLS, int32, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(7)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, int32(F_SETLK), libc.VaList(bp+40, pLock))
}
return rc
}
// C documentation
//
// /*
// ** Lock the file with the lock specified by parameter eFileLock - one
// ** of the following:
// **
// ** (1) SHARED_LOCK
// ** (2) RESERVED_LOCK
// ** (3) PENDING_LOCK
// ** (4) EXCLUSIVE_LOCK
// **
// ** Sometimes when requesting one lock state, additional lock states
// ** are inserted in between. The locking might fail on one of the later
// ** transitions, leaving the lock state different from where it started but
// ** still short of its goal. The following chart shows the allowed
// ** transitions and the inserted intermediate states:
// **
// ** UNLOCKED -> SHARED
// ** SHARED -> RESERVED
// ** SHARED -> (PENDING) -> EXCLUSIVE
// ** RESERVED -> (PENDING) -> EXCLUSIVE
// ** PENDING -> EXCLUSIVE
// **
// ** This routine will only increase a lock. Use the sqlite3OsUnlock()
// ** routine to lower a locking level.
// */
func _unixLock(tls *libc.TLS, id uintptr, eFileLock int32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var pFile, pInode uintptr
var rc, tErrno, v1 int32
var _ /* lock at bp+0 */ Tflock
_, _, _, _, _ = pFile, pInode, rc, tErrno, v1
/* The following describes the implementation of the various locks and
** lock transitions in terms of the POSIX advisory shared and exclusive
** lock primitives (called read-locks and write-locks below, to avoid
** confusion with SQLite lock names). The algorithms are complicated
** slightly in order to be compatible with Windows95 systems simultaneously
** accessing the same database file, in case that is ever required.
**
** Symbols defined in os.h identify the 'pending byte' and the 'reserved
** byte', each single bytes at well known offsets, and the 'shared byte
** range', a range of 510 bytes at a well known offset.
**
** To obtain a SHARED lock, a read-lock is obtained on the 'pending
** byte'. If this is successful, 'shared byte range' is read-locked
** and the lock on the 'pending byte' released. (Legacy note: When
** SQLite was first developed, Windows95 systems were still very common,
** and Windows95 lacks a shared-lock capability. So on Windows95, a
** single randomly selected byte from the 'shared byte range' is locked.
** Windows95 is now pretty much extinct, but this work-around for the
** lack of shared-locks on Windows95 lives on, for backwards
** compatibility.)
**
** A process may only obtain a RESERVED lock after it has a SHARED lock.
** A RESERVED lock is implemented by grabbing a write-lock on the
** 'reserved byte'.
**
** An EXCLUSIVE lock may only be requested after either a SHARED or
** RESERVED lock is held. An EXCLUSIVE lock is implemented by obtaining
** a write-lock on the entire 'shared byte range'. Since all other locks
** require a read-lock on one of the bytes within this range, this ensures
** that no other locks are held on the database.
**
** If a process that holds a RESERVED lock requests an EXCLUSIVE, then
** a PENDING lock is obtained first. A PENDING lock is implemented by
** obtaining a write-lock on the 'pending byte'. This ensures that no new
** SHARED locks can be obtained, but existing SHARED locks are allowed to
** persist. If the call to this function fails to obtain the EXCLUSIVE
** lock in this case, it holds the PENDING lock instead. The client may
** then re-attempt the EXCLUSIVE lock later on, after existing SHARED
** locks have cleared.
*/
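/* Illustrative sketch, not part of the generated translation: the byte
** ranges described above expressed as fcntl(F_SETLK) requests through
** golang.org/x/sys/unix. The offsets assume SQLite's default layout
** (pending byte at 0x40000000, reserved byte right after it, shared range
** of 510 bytes starting two bytes in); the helper name is hypothetical.
**
**	const (
**		pendingByte  = 0x40000000
**		reservedByte = pendingByte + 1
**		sharedFirst  = pendingByte + 2
**		sharedSize   = 510
**	)
**
**	// lockRange issues a non-blocking POSIX lock of the given type over
**	// [start, start+length) on fd. Whence 0 means SEEK_SET.
**	func lockRange(fd uintptr, typ int16, start, length int64) error {
**		lk := unix.Flock_t{Type: typ, Start: start, Len: length}
**		return unix.FcntlFlock(fd, unix.F_SETLK, &lk)
**	}
**
**	// SHARED:    read-lock the pending byte, read-lock the shared range,
**	//            then release the pending byte.
**	// RESERVED:  write-lock the reserved byte.
**	// PENDING:   write-lock the pending byte.
**	// EXCLUSIVE: write-lock the entire shared range.
*/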
rc = SQLITE_OK
pFile = id
tErrno = 0
/* If there is already a lock of this type or more restrictive on the
** unixFile, do nothing. Don't use the end_lock: exit path, as
** unixEnterMutex() hasn't been called yet.
*/
if int32((*TunixFile)(unsafe.Pointer(pFile)).FeFileLock) >= eFileLock {
return SQLITE_OK
}
/* Make sure the locking sequence is correct.
** (1) We never move from unlocked to anything higher than shared lock.
** (2) SQLite never explicitly requests a pending lock.
** (3) A shared lock is always held when a reserved lock is requested.
*/
/* This mutex is needed because pFile->pInode is shared across threads
*/
pInode = (*TunixFile)(unsafe.Pointer(pFile)).FpInode
Xsqlite3_mutex_enter(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
/* If some thread using this PID has a lock via a different unixFile*
** handle that precludes the requested lock, return BUSY.
*/
if int32((*TunixFile)(unsafe.Pointer(pFile)).FeFileLock) != int32((*TunixInodeInfo)(unsafe.Pointer(pInode)).FeFileLock) && (int32((*TunixInodeInfo)(unsafe.Pointer(pInode)).FeFileLock) >= int32(PENDING_LOCK) || eFileLock > int32(SHARED_LOCK)) {
rc = int32(SQLITE_BUSY)
goto end_lock
}
/* If a SHARED lock is requested, and some thread using this PID already
** has a SHARED or RESERVED lock, then increment reference counts and
** return SQLITE_OK.
*/
if eFileLock == int32(SHARED_LOCK) && (int32((*TunixInodeInfo)(unsafe.Pointer(pInode)).FeFileLock) == int32(SHARED_LOCK) || int32((*TunixInodeInfo)(unsafe.Pointer(pInode)).FeFileLock) == int32(RESERVED_LOCK)) {
(*TunixFile)(unsafe.Pointer(pFile)).FeFileLock = uint8(SHARED_LOCK)
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FnShared++
(*TunixInodeInfo)(unsafe.Pointer(pInode)).FnLock++
goto end_lock
}
/* A PENDING lock is needed before acquiring a SHARED lock and before
** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will
** be released.
*/
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(1)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_whence = SEEK_SET
if eFileLock == int32(SHARED_LOCK) || eFileLock == int32(EXCLUSIVE_LOCK) && int32((*TunixFile)(unsafe.Pointer(pFile)).FeFileLock) == int32(RESERVED_LOCK) {
if eFileLock == int32(SHARED_LOCK) {
v1 = int32(F_RDLCK)
} else {
v1 = int32(F_WRLCK)
}
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(v1)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64(_sqlite3PendingByte)
if _unixFileLock(tls, pFile, bp) != 0 {
tErrno = *(*int32)(unsafe.Pointer(libc.X__error(tls)))
rc = _sqliteErrorFromPosixError(tls, tErrno, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(15)<<libc.Int32FromInt32(8))
if eFileLock == int32(EXCLUSIVE_LOCK) && (*TunixInodeInfo)(unsafe.Pointer(pInode)).FnShared > int32(1) {
/* We are trying for an exclusive lock but another thread in this
** same process is still holding a shared lock. */
rc = int32(SQLITE_BUSY)
} else {
/* The request was for a RESERVED or EXCLUSIVE lock. It is
** assumed that there is a SHARED or greater lock on the file
** already.
*/
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(F_WRLCK)
if eFileLock == int32(RESERVED_LOCK) {
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64(_sqlite3PendingByte + libc.Int32FromInt32(1))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(1)
} else {
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64(_sqlite3PendingByte + libc.Int32FromInt32(2))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(SHARED_SIZE)
}
if _unixFileLock(tls, pFile, bp) != 0 {
tErrno = *(*int32)(unsafe.Pointer(libc.X__error(tls)))
rc = _sqliteErrorFromPosixError(tls, tErrno, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(15)<<libc.Int32FromInt32(8))
if int32((*TunixFile)(unsafe.Pointer(pFile)).FeFileLock) > int32(SHARED_LOCK) {
/* downgrading to a shared lock on NFS involves clearing the write lock
** before establishing the readlock - to avoid a race condition we downgrade
** the lock in 2 blocks, so that part of the range will be covered by a
** write lock until the rest is covered by a read lock:
** 1: [WWWWW]
** 2: [....W]
** 3: [RRRRW]
** 4: [RRRR.]
*/
if eFileLock == int32(SHARED_LOCK) {
_ = handleNFSUnlock
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(F_RDLCK)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_whence = SEEK_SET
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64(_sqlite3PendingByte + libc.Int32FromInt32(2))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(SHARED_SIZE)
if _unixFileLock(tls, pFile, bp) != 0 {
/* In theory, the call to unixFileLock() cannot fail because another
** process is holding an incompatible lock. If it does, this
** indicates that the other process is not following the locking
** protocol. If this happens, return SQLITE_IOERR_RDLOCK. Returning
** SQLITE_BUSY would confuse the upper layer (in practice it causes
** an assert to fail). */
rc = libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(9)<<libc.Int32FromInt32(8)
if (*TunixFile)(unsafe.Pointer(pFile)).Fh >= 0 {
_robust_close(tls, pFile, (*TunixFile)(unsafe.Pointer(pFile)).Fh, int32(40172))
(*TunixFile)(unsafe.Pointer(pFile)).Fh = -int32(1)
}
Xsqlite3_free(tls, (*TunixFile)(unsafe.Pointer(pFile)).FpPreallocatedUnused)
libc.Xmemset(tls, pFile, 0, uint64(120))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Close a file.
// */
func _unixClose(tls *libc.TLS, id uintptr) (r int32) {
var pFile, pInode uintptr
var rc int32
_, _, _ = pFile, pInode, rc
rc = SQLITE_OK
pFile = id
pInode = (*TunixFile)(unsafe.Pointer(pFile)).FpInode
_verifyDbFile(tls, pFile)
_unixUnlock(tls, id, NO_LOCK)
_unixEnterMutex(tls)
/* unixFile.pInode is always valid here. Otherwise, a different close
** routine (e.g. nolockClose()) would be called instead.
*/
Xsqlite3_mutex_enter(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
if (*TunixInodeInfo)(unsafe.Pointer(pInode)).FnLock != 0 {
/* If there are outstanding locks, do not actually close the file just
** yet because that would clear those locks. Instead, add the file
** descriptor to pInode->pUnused list. It will be automatically closed
** when the last lock is cleared.
*/
_setPendingFd(tls, pFile)
}
Xsqlite3_mutex_leave(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
_releaseInodeInfo(tls, pFile)
rc = _closeUnixFile(tls, id)
_unixLeaveMutex(tls)
return rc
}
/************** End of the posix advisory lock implementation *****************
******************************************************************************/
/******************************************************************************
****************************** No-op Locking **********************************
**
** Of the various locking implementations available, this is by far the
** simplest: locking is ignored. No attempt is made to lock the database
** file for reading or writing.
**
** This locking mode is appropriate for use on read-only databases
** (for example, databases that are burned onto CD-ROM). It can
** also be used if the application employs some external mechanism to
** prevent simultaneous access of the same database by two or more
** database connections. But there is a serious risk of database
** corruption if this locking mode is used in situations where multiple
** database connections are accessing the same database file at the same
** time and one or more of those connections are writing.
*/
func _nolockCheckReservedLock(tls *libc.TLS, NotUsed uintptr, pResOut uintptr) (r int32) {
_ = NotUsed
*(*int32)(unsafe.Pointer(pResOut)) = 0
return SQLITE_OK
}
func _nolockLock(tls *libc.TLS, NotUsed uintptr, NotUsed2 int32) (r int32) {
_ = NotUsed
_ = NotUsed2
return SQLITE_OK
}
func _nolockUnlock(tls *libc.TLS, NotUsed uintptr, NotUsed2 int32) (r int32) {
_ = NotUsed
_ = NotUsed2
return SQLITE_OK
}
// C documentation
//
// /*
// ** Close the file.
// */
func _nolockClose(tls *libc.TLS, id uintptr) (r int32) {
return _closeUnixFile(tls, id)
}
/******************* End of the no-op lock implementation *********************
******************************************************************************/
/******************************************************************************
************************* Begin dot-file Locking ******************************
**
** The dotfile locking implementation uses the existence of separate lock
** files (really a directory) to control access to the database. This works
** on just about every filesystem imaginable. But there are serious downsides:
**
** (1) There is zero concurrency. A single reader blocks all other
** connections from reading or writing the database.
**
** (2) An application crash or power loss can leave stale lock files
** sitting around that need to be cleared manually.
**
** Nevertheless, a dotlock is an appropriate locking mode for use if no
** other locking strategy is available.
**
** Dotfile locking works by creating a subdirectory in the same directory as
** the database and with the same name but with a ".lock" extension added.
** The existence of a lock directory implies an EXCLUSIVE lock. All other
** lock types (SHARED, RESERVED, PENDING) are mapped into EXCLUSIVE.
*/
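// Illustrative sketch, not part of the generated translation: the dot-file
// scheme described above reduced to its two primitive operations, using only
// the Go standard library. The ".lock" suffix matches the description; the
// function names are hypothetical.
//
//	import (
//		"errors"
//		"io/fs"
//		"os"
//	)
//
//	// acquire takes the exclusive dot-lock for dbPath. mkdir() is atomic, so
//	// exactly one caller can create the directory; everyone else sees EEXIST,
//	// which maps to SQLITE_BUSY in the real implementation.
//	func acquire(dbPath string) error {
//		err := os.Mkdir(dbPath+".lock", 0o777)
//		if errors.Is(err, fs.ErrExist) {
//			return errors.New("database is locked")
//		}
//		return err
//	}
//
//	// release drops the lock. A stale directory left behind by a crash has to
//	// be removed the same way, by hand or by some recovery step.
//	func release(dbPath string) error {
//		return os.Remove(dbPath + ".lock")
//	}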
/*
** The file suffix added to the data base filename in order to create the
** lock directory.
*/
// C documentation
//
// /*
// ** This routine checks if there is a RESERVED lock held on the specified
// ** file by this or any other process. If such a lock is held, set *pResOut
// ** to a non-zero value otherwise *pResOut is set to zero. The return value
// ** is set to SQLITE_OK unless an I/O error occurs during lock checking.
// **
// ** In dotfile locking, either a lock exists or it does not. So in this
// ** variation of CheckReservedLock(), *pResOut is set to true if any lock
// ** is held on the file and false if the file is unlocked.
// */
func _dotlockCheckReservedLock(tls *libc.TLS, id uintptr, pResOut uintptr) (r int32) {
var pFile uintptr
var rc, reserved int32
_, _, _ = pFile, rc, reserved
rc = SQLITE_OK
reserved = 0
pFile = id
reserved = libc.BoolInt32((*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(2)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFile)).FlockingContext, 0) == 0)
*(*int32)(unsafe.Pointer(pResOut)) = reserved
return rc
}
// C documentation
//
// /*
// ** Lock the file with the lock specified by parameter eFileLock - one
// ** of the following:
// **
// ** (1) SHARED_LOCK
// ** (2) RESERVED_LOCK
// ** (3) PENDING_LOCK
// ** (4) EXCLUSIVE_LOCK
// **
// ** Sometimes when requesting one lock state, additional lock states
// ** are inserted in between. The locking might fail on one of the later
// ** transitions, leaving the lock state different from where it started but
// ** still short of its goal. The following chart shows the allowed
// ** transitions and the inserted intermediate states:
// **
// ** UNLOCKED -> SHARED
// ** SHARED -> RESERVED
// ** SHARED -> (PENDING) -> EXCLUSIVE
// ** RESERVED -> (PENDING) -> EXCLUSIVE
// ** PENDING -> EXCLUSIVE
// **
// ** This routine will only increase a lock. Use the sqlite3OsUnlock()
// ** routine to lower a locking level.
// **
// ** With dotfile locking, we really only support state (4): EXCLUSIVE.
// ** But we track the other locking levels internally.
// */
func _dotlockLock(tls *libc.TLS, id uintptr, eFileLock int32) (r int32) {
var pFile, zLockFile uintptr
var rc, tErrno int32
_, _, _, _ = pFile, rc, tErrno, zLockFile
pFile = id
zLockFile = (*TunixFile)(unsafe.Pointer(pFile)).FlockingContext
rc = SQLITE_OK
/* If we have any lock, then the lock file already exists. All we have
** to do is adjust our internal record of the lock level.
*/
if int32((*TunixFile)(unsafe.Pointer(pFile)).FeFileLock) > NO_LOCK {
(*TunixFile)(unsafe.Pointer(pFile)).FeFileLock = uint8(eFileLock)
/* Always update the timestamp on the old file */
libc.Xutimes(tls, zLockFile, libc.UintptrFromInt32(0))
return SQLITE_OK
}
/* grab an exclusive lock */
rc = (*(*func(*libc.TLS, uintptr, Tmode_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(18)].FpCurrent})))(tls, zLockFile, uint16(0777))
if rc < 0 {
/* failed to open/create the lock directory */
tErrno = *(*int32)(unsafe.Pointer(libc.X__error(tls)))
if int32(EEXIST) == tErrno {
rc = int32(SQLITE_BUSY)
} else {
rc = _sqliteErrorFromPosixError(tls, tErrno, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(15)<<libc.Int32FromInt32(8))
for cond := true; cond; cond = got > 0 {
newOffset = libc.Xlseek(tls, (*TunixFile)(unsafe.Pointer(id)).Fh, offset, SEEK_SET)
if newOffset < 0 {
_storeLastErrno(tls, id, *(*int32)(unsafe.Pointer(libc.X__error(tls))))
return -int32(1)
}
got = int32((*(*func(*libc.TLS, int32, uintptr, Tsize_t) Tssize_t)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(8)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(id)).Fh, pBuf, uint64(cnt)))
if got == cnt {
break
}
if got < 0 {
if *(*int32)(unsafe.Pointer(libc.X__error(tls))) == int32(EINTR) {
got = int32(1)
continue
}
prior = 0
_storeLastErrno(tls, id, *(*int32)(unsafe.Pointer(libc.X__error(tls))))
break
} else {
if got > 0 {
cnt -= got
offset += int64(got)
prior += got
pBuf = uintptr(got) + pBuf
}
}
}
return got + prior
}
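// Illustrative sketch, not part of the generated translation: the same retry
// discipline as seekAndRead() above - resume after EINTR, keep going after a
// short read - written with pread(2) from golang.org/x/sys/unix. The helper
// name is hypothetical.
//
//	import "golang.org/x/sys/unix"
//
//	// readAt reads len(buf) bytes from fd at offset off, tolerating EINTR and
//	// short reads, and returns the number of bytes actually read.
//	func readAt(fd int, buf []byte, off int64) (int, error) {
//		total := 0
//		for total < len(buf) {
//			n, err := unix.Pread(fd, buf[total:], off+int64(total))
//			if err == unix.EINTR {
//				continue // interrupted before any data was transferred
//			}
//			if err != nil {
//				return total, err
//			}
//			if n == 0 {
//				break // EOF: fewer bytes than requested are available
//			}
//			total += n
//		}
//		return total, nil
//	}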
// C documentation
//
// /*
// ** Read data from a file into a buffer. Return SQLITE_OK if all
// ** bytes were read successfully and SQLITE_IOERR if anything goes
// ** wrong.
// */
func _unixRead(tls *libc.TLS, id uintptr, pBuf uintptr, amt int32, offset Tsqlite3_int64) (r int32) {
var got, nCopy int32
var pFile uintptr
_, _, _ = got, nCopy, pFile
pFile = id
/* If this is a database file (not a journal, super-journal or temp
** file), the bytes in the locking range should never be read or written. */
/* Deal with as much of this read request as possible by transferring
** data from the memory mapping using memcpy(). */
if offset < (*TunixFile)(unsafe.Pointer(pFile)).FmmapSize {
if offset+int64(amt) <= (*TunixFile)(unsafe.Pointer(pFile)).FmmapSize {
libc.Xmemcpy(tls, pBuf, (*TunixFile)(unsafe.Pointer(pFile)).FpMapRegion+uintptr(offset), uint64(amt))
return SQLITE_OK
} else {
nCopy = int32((*TunixFile)(unsafe.Pointer(pFile)).FmmapSize - offset)
libc.Xmemcpy(tls, pBuf, (*TunixFile)(unsafe.Pointer(pFile)).FpMapRegion+uintptr(offset), uint64(nCopy))
pBuf = pBuf + uintptr(nCopy)
amt -= nCopy
offset += int64(nCopy)
}
}
got = _seekAndRead(tls, pFile, offset, pBuf, amt)
if got == amt {
return SQLITE_OK
} else {
if got < 0 {
/* pFile->lastErrno has been set by seekAndRead().
** Usually we return SQLITE_IOERR_READ here, though for some
** kinds of errors we return SQLITE_IOERR_CORRUPTFS. The
** SQLITE_IOERR_CORRUPTFS will be converted into SQLITE_CORRUPT
** prior to returning to the application by the sqlite3ApiExit()
** routine.
*/
switch (*TunixFile)(unsafe.Pointer(pFile)).FlastErrno {
case int32(ERANGE):
fallthrough
case int32(EIO):
fallthrough
case int32(ENXIO):
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(33)<<libc.Int32FromInt32(8)
// C documentation
//
// /*
// ** Seek to the offset in id->offset then read cnt bytes into pBuf.
// ** Return the number of bytes actually read. Update the offset.
// **
// ** To avoid stomping the errno value on a failed write the lastErrno value
// ** is set before returning.
// */
func _seekAndWrite(tls *libc.TLS, id uintptr, offset Ti64, pBuf uintptr, cnt int32) (r int32) {
return _seekAndWriteFd(tls, (*TunixFile)(unsafe.Pointer(id)).Fh, offset, pBuf, cnt, id+32)
}
// C documentation
//
// /*
// ** Write data from a buffer into a file. Return SQLITE_OK on success
// ** or some other error code on failure.
// */
func _unixWrite(tls *libc.TLS, id uintptr, pBuf uintptr, amt int32, offset Tsqlite3_int64) (r int32) {
var pFile uintptr
var wrote, v1 int32
_, _, _ = pFile, wrote, v1
pFile = id
wrote = 0
/* If this is a database file (not a journal, super-journal or temp
** file), the bytes in the locking range should never be read or written. */
for {
v1 = _seekAndWrite(tls, pFile, offset, pBuf, amt)
wrote = v1
if !(v1 < amt && wrote > 0) {
break
}
amt -= wrote
offset += int64(wrote)
pBuf = pBuf + uintptr(wrote)
}
if amt > wrote {
if wrote < 0 && (*TunixFile)(unsafe.Pointer(pFile)).FlastErrno != int32(ENOSPC) {
/* lastErrno set by seekAndWrite */
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(3)<<libc.Int32FromInt32(8)
if !(ii > 0 && int32((*(*[513]int8)(unsafe.Pointer(bp)))[ii]) != int32('/')) {
break
}
goto _1
_1:
;
ii--
}
if ii > 0 {
(*(*[513]int8)(unsafe.Pointer(bp)))[ii] = int8('\000')
} else {
if int32((*(*[513]int8)(unsafe.Pointer(bp)))[0]) != int32('/') {
(*(*[513]int8)(unsafe.Pointer(bp)))[0] = int8('.')
}
(*(*[513]int8)(unsafe.Pointer(bp)))[int32(1)] = 0
}
fd = _robust_open(tls, bp, libc.Int32FromInt32(O_RDONLY)|libc.Int32FromInt32(O_BINARY), uint16(0))
if fd >= 0 {
}
*(*int32)(unsafe.Pointer(pFd)) = fd
if fd >= 0 {
return SQLITE_OK
}
return _unixLogErrorAtLine(tls, _sqlite3CantopenError(tls, int32(41778)), __ccgo_ts+3512, bp, int32(41778))
}
// C documentation
//
// /*
// ** Make sure all writes to a particular file are committed to disk.
// **
// ** If dataOnly==0 then both the file itself and its metadata (file
// ** size, access time, etc) are synced. If dataOnly!=0 then only the
// ** file data is synced.
// **
// ** Under Unix, also make sure that the directory entry for the file
// ** has been created by fsync-ing the directory that contains the file.
// ** If we do not do this and we encounter a power failure, the directory
// ** entry for the journal might not exist after we reboot. The next
// ** SQLite to access the file will not know that the journal exists (because
// ** the directory entry for the journal was never created) and the transaction
// ** will not roll back - possibly leading to database corruption.
// */
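// Illustrative sketch, not part of the generated translation: making a newly
// created file durable by syncing both the file and its parent directory, as
// the comment above describes for journal files. Uses os and
// golang.org/x/sys/unix; the helper name is hypothetical.
//
//	import (
//		"os"
//		"path/filepath"
//
//		"golang.org/x/sys/unix"
//	)
//
//	func syncFileAndDir(path string) error {
//		f, err := os.OpenFile(path, os.O_RDWR, 0)
//		if err != nil {
//			return err
//		}
//		defer f.Close()
//		// Flush the file contents themselves.
//		if err := unix.Fsync(int(f.Fd())); err != nil {
//			return err
//		}
//		// Then fsync the containing directory so the directory entry naming
//		// the file also survives a power failure.
//		d, err := os.Open(filepath.Dir(path))
//		if err != nil {
//			return err
//		}
//		defer d.Close()
//		return unix.Fsync(int(d.Fd()))
//	}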
func _unixSync(tls *libc.TLS, id uintptr, flags int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var isDataOnly, isFullsync, rc int32
var pFile, p1 uintptr
var _ /* dirfd at bp+0 */ int32
_, _, _, _, _ = isDataOnly, isFullsync, pFile, rc, p1
pFile = id
isDataOnly = flags & int32(SQLITE_SYNC_DATAONLY)
isFullsync = libc.BoolInt32(flags&int32(0x0F) == int32(SQLITE_SYNC_FULL))
/* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */
/* Unix cannot, but some systems may return SQLITE_FULL from here. This
** line is to test that doing so does not cause any problems.
*/
rc = _full_fsync(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, isFullsync, isDataOnly)
if rc != 0 {
_storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls))))
return _unixLogErrorAtLine(tls, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(4)<<libc.Int32FromInt32(8),
if (*TunixFile)(unsafe.Pointer(pFile)).FszChunk > 0 {
nByte = (nByte + int64((*TunixFile)(unsafe.Pointer(pFile)).FszChunk) - int64(1)) / int64((*TunixFile)(unsafe.Pointer(pFile)).FszChunk) * int64((*TunixFile)(unsafe.Pointer(pFile)).FszChunk)
}
rc = _robust_ftruncate(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, nByte)
if rc != 0 {
_storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls))))
return _unixLogErrorAtLine(tls, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(6)<<libc.Int32FromInt32(8),
if (*TunixFile)(unsafe.Pointer(pFile)).FszChunk > 0 { /* Used to hold return values of fstat() */
if (*(*func(*libc.TLS, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(5)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, bp) != 0 {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(7)<<libc.Int32FromInt32(8)
if nSize > (*(*Tstat)(unsafe.Pointer(bp))).Fst_size {
/* If the OS does not have posix_fallocate(), fake it. Write a
** single byte to the last byte in each block that falls entirely
** within the extended region. Then, if required, a single byte
** at offset (nSize-1), to set the size of the file correctly.
** This is a similar technique to that used by glibc on systems
** that do not have a real fallocate() call.
*/
nBlk = (*(*Tstat)(unsafe.Pointer(bp))).Fst_blksize /* File-system block size */
nWrite = 0 /* Next offset to write to */
iWrite = (*(*Tstat)(unsafe.Pointer(bp))).Fst_size/int64(nBlk)*int64(nBlk) + int64(nBlk) - int64(1)
for {
if !(iWrite < nSize+int64(nBlk)-int64(1)) {
break
}
if iWrite >= nSize {
iWrite = nSize - int64(1)
}
nWrite = _seekAndWrite(tls, pFile, iWrite, __ccgo_ts+1650, int32(1))
if nWrite != int32(1) {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(3)<<libc.Int32FromInt32(8)
if (*TunixFile)(unsafe.Pointer(pFile)).FmmapSizeMax > 0 && nByte > (*TunixFile)(unsafe.Pointer(pFile)).FmmapSize {
if (*TunixFile)(unsafe.Pointer(pFile)).FszChunk <= 0 {
if _robust_ftruncate(tls, (*TunixFile)(unsafe.Pointer(pFile)).Fh, nByte) != 0 {
_storeLastErrno(tls, pFile, *(*int32)(unsafe.Pointer(libc.X__error(tls))))
return _unixLogErrorAtLine(tls, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(6)<<libc.Int32FromInt32(8),
// C documentation
//
// /*
// ** If *pArg is initially negative, then this is a query. Set *pArg to
// ** 1 or 0 depending on whether or not the mask bit of pFile->ctrlFlags is set.
// **
// ** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags.
// */
func _unixModeBit(tls *libc.TLS, pFile uintptr, mask uint8, pArg uintptr) {
var p1, p2 uintptr
_, _ = p1, p2
if *(*int32)(unsafe.Pointer(pArg)) < 0 {
*(*int32)(unsafe.Pointer(pArg)) = libc.BoolInt32(int32((*TunixFile)(unsafe.Pointer(pFile)).FctrlFlags)&int32(mask) != 0)
} else {
if *(*int32)(unsafe.Pointer(pArg)) == 0 {
p1 = pFile + 30
*(*uint16)(unsafe.Pointer(p1)) = uint16(int32(*(*uint16)(unsafe.Pointer(p1))) & ^int32(mask))
} else {
p2 = pFile + 30
*(*uint16)(unsafe.Pointer(p2)) = uint16(int32(*(*uint16)(unsafe.Pointer(p2))) | int32(mask))
}
}
}
// C documentation
//
// /*
// ** Information and control of an open file handle.
// */
func _unixFileControl(tls *libc.TLS, id uintptr, op int32, pArg uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var newLimit Ti64
var pFile, zTFile uintptr
var rc, rc1 int32
_, _, _, _, _ = newLimit, pFile, rc, rc1, zTFile
pFile = id
switch op {
case int32(SQLITE_FCNTL_LOCKSTATE):
*(*int32)(unsafe.Pointer(pArg)) = int32((*TunixFile)(unsafe.Pointer(pFile)).FeFileLock)
return SQLITE_OK
case int32(SQLITE_FCNTL_LAST_ERRNO):
*(*int32)(unsafe.Pointer(pArg)) = (*TunixFile)(unsafe.Pointer(pFile)).FlastErrno
return SQLITE_OK
case int32(SQLITE_FCNTL_CHUNK_SIZE):
(*TunixFile)(unsafe.Pointer(pFile)).FszChunk = *(*int32)(unsafe.Pointer(pArg))
return SQLITE_OK
case int32(SQLITE_FCNTL_SIZE_HINT):
rc = _fcntlSizeHint(tls, pFile, *(*Ti64)(unsafe.Pointer(pArg)))
return rc
case int32(SQLITE_FCNTL_PERSIST_WAL):
_unixModeBit(tls, pFile, uint8(UNIXFILE_PERSIST_WAL), pArg)
return SQLITE_OK
case int32(SQLITE_FCNTL_POWERSAFE_OVERWRITE):
_unixModeBit(tls, pFile, uint8(UNIXFILE_PSOW), pArg)
return SQLITE_OK
case int32(SQLITE_FCNTL_VFSNAME):
*(*uintptr)(unsafe.Pointer(pArg)) = Xsqlite3_mprintf(tls, __ccgo_ts+3797, libc.VaList(bp+8, (*Tsqlite3_vfs)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpVfs)).FzName))
return SQLITE_OK
case int32(SQLITE_FCNTL_TEMPFILENAME):
zTFile = Xsqlite3_malloc64(tls, uint64((*Tsqlite3_vfs)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpVfs)).FmxPathname))
if zTFile != 0 {
_unixGetTempname(tls, (*Tsqlite3_vfs)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpVfs)).FmxPathname, zTFile)
*(*uintptr)(unsafe.Pointer(pArg)) = zTFile
}
return SQLITE_OK
case int32(SQLITE_FCNTL_HAS_MOVED):
*(*int32)(unsafe.Pointer(pArg)) = _fileHasMoved(tls, pFile)
return SQLITE_OK
case int32(SQLITE_FCNTL_MMAP_SIZE):
newLimit = *(*Ti64)(unsafe.Pointer(pArg))
rc1 = SQLITE_OK
if newLimit > _sqlite3Config.FmxMmap {
newLimit = _sqlite3Config.FmxMmap
}
/* The value of newLimit may be eventually cast to (size_t) and passed
** to mmap(). Restrict its value to 2GB if (size_t) is not at least a
** 64-bit type. */
if newLimit > 0 && libc.Bool(uint64(8) < uint64(8)) {
newLimit = newLimit & libc.Int64FromInt32(0x7FFFFFFF)
}
*(*Ti64)(unsafe.Pointer(pArg)) = (*TunixFile)(unsafe.Pointer(pFile)).FmmapSizeMax
if newLimit >= 0 && newLimit != (*TunixFile)(unsafe.Pointer(pFile)).FmmapSizeMax && (*TunixFile)(unsafe.Pointer(pFile)).FnFetchOut == 0 {
(*TunixFile)(unsafe.Pointer(pFile)).FmmapSizeMax = newLimit
if (*TunixFile)(unsafe.Pointer(pFile)).FmmapSize > 0 {
_unixUnmapfile(tls, pFile)
rc1 = _unixMapfile(tls, pFile, int64(-int32(1)))
}
}
return rc1
case int32(SQLITE_FCNTL_EXTERNAL_READER):
return _unixFcntlExternalReader(tls, id, pArg)
}
return int32(SQLITE_NOTFOUND)
}
// C documentation
//
// /*
// ** If pFd->sectorSize is non-zero when this function is called, it is a
// ** no-op. Otherwise, the values of pFd->sectorSize and
// ** pFd->deviceCharacteristics are set according to the file-system
// ** characteristics.
// **
// ** There are two versions of this function. One for QNX and one for all
// ** other systems.
// */
func _setDeviceCharacteristics(tls *libc.TLS, pFd uintptr) {
if (*TunixFile)(unsafe.Pointer(pFd)).FsectorSize == 0 {
/* Set the POWERSAFE_OVERWRITE flag if requested. */
if int32((*TunixFile)(unsafe.Pointer(pFd)).FctrlFlags)&int32(UNIXFILE_PSOW) != 0 {
*(*int32)(unsafe.Pointer(pFd + 116)) |= int32(SQLITE_IOCAP_POWERSAFE_OVERWRITE)
}
(*TunixFile)(unsafe.Pointer(pFd)).FsectorSize = int32(SQLITE_DEFAULT_SECTOR_SIZE)
}
}
// C documentation
//
// /*
// ** Return the sector size in bytes of the underlying block device for
// ** the specified file. This is almost always 512 bytes, but may be
// ** larger for some devices.
// **
// ** SQLite code assumes this function cannot fail. It also assumes that
// ** if two files are created in the same file-system directory (i.e.
// ** a database and its journal file) that the sector size will be the
// ** same for both.
// */
func _unixSectorSize(tls *libc.TLS, id uintptr) (r int32) {
var pFd uintptr
_ = pFd
pFd = id
_setDeviceCharacteristics(tls, pFd)
return (*TunixFile)(unsafe.Pointer(pFd)).FsectorSize
}
// C documentation
//
// /*
// ** Return the device characteristics for the file.
// **
// ** This VFS is set up to return SQLITE_IOCAP_POWERSAFE_OVERWRITE by default.
// ** However, that choice is controversial since technically the underlying
// ** file system does not always provide powersafe overwrites. (In other
// ** words, after a power-loss event, parts of the file that were never
// ** written might end up being altered.) However, non-PSOW behavior is very,
// ** very rare. And asserting PSOW makes a large reduction in the amount
// ** of required I/O for journaling, since a lot of padding is eliminated.
// ** Hence, while POWERSAFE_OVERWRITE is on by default, there is a file-control
// ** available to turn it off, and a URI query parameter that does the same.
// */
func _unixDeviceCharacteristics(tls *libc.TLS, id uintptr) (r int32) {
var pFd uintptr
_ = pFd
pFd = id
_setDeviceCharacteristics(tls, pFd)
return (*TunixFile)(unsafe.Pointer(pFd)).FdeviceCharacteristics
}
// C documentation
//
// /*
// ** Return the system page size.
// **
// ** This function should not be called directly by other code in this file.
// ** Instead, it should be called via macro osGetpagesize().
// */
func _unixGetpagesize(tls *libc.TLS) (r int32) {
return int32(libc.Xsysconf(tls, int32(_SC_PAGESIZE)))
}
/*
** Object used to represent a shared memory buffer.
**
** When multiple threads all reference the same wal-index, each thread
** has its own unixShm object, but they all point to a single instance
** of this unixShmNode object. In other words, each wal-index is opened
** only once per process.
**
** Each unixShmNode object is connected to a single unixInodeInfo object.
** We could coalesce this object into unixInodeInfo, but that would mean
** every open file that does not use shared memory (in other words, most
** open files) would have to carry around this extra information. So
** the unixInodeInfo object contains a pointer to this unixShmNode object
** and the unixShmNode object is created only when needed.
**
** unixMutexHeld() must be true when creating or destroying
** this object or while reading or writing the following fields:
**
** nRef
**
** The following fields are read-only after the object is created:
**
** hShm
** zFilename
**
** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and
** unixMutexHeld() is true when reading or writing any other field
** in this structure.
**
** aLock[SQLITE_SHM_NLOCK]:
** This array records the various locks held by clients on each of the
** SQLITE_SHM_NLOCK slots. If the aLock[] entry is set to 0, then no
** locks are held by the process on this slot. If it is set to -1, then
** some client holds an EXCLUSIVE lock on the locking slot. If the aLock[]
** value is set to a positive value, then it is the number of shared
** locks currently held on the slot.
**
** aMutex[SQLITE_SHM_NLOCK]:
** Normally, when SQLITE_ENABLE_SETLK_TIMEOUT is not defined, mutex
** pShmMutex is used to protect the aLock[] array and the right to
** call fcntl() on unixShmNode.hShm to obtain or release locks.
**
** If SQLITE_ENABLE_SETLK_TIMEOUT is defined though, we use an array
** of mutexes - one for each locking slot. To read or write locking
** slot aLock[iSlot], the caller must hold the corresponding mutex
** aMutex[iSlot]. Similarly, to call fcntl() to obtain or release a
** lock corresponding to slot iSlot, mutex aMutex[iSlot] must be held.
*/
type TunixShmNode1 = struct {
FpInode uintptr
FpShmMutex uintptr
FzFilename uintptr
FhShm int32
FszRegion int32
FnRegion Tu16
FisReadonly Tu8
FisUnlocked Tu8
FapRegion uintptr
FnRef int32
FpFirst uintptr
FaLock [8]int32
}
type unixShmNode1 = TunixShmNode1
/*
** Structure used internally by this VFS to record the state of an
** open shared memory connection.
**
** The following fields are initialized when this object is created and
** are read-only thereafter:
**
** unixShm.pShmNode
** unixShm.id
**
** All other fields are read/write. The unixShm.pShmNode->pShmMutex must
** be held while accessing any read/write fields.
*/
type TunixShm1 = struct {
FpShmNode uintptr
FpNext uintptr
FhasMutex Tu8
Fid Tu8
FsharedMask Tu16
FexclMask Tu16
}
type unixShm1 = TunixShm1
/*
** Constants used for locking
*/
// C documentation
//
// /*
// ** Use F_GETLK to check whether or not there are any readers with open
// ** wal-mode transactions in other processes on database file pFile. If
// ** no error occurs, return SQLITE_OK and set (*piOut) to 1 if there are
// ** such transactions, or 0 otherwise. If an error occurs, return an
// ** SQLite error code. The final value of *piOut is undefined in this
// ** case.
// */
func _unixFcntlExternalReader(tls *libc.TLS, pFile uintptr, piOut uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var pShmNode uintptr
var rc int32
var _ /* f at bp+0 */ Tflock
_, _ = pShmNode, rc
rc = SQLITE_OK
*(*int32)(unsafe.Pointer(piOut)) = 0
if (*TunixFile)(unsafe.Pointer(pFile)).FpShm != 0 {
pShmNode = (*TunixShm)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFile)).FpShm)).FpShmNode
libc.Xmemset(tls, bp, 0, uint64(32))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(F_WRLCK)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_whence = SEEK_SET
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64((libc.Int32FromInt32(22)+libc.Int32FromInt32(SQLITE_SHM_NLOCK))*libc.Int32FromInt32(4) + libc.Int32FromInt32(3))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(libc.Int32FromInt32(SQLITE_SHM_NLOCK) - libc.Int32FromInt32(3))
Xsqlite3_mutex_enter(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FpShmMutex)
if (*(*func(*libc.TLS, int32, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(7)].FpCurrent})))(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int32(F_GETLK), libc.VaList(bp+40, bp)) < 0 {
rc = libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(15)<<libc.Int32FromInt32(8)
if (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm >= 0 {
/* Initialize the locking parameters */
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(lockType)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_whence = SEEK_SET
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64(ofst)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(n)
res = (*(*func(*libc.TLS, int32, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(7)].FpCurrent})))(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int32(F_SETLK), libc.VaList(bp+40, bp))
if res == -int32(1) {
rc = int32(SQLITE_BUSY)
}
}
/* Do debug tracing */
return rc
}
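// Illustrative sketch, not part of the generated translation: probing for a
// conflicting lock with F_GETLK, the primitive the check above is built on,
// via golang.org/x/sys/unix. The byte range is arbitrary here; the real code
// probes the wal-mode read-mark slots. The helper name is hypothetical.
//
//	import "golang.org/x/sys/unix"
//
//	// otherProcessHoldsLock reports whether some other process holds a lock
//	// that would block a write-lock on [start, start+length) of fd.
//	func otherProcessHoldsLock(fd uintptr, start, length int64) (bool, error) {
//		lk := unix.Flock_t{Type: unix.F_WRLCK, Start: start, Len: length}
//		if err := unix.FcntlFlock(fd, unix.F_GETLK, &lk); err != nil {
//			return false, err
//		}
//		// F_GETLK rewrites Type to F_UNLCK when nothing conflicts; otherwise
//		// the struct describes the first blocking lock held elsewhere.
//		return lk.Type != unix.F_UNLCK, nil
//	}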
// C documentation
//
// /*
// ** Return the minimum number of 32KB shm regions that should be mapped at
// ** a time, assuming that each mapping must be an integer multiple of the
// ** current system page-size.
// **
// ** Usually, this is 1. The exception seems to be systems that are configured
// ** to use 64KB pages - in this case each mapping must cover at least two
// ** shm regions.
// */
func _unixShmRegionPerMap(tls *libc.TLS) (r int32) {
var pgsz, shmsz int32
_, _ = pgsz, shmsz
shmsz = libc.Int32FromInt32(32) * libc.Int32FromInt32(1024) /* SHM region size */
pgsz = (*(*func(*libc.TLS) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(25)].FpCurrent})))(tls) /* System page size */
/* Page size must be a power of 2 */
if pgsz < shmsz {
return int32(1)
}
return pgsz / shmsz
}
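// Worked example, not part of the generated translation, of the calculation
// above assuming the fixed 32 KiB region size:
//
//	pgsz := os.Getpagesize() // e.g. 4096, 16384, or 65536
//	shmsz := 32 * 1024
//	n := 1
//	if pgsz >= shmsz {
//		n = pgsz / shmsz // 64 KiB pages: 65536/32768 = 2 regions per mapping
//	}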
// C documentation
//
// /*
// ** Purge the unixShmNodeList list of all entries with unixShmNode.nRef==0.
// **
// ** This is not a VFS shared-memory method; it is a utility function called
// ** by VFS shared-memory methods.
// */
func _unixShmPurge(tls *libc.TLS, pFd uintptr) {
var i, nShmPerMap int32
var p uintptr
_, _, _ = i, nShmPerMap, p
p = (*TunixInodeInfo)(unsafe.Pointer((*TunixFile)(unsafe.Pointer(pFd)).FpInode)).FpShmNode
if p != 0 && (*TunixShmNode)(unsafe.Pointer(p)).FnRef == 0 {
nShmPerMap = _unixShmRegionPerMap(tls)
Xsqlite3_mutex_free(tls, (*TunixShmNode)(unsafe.Pointer(p)).FpShmMutex)
i = 0
for {
if !(i < int32((*TunixShmNode)(unsafe.Pointer(p)).FnRegion)) {
break
}
if (*TunixShmNode)(unsafe.Pointer(p)).FhShm >= 0 {
(*(*func(*libc.TLS, uintptr, Tsize_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(23)].FpCurrent})))(tls, *(*uintptr)(unsafe.Pointer((*TunixShmNode)(unsafe.Pointer(p)).FapRegion + uintptr(i)*8)), uint64((*TunixShmNode)(unsafe.Pointer(p)).FszRegion))
} else {
Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer((*TunixShmNode)(unsafe.Pointer(p)).FapRegion + uintptr(i)*8)))
}
goto _1
_1:
;
i += nShmPerMap
}
Xsqlite3_free(tls, (*TunixShmNode)(unsafe.Pointer(p)).FapRegion)
if (*TunixShmNode)(unsafe.Pointer(p)).FhShm >= 0 {
_robust_close(tls, pFd, (*TunixShmNode)(unsafe.Pointer(p)).FhShm, int32(42583))
(*TunixShmNode)(unsafe.Pointer(p)).FhShm = -int32(1)
}
(*TunixInodeInfo)(unsafe.Pointer((*TunixShmNode)(unsafe.Pointer(p)).FpInode)).FpShmNode = uintptr(0)
Xsqlite3_free(tls, p)
}
}
// C documentation
//
// /*
// ** The DMS lock has not yet been taken on shm file pShmNode. Attempt to
// ** take it now. Return SQLITE_OK if successful, or an SQLite error
// ** code otherwise.
// **
// ** If the DMS cannot be locked because this is a readonly_shm=1
// ** connection and no other process already holds a lock, return
// ** SQLITE_READONLY_CANTINIT and set pShmNode->isUnlocked=1.
// */
func _unixLockSharedMemory(tls *libc.TLS, pDbFd uintptr, pShmNode uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var rc int32
var _ /* lock at bp+0 */ Tflock
_ = rc
rc = SQLITE_OK
/* Use F_GETLK to determine the locks other processes are holding
** on the DMS byte. If it indicates that another process is holding
** a SHARED lock, then this process may also take a SHARED lock
** and proceed with opening the *-shm file.
**
** Or, if no other process is holding any lock, then this process
** is the first to open it. In this case take an EXCLUSIVE lock on the
** DMS byte and truncate the *-shm file to zero bytes in size. Then
** downgrade to a SHARED lock on the DMS byte.
**
** If another process is holding an EXCLUSIVE lock on the DMS byte,
** return SQLITE_BUSY to the caller (it will try again). An earlier
** version of this code attempted the SHARED lock at this point. But
** this introduced a subtle race condition: if the process holding
** EXCLUSIVE failed just before truncating the *-shm file, then this
** process might open and use the *-shm file without truncating it.
** And if the *-shm file has been corrupted by a power failure or
** system crash, the database itself may also become corrupt. */
(*(*Tflock)(unsafe.Pointer(bp))).Fl_whence = SEEK_SET
(*(*Tflock)(unsafe.Pointer(bp))).Fl_start = int64((libc.Int32FromInt32(22)+libc.Int32FromInt32(SQLITE_SHM_NLOCK))*libc.Int32FromInt32(4) + libc.Int32FromInt32(SQLITE_SHM_NLOCK))
(*(*Tflock)(unsafe.Pointer(bp))).Fl_len = int64(1)
(*(*Tflock)(unsafe.Pointer(bp))).Fl_type = int16(F_WRLCK)
if (*(*func(*libc.TLS, int32, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(7)].FpCurrent})))(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int32(F_GETLK), libc.VaList(bp+40, bp)) != 0 {
rc = libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(15)<<libc.Int32FromInt32(8)
/* All that is left to do is to link the new object into the linked list starting
** at pShmNode->pFirst. This must be done while holding the
** pShmNode->pShmMutex.
*/
Xsqlite3_mutex_enter(tls, (*TunixShmNode1)(unsafe.Pointer(pShmNode)).FpShmMutex)
(*TunixShm1)(unsafe.Pointer(p)).FpNext = (*TunixShmNode1)(unsafe.Pointer(pShmNode)).FpFirst
(*TunixShmNode1)(unsafe.Pointer(pShmNode)).FpFirst = p
Xsqlite3_mutex_leave(tls, (*TunixShmNode1)(unsafe.Pointer(pShmNode)).FpShmMutex)
return rc
/* Jump here on any error */
goto shm_open_err
shm_open_err:
;
_unixShmPurge(tls, pDbFd) /* This call frees pShmNode if required */
Xsqlite3_free(tls, p)
_unixLeaveMutex(tls)
return rc
}
// C documentation
//
// /*
// ** This function is called to obtain a pointer to region iRegion of the
// ** shared-memory associated with the database file fd. Shared-memory regions
// ** are numbered starting from zero. Each shared-memory region is szRegion
// ** bytes in size.
// **
// ** If an error occurs, an error code is returned and *pp is set to NULL.
// **
// ** Otherwise, if the bExtend parameter is 0 and the requested shared-memory
// ** region has not been allocated (by any client, including one running in a
// ** separate process), then *pp is set to NULL and SQLITE_OK returned. If
// ** bExtend is non-zero and the requested shared-memory region has not yet
// ** been allocated, it is allocated by this function.
// **
// ** If the shared-memory region has already been allocated or is allocated by
// ** this call as described above, then it is mapped into this process's
// ** address space (if it is not already), *pp is set to point to the mapped
// ** memory and SQLITE_OK returned.
// */
func _unixShmMap(tls *libc.TLS, fd uintptr, iRegion int32, szRegion int32, bExtend int32, pp uintptr) (r int32) {
bp := tls.Alloc(240)
defer tls.Free(240)
var apNew, p, pDbFd, pMem, pShmNode, zFile, p4 uintptr
var i, iPg, nByte, nMap, nReqRegion, nShmPerMap, rc, v2 int32
var _ /* sStat at bp+0 */ Tstat
var _ /* x at bp+224 */ int32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = apNew, i, iPg, nByte, nMap, nReqRegion, nShmPerMap, p, pDbFd, pMem, pShmNode, rc, zFile, v2, p4
pDbFd = fd
rc = SQLITE_OK
nShmPerMap = _unixShmRegionPerMap(tls)
/* If the shared-memory file has not yet been opened, open it now. */
if (*TunixFile)(unsafe.Pointer(pDbFd)).FpShm == uintptr(0) {
rc = _unixOpenSharedMemory(tls, pDbFd)
if rc != SQLITE_OK {
return rc
}
}
p = (*TunixFile)(unsafe.Pointer(pDbFd)).FpShm
pShmNode = (*TunixShm)(unsafe.Pointer(p)).FpShmNode
Xsqlite3_mutex_enter(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FpShmMutex)
if (*TunixShmNode)(unsafe.Pointer(pShmNode)).FisUnlocked != 0 {
rc = _unixLockSharedMemory(tls, pDbFd, pShmNode)
if rc != SQLITE_OK {
goto shmpage_out
}
(*TunixShmNode)(unsafe.Pointer(pShmNode)).FisUnlocked = uint8(0)
}
/* Minimum number of regions required to be mapped. */
nReqRegion = (iRegion + nShmPerMap) / nShmPerMap * nShmPerMap
if int32((*TunixShmNode)(unsafe.Pointer(pShmNode)).FnRegion) < nReqRegion { /* New apRegion[] array */
nByte = nReqRegion * szRegion /* Used by fstat() */
(*TunixShmNode)(unsafe.Pointer(pShmNode)).FszRegion = szRegion
if (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm >= 0 {
/* The requested region is not mapped into this process's address space.
** Check to see if it has been allocated (i.e. if the wal-index file is
** large enough to contain the requested region).
*/
if (*(*func(*libc.TLS, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(5)].FpCurrent})))(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm, bp) != 0 {
rc = libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(19)<<libc.Int32FromInt32(8)
if (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm >= 0 {
if (*TunixShmNode)(unsafe.Pointer(pShmNode)).FisReadonly != 0 {
v2 = int32(PROT_READ)
} else {
v2 = libc.Int32FromInt32(PROT_READ) | libc.Int32FromInt32(PROT_WRITE)
}
pMem = (*(*func(*libc.TLS, uintptr, Tsize_t, int32, int32, int32, Toff_t) uintptr)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(22)].FpCurrent})))(tls, uintptr(0), uint64(nMap), v2, int32(MAP_SHARED), (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm, int64(szRegion)*int64((*TunixShmNode)(unsafe.Pointer(pShmNode)).FnRegion))
if pMem == uintptr(-libc.Int32FromInt32(1)) {
rc = _unixLogErrorAtLine(tls, libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(21)<<libc.Int32FromInt32(8),
if int32((*TunixShmNode)(unsafe.Pointer(pShmNode)).FnRegion) > iRegion {
*(*uintptr)(unsafe.Pointer(pp)) = *(*uintptr)(unsafe.Pointer((*TunixShmNode)(unsafe.Pointer(pShmNode)).FapRegion + uintptr(iRegion)*8))
} else {
*(*uintptr)(unsafe.Pointer(pp)) = uintptr(0)
}
if (*TunixShmNode)(unsafe.Pointer(pShmNode)).FisReadonly != 0 && rc == SQLITE_OK {
rc = int32(SQLITE_READONLY)
}
Xsqlite3_mutex_leave(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FpShmMutex)
return rc
}
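// Illustrative sketch, not part of the generated translation: mapping one
// szRegion-sized window of a shared file with MAP_SHARED, the mechanism
// unixShmMap() uses for the *-shm file, via golang.org/x/sys/unix. The helper
// name is hypothetical and the region is assumed to already exist (no
// extend/ftruncate step is shown).
//
//	import "golang.org/x/sys/unix"
//
//	// mapRegion maps region number i (each szRegion bytes long) of fd and
//	// returns a writable byte slice backed by the shared file.
//	func mapRegion(fd int, i int, szRegion int) ([]byte, error) {
//		return unix.Mmap(fd, int64(i)*int64(szRegion), szRegion,
//			unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
//	}
//
//	// The mapping is released with unix.Munmap, mirroring unixShmPurge().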
var _pgsz = int32(4096)
/*
** Check that the pShmNode->aLock[] array comports with the locking bitmasks
** held by each client. Return true if it does, or false otherwise. This
** is to be used in an assert(). e.g.
**
** assert( assertLockingArrayOk(pShmNode) );
*/
// C documentation
//
// /*
// ** Change the lock state for a shared-memory segment.
// **
// ** Note that the relationship between SHARED and EXCLUSIVE locks is a little
// ** different here than in posix. In xShmLock(), one can go from unlocked
// ** to shared and back or from unlocked to exclusive and back. But one may
// ** not go from shared to exclusive or from exclusive to shared.
// */
func _unixShmLock(tls *libc.TLS, fd uintptr, ofst int32, n int32, flags int32) (r int32) {
var aLock, p, pDbFd, pShmNode, p1, p2, p3, p4, p6 uintptr
var bUnlock, ii, rc int32
var mask Tu16
_, _, _, _, _, _, _, _, _, _, _, _, _ = aLock, bUnlock, ii, mask, p, pDbFd, pShmNode, rc, p1, p2, p3, p4, p6
pDbFd = fd /* The underlying file iNode */
rc = SQLITE_OK /* Result code */
mask = uint16(int32(1)<<(ofst+n) - int32(1)<=3 && ofst int32(1) {
bUnlock = 0
*(*int32)(unsafe.Pointer(aLock + uintptr(ofst)*4))--
p1 = p + 18
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^int32(mask))
}
}
if bUnlock != 0 {
rc = _unixShmSystemLock(tls, pDbFd, int32(F_UNLCK), ofst+(libc.Int32FromInt32(22)+libc.Int32FromInt32(SQLITE_SHM_NLOCK))*libc.Int32FromInt32(4), n)
if rc == SQLITE_OK {
libc.Xmemset(tls, aLock+uintptr(ofst)*4, 0, uint64(4)*uint64(n))
p2 = p + 18
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^int32(mask))
p3 = p + 20
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^int32(mask))
}
}
} else {
if flags&int32(SQLITE_SHM_SHARED) != 0 {
/* Case (b) - a shared lock. */
if *(*int32)(unsafe.Pointer(aLock + uintptr(ofst)*4)) < 0 {
/* An exclusive lock is held by some other connection. BUSY. */
rc = int32(SQLITE_BUSY)
} else {
if *(*int32)(unsafe.Pointer(aLock + uintptr(ofst)*4)) == 0 {
rc = _unixShmSystemLock(tls, pDbFd, int32(F_RDLCK), ofst+(libc.Int32FromInt32(22)+libc.Int32FromInt32(SQLITE_SHM_NLOCK))*libc.Int32FromInt32(4), n)
}
}
/* Get the local shared locks */
if rc == SQLITE_OK {
p4 = p + 18
*(*Tu16)(unsafe.Pointer(p4)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p4))) | int32(mask))
*(*int32)(unsafe.Pointer(aLock + uintptr(ofst)*4))++
}
} else {
/* Make sure no sibling connections hold locks that will block this
** lock. If any do, return SQLITE_BUSY right away. */
ii = ofst
for {
if !(ii < ofst+n) {
break
}
if *(*int32)(unsafe.Pointer(aLock + uintptr(ii)*4)) != 0 {
rc = int32(SQLITE_BUSY)
break
}
goto _5
_5:
;
ii++
}
/* Get the exclusive locks at the system level. Then if successful
** also update the in-memory values. */
if rc == SQLITE_OK {
rc = _unixShmSystemLock(tls, pDbFd, int32(F_WRLCK), ofst+(libc.Int32FromInt32(22)+libc.Int32FromInt32(SQLITE_SHM_NLOCK))*libc.Int32FromInt32(4), n)
if rc == SQLITE_OK {
p6 = p + 20
*(*Tu16)(unsafe.Pointer(p6)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p6))) | int32(mask))
ii = ofst
for {
if !(ii < ofst+n) {
break
}
*(*int32)(unsafe.Pointer(aLock + uintptr(ii)*4)) = -int32(1)
goto _7
_7:
;
ii++
}
}
}
}
}
}
/* Drop the mutexes acquired above. */
Xsqlite3_mutex_leave(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FpShmMutex)
}
return rc
}
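// Illustrative sketch, not part of the generated code: the bitmask that
// _unixShmLock above builds for a request covering n lock slots starting at
// slot ofst. Bits ofst..ofst+n-1 are set; e.g. ofst=3, n=2 gives 0x18.
// The helper name is hypothetical and exists only for this example.
func _exampleShmLockMask(ofst int32, n int32) Tu16 {
return Tu16(int32(1)<<(ofst+n) - int32(1)<<ofst)
}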
// C documentation
//
// /*
// ** Implement a memory barrier or memory fence on shared memory.
// **
// ** All loads and stores begun before the barrier must complete before
// ** any load or store begun after the barrier.
// */
func _unixShmBarrier(tls *libc.TLS, fd uintptr) {
_ = fd
/* compiler-defined memory barrier */
_unixEnterMutex(tls) /* Also mutex, for redundancy */
_unixLeaveMutex(tls)
}
// C documentation
//
// /*
// ** Close a connection to shared-memory. Delete the underlying
// ** storage if deleteFlag is true.
// **
// ** If there is no shared memory associated with the connection then this
// ** routine is a harmless no-op.
// */
func _unixShmUnmap(tls *libc.TLS, fd uintptr, deleteFlag int32) (r int32) {
var p, pDbFd, pShmNode, pp uintptr
_, _, _, _ = p, pDbFd, pShmNode, pp /* The underlying database file */
pDbFd = fd
p = (*TunixFile)(unsafe.Pointer(pDbFd)).FpShm
if p == uintptr(0) {
return SQLITE_OK
}
pShmNode = (*TunixShm)(unsafe.Pointer(p)).FpShmNode
/* Remove connection p from the set of connections associated
** with pShmNode */
Xsqlite3_mutex_enter(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FpShmMutex)
pp = pShmNode + 56
for {
if !(*(*uintptr)(unsafe.Pointer(pp)) != p) {
break
}
goto _1
_1:
;
pp = *(*uintptr)(unsafe.Pointer(pp)) + 8
}
*(*uintptr)(unsafe.Pointer(pp)) = (*TunixShm)(unsafe.Pointer(p)).FpNext
/* Free the connection p */
Xsqlite3_free(tls, p)
(*TunixFile)(unsafe.Pointer(pDbFd)).FpShm = uintptr(0)
Xsqlite3_mutex_leave(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FpShmMutex)
/* If pShmNode->nRef has reached 0, then close the underlying
** shared-memory file, too */
_unixEnterMutex(tls)
(*TunixShmNode)(unsafe.Pointer(pShmNode)).FnRef--
if (*TunixShmNode)(unsafe.Pointer(pShmNode)).FnRef == 0 {
if deleteFlag != 0 && (*TunixShmNode)(unsafe.Pointer(pShmNode)).FhShm >= 0 {
(*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(16)].FpCurrent})))(tls, (*TunixShmNode)(unsafe.Pointer(pShmNode)).FzFilename)
}
_unixShmPurge(tls, pDbFd)
}
_unixLeaveMutex(tls)
return SQLITE_OK
}
// C documentation
//
// /*
// ** If it is currently memory mapped, unmap file pFd.
// */
func _unixUnmapfile(tls *libc.TLS, pFd uintptr) {
if (*TunixFile)(unsafe.Pointer(pFd)).FpMapRegion != 0 {
(*(*func(*libc.TLS, uintptr, Tsize_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(23)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFd)).FpMapRegion, uint64((*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeActual))
(*TunixFile)(unsafe.Pointer(pFd)).FpMapRegion = uintptr(0)
(*TunixFile)(unsafe.Pointer(pFd)).FmmapSize = 0
(*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeActual = 0
}
}
// C documentation
//
// /*
// ** Attempt to set the size of the memory mapping maintained by file
// ** descriptor pFd to nNew bytes. Any existing mapping is discarded.
// **
// ** If successful, this function sets the following variables:
// **
// ** unixFile.pMapRegion
// ** unixFile.mmapSize
// ** unixFile.mmapSizeActual
// **
// ** If unsuccessful, an error message is logged via sqlite3_log() and
// ** the three variables above are zeroed. In this case SQLite should
// ** continue accessing the database using the xRead() and xWrite()
// ** methods.
// */
func _unixRemapfile(tls *libc.TLS, pFd uintptr, nNew Ti64) {
var flags, h, szSyspage int32
var nOrig, nReuse Ti64
var pNew, pOrig, pReq, zErr uintptr
var v1 Tsqlite3_int64
_, _, _, _, _, _, _, _, _, _ = flags, h, nOrig, nReuse, pNew, pOrig, pReq, szSyspage, zErr, v1
zErr = __ccgo_ts + 3553
h = (*TunixFile)(unsafe.Pointer(pFd)).Fh /* File descriptor open on db file */
pOrig = (*TunixFile)(unsafe.Pointer(pFd)).FpMapRegion /* Pointer to current file mapping */
nOrig = (*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeActual /* Size of pOrig region in bytes */
pNew = uintptr(0) /* Location of new mapping */
flags = int32(PROT_READ) /* Flags to pass to mmap() */
if pOrig != 0 {
szSyspage = (*(*func(*libc.TLS) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(25)].FpCurrent})))(tls)
nReuse = (*TunixFile)(unsafe.Pointer(pFd)).FmmapSize & int64(^(szSyspage - libc.Int32FromInt32(1)))
pReq = pOrig + uintptr(nReuse)
/* Unmap any pages of the existing mapping that cannot be reused. */
if nReuse != nOrig {
(*(*func(*libc.TLS, uintptr, Tsize_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(23)].FpCurrent})))(tls, pReq, uint64(nOrig-nReuse))
}
pNew = (*(*func(*libc.TLS, uintptr, Tsize_t, int32, int32, int32, Toff_t) uintptr)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(22)].FpCurrent})))(tls, pReq, uint64(nNew-nReuse), flags, int32(MAP_SHARED), h, nReuse)
if pNew != uintptr(-libc.Int32FromInt32(1)) {
if pNew != pReq {
(*(*func(*libc.TLS, uintptr, Tsize_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(23)].FpCurrent})))(tls, pNew, uint64(nNew-nReuse))
pNew = uintptr(0)
} else {
pNew = pOrig
}
}
/* The attempt to extend the existing mapping failed. Free it. */
if pNew == uintptr(-libc.Int32FromInt32(1)) || pNew == uintptr(0) {
(*(*func(*libc.TLS, uintptr, Tsize_t) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(23)].FpCurrent})))(tls, pOrig, uint64(nReuse))
}
}
/* If pNew is still NULL, try to create an entirely new mapping. */
if pNew == uintptr(0) {
pNew = (*(*func(*libc.TLS, uintptr, Tsize_t, int32, int32, int32, Toff_t) uintptr)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(22)].FpCurrent})))(tls, uintptr(0), uint64(nNew), flags, int32(MAP_SHARED), h, 0)
}
if pNew == uintptr(-libc.Int32FromInt32(1)) {
pNew = uintptr(0)
nNew = 0
_unixLogErrorAtLine(tls, SQLITE_OK, zErr, (*TunixFile)(unsafe.Pointer(pFd)).FzPath, int32(43401))
/* If the mmap() above failed, assume that all subsequent mmap() calls
** will probably fail too. Fall back to using xRead/xWrite exclusively
** in this case. */
(*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeMax = 0
}
(*TunixFile)(unsafe.Pointer(pFd)).FpMapRegion = pNew
v1 = nNew
(*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeActual = v1
(*TunixFile)(unsafe.Pointer(pFd)).FmmapSize = v1
}
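// Illustrative sketch, not part of the generated code: the page-aligned
// prefix that _unixRemapfile above tries to keep. Only whole OS pages of the
// old mapping can be reused; the tail past the last whole page is unmapped
// and the range from there to the new size is mapped again. With 4096-byte
// pages, an old mapping of 10000 bytes keeps an 8192-byte prefix. The helper
// name is hypothetical and exists only for this example.
func _examplePageAlignedPrefix(mmapSize int64, szSyspage int32) int64 {
return mmapSize & int64(^(szSyspage - 1))
}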
// C documentation
//
// /*
// ** Memory map or remap the file opened by file-descriptor pFd (if the file
// ** is already mapped, the existing mapping is replaced by the new). Or, if
// ** there already exists a mapping for this file, and there are still
// ** outstanding xFetch() references to it, this function is a no-op.
// **
// ** If parameter nByte is non-negative, then it is the requested size of
// ** the mapping to create. Otherwise, if nByte is less than zero, then the
// ** requested size is the size of the file on disk. The actual size of the
// ** created mapping is either the requested size or the value configured
// ** using SQLITE_FCNTL_MMAP_LIMIT, whichever is smaller.
// **
// ** SQLITE_OK is returned if no error occurs (even if the mapping is not
// ** recreated as a result of outstanding references) or an SQLite error
// ** code otherwise.
// */
func _unixMapfile(tls *libc.TLS, pFd uintptr, nMap Ti64) (r int32) {
bp := tls.Alloc(224)
defer tls.Free(224)
var _ /* statbuf at bp+0 */ Tstat
if (*TunixFile)(unsafe.Pointer(pFd)).FnFetchOut > 0 {
return SQLITE_OK
}
if nMap < 0 { /* Low-level file information */
if (*(*func(*libc.TLS, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(5)].FpCurrent})))(tls, (*TunixFile)(unsafe.Pointer(pFd)).Fh, bp) != 0 {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(7)<<libc.Int32FromInt32(8)
}
nMap = (*(*Tstat)(unsafe.Pointer(bp))).Fst_size
}
if nMap > (*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeMax {
nMap = (*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeMax
}
if nMap != (*TunixFile)(unsafe.Pointer(pFd)).FmmapSize {
_unixRemapfile(tls, pFd, nMap)
}
return SQLITE_OK
}
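// Illustrative sketch, not part of the generated code: the size selection
// described in the comment above _unixMapfile. A negative request means "use
// the on-disk file size", and the result is always capped at the limit set
// via SQLITE_FCNTL_MMAP_LIMIT (unixFile.mmapSizeMax). The helper name is
// hypothetical and exists only for this example.
func _exampleClampMapSize(nReq int64, szFile int64, mmapSizeMax int64) int64 {
if nReq < 0 {
nReq = szFile
}
if nReq > mmapSizeMax {
nReq = mmapSizeMax
}
return nReq
}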
// C documentation
//
// /*
// ** If possible, return a pointer to a mapping of file fd starting at offset
// ** iOff. The mapping must be valid for at least nAmt bytes.
// **
// ** If such a pointer can be obtained, store it in *pp and return SQLITE_OK.
// ** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK.
// ** Finally, if an error does occur, return an SQLite error code. The final
// ** value of *pp is undefined in this case.
// **
// ** If this function does return a pointer, the caller must eventually
// ** release the reference by calling unixUnfetch().
// */
func _unixFetch(tls *libc.TLS, fd uintptr, iOff Ti64, nAmt int32, pp uintptr) (r int32) {
var nEofBuffer, rc int32
var pFd uintptr
_, _, _ = nEofBuffer, pFd, rc
pFd = fd /* The underlying database file */
*(*uintptr)(unsafe.Pointer(pp)) = uintptr(0)
if (*TunixFile)(unsafe.Pointer(pFd)).FmmapSizeMax > 0 {
/* Ensure that there is always at least a 256 byte buffer of addressable
** memory following the returned page. If the database is corrupt,
** SQLite may overread the page slightly (in practice only a few bytes,
** but 256 is a safe, round number). */
nEofBuffer = int32(256)
if (*TunixFile)(unsafe.Pointer(pFd)).FpMapRegion == uintptr(0) {
rc = _unixMapfile(tls, pFd, int64(-int32(1)))
if rc != SQLITE_OK {
return rc
}
}
if (*TunixFile)(unsafe.Pointer(pFd)).FmmapSize >= iOff+int64(nAmt)+int64(nEofBuffer) {
*(*uintptr)(unsafe.Pointer(pp)) = (*TunixFile)(unsafe.Pointer(pFd)).FpMapRegion + uintptr(iOff)
(*TunixFile)(unsafe.Pointer(pFd)).FnFetchOut++
}
}
return SQLITE_OK
}
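// Illustrative sketch, not part of the generated code: the bounds check used
// by _unixFetch above. A request for nAmt bytes at offset iOff is served from
// the mapping only if a further 256-byte guard also lies inside the mapped
// size; otherwise *pp stays NULL and the caller falls back to xRead. The
// helper name is hypothetical and exists only for this example.
func _exampleServeFromMap(mmapSize int64, iOff int64, nAmt int32) bool {
return mmapSize >= iOff+int64(nAmt)+int64(256)
}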
// C documentation
//
// /*
// ** If the third argument is non-NULL, then this function releases a
// ** reference obtained by an earlier call to unixFetch(). The second
// ** argument passed to this function must be the same as the corresponding
// ** argument that was passed to the unixFetch() invocation.
// **
// ** Or, if the third argument is NULL, then this function is being called
// ** to inform the VFS layer that, according to POSIX, any existing mapping
// ** may now be invalid and should be unmapped.
// */
func _unixUnfetch(tls *libc.TLS, fd uintptr, iOff Ti64, p uintptr) (r int32) {
var pFd uintptr
_ = pFd
pFd = fd /* The underlying database file */
_ = iOff
/* If p==0 (unmap the entire file) then there must be no outstanding
** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
** then there must be at least one outstanding. */
/* If p!=0, it must match the iOff value. */
if p != 0 {
(*TunixFile)(unsafe.Pointer(pFd)).FnFetchOut--
} else {
_unixUnmapfile(tls, pFd)
}
return SQLITE_OK
}
/*
** Here ends the implementation of all sqlite3_file methods.
**
********************** End sqlite3_file Methods *******************************
******************************************************************************/
/*
** This division contains definitions of sqlite3_io_methods objects that
** implement various file locking strategies. It also contains definitions
** of "finder" functions. A finder-function is used to locate the appropriate
** sqlite3_io_methods object for a particular database file. The pAppData
** field of the sqlite3_vfs VFS objects are initialized to be pointers to
** the correct finder-function for that VFS.
**
** Most finder functions return a pointer to a fixed sqlite3_io_methods
** object. The only interesting finder-function is autolockIoFinder, which
** looks at the filesystem type and tries to guess the best locking
** strategy from that.
**
** For finder-function F, two objects are created:
**
** (1) The real finder-function named "FImpt()".
**
** (2) A constant pointer to this function named just "F".
**
**
** A pointer to the F pointer is used as the pAppData value for VFS
** objects. We have to do this instead of letting pAppData point
** directly at the finder-function since C90 rules prevent a void*
** from being cast into a function pointer.
**
**
** Each instance of this macro generates two objects:
**
** * A constant sqlite3_io_methods object called METHOD that has locking
** methods CLOSE, LOCK, UNLOCK, CKRESLOCK.
**
** * An I/O method finder function called FINDER that returns a pointer
** to the METHOD object in the previous bullet.
*/
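// Illustrative sketch, not part of the generated code: how the double
// indirection described above is consumed. sqlite3_vfs.pAppData holds the
// address of a variable (e.g. _posixIoFinder) that in turn holds the finder
// function pointer, so callers load that pointer and call through it, exactly
// as _fillInUnixFile does further below. The helper name is hypothetical and
// exists only for this example.
func _exampleCallFinder(tls *libc.TLS, pAppData uintptr, zPath uintptr, pFile uintptr) (r uintptr) {
return (*(*func(*libc.TLS, uintptr, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{*(*uintptr)(unsafe.Pointer(pAppData))})))(tls, zPath, pFile)
}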
// C documentation
//
// /*
// ** Here are all of the sqlite3_io_methods objects for each of the
// ** locking strategies. Functions that return pointers to these methods
// ** are also created.
// */
var _posixIoMethods = Tsqlite3_io_methods{
FiVersion: int32(3),
}
func init() {
p := unsafe.Pointer(&_posixIoMethods)
*(*uintptr)(unsafe.Add(p, 8)) = __ccgo_fp(_unixClose)
*(*uintptr)(unsafe.Add(p, 16)) = __ccgo_fp(_unixRead)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_unixWrite)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(_unixTruncate)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_unixSync)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_unixFileSize)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(_unixLock)
*(*uintptr)(unsafe.Add(p, 64)) = __ccgo_fp(_unixUnlock)
*(*uintptr)(unsafe.Add(p, 72)) = __ccgo_fp(_unixCheckReservedLock)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(_unixFileControl)
*(*uintptr)(unsafe.Add(p, 88)) = __ccgo_fp(_unixSectorSize)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_unixDeviceCharacteristics)
*(*uintptr)(unsafe.Add(p, 104)) = __ccgo_fp(_unixShmMap)
*(*uintptr)(unsafe.Add(p, 112)) = __ccgo_fp(_unixShmLock)
*(*uintptr)(unsafe.Add(p, 120)) = __ccgo_fp(_unixShmBarrier)
*(*uintptr)(unsafe.Add(p, 128)) = __ccgo_fp(_unixShmUnmap)
*(*uintptr)(unsafe.Add(p, 136)) = __ccgo_fp(_unixFetch)
*(*uintptr)(unsafe.Add(p, 144)) = __ccgo_fp(_unixUnfetch)
}
func _posixIoFinderImpl(tls *libc.TLS, z uintptr, p uintptr) (r uintptr) {
_ = z
_ = p
return uintptr(unsafe.Pointer(&_posixIoMethods))
}
var _posixIoFinder = uintptr(0)
func init() {
p := unsafe.Pointer(&_posixIoFinder)
*(*uintptr)(unsafe.Add(p, 0)) = __ccgo_fp(_posixIoFinderImpl)
}
var _nolockIoMethods = Tsqlite3_io_methods{
FiVersion: int32(3),
}
func init() {
p := unsafe.Pointer(&_nolockIoMethods)
*(*uintptr)(unsafe.Add(p, 8)) = __ccgo_fp(_nolockClose)
*(*uintptr)(unsafe.Add(p, 16)) = __ccgo_fp(_unixRead)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_unixWrite)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(_unixTruncate)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_unixSync)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_unixFileSize)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(_nolockLock)
*(*uintptr)(unsafe.Add(p, 64)) = __ccgo_fp(_nolockUnlock)
*(*uintptr)(unsafe.Add(p, 72)) = __ccgo_fp(_nolockCheckReservedLock)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(_unixFileControl)
*(*uintptr)(unsafe.Add(p, 88)) = __ccgo_fp(_unixSectorSize)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_unixDeviceCharacteristics)
*(*uintptr)(unsafe.Add(p, 112)) = __ccgo_fp(_unixShmLock)
*(*uintptr)(unsafe.Add(p, 120)) = __ccgo_fp(_unixShmBarrier)
*(*uintptr)(unsafe.Add(p, 128)) = __ccgo_fp(_unixShmUnmap)
*(*uintptr)(unsafe.Add(p, 136)) = __ccgo_fp(_unixFetch)
*(*uintptr)(unsafe.Add(p, 144)) = __ccgo_fp(_unixUnfetch)
}
func _nolockIoFinderImpl(tls *libc.TLS, z uintptr, p uintptr) (r uintptr) {
_ = z
_ = p
return uintptr(unsafe.Pointer(&_nolockIoMethods))
}
var _nolockIoFinder = uintptr(0)
func init() {
p := unsafe.Pointer(&_nolockIoFinder)
*(*uintptr)(unsafe.Add(p, 0)) = __ccgo_fp(_nolockIoFinderImpl)
}
var _dotlockIoMethods = Tsqlite3_io_methods{
FiVersion: int32(1),
}
func init() {
p := unsafe.Pointer(&_dotlockIoMethods)
*(*uintptr)(unsafe.Add(p, 8)) = __ccgo_fp(_dotlockClose)
*(*uintptr)(unsafe.Add(p, 16)) = __ccgo_fp(_unixRead)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_unixWrite)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(_unixTruncate)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_unixSync)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_unixFileSize)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(_dotlockLock)
*(*uintptr)(unsafe.Add(p, 64)) = __ccgo_fp(_dotlockUnlock)
*(*uintptr)(unsafe.Add(p, 72)) = __ccgo_fp(_dotlockCheckReservedLock)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(_unixFileControl)
*(*uintptr)(unsafe.Add(p, 88)) = __ccgo_fp(_unixSectorSize)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_unixDeviceCharacteristics)
*(*uintptr)(unsafe.Add(p, 112)) = __ccgo_fp(_unixShmLock)
*(*uintptr)(unsafe.Add(p, 120)) = __ccgo_fp(_unixShmBarrier)
*(*uintptr)(unsafe.Add(p, 128)) = __ccgo_fp(_unixShmUnmap)
*(*uintptr)(unsafe.Add(p, 136)) = __ccgo_fp(_unixFetch)
*(*uintptr)(unsafe.Add(p, 144)) = __ccgo_fp(_unixUnfetch)
}
func _dotlockIoFinderImpl(tls *libc.TLS, z uintptr, p uintptr) (r uintptr) {
_ = z
_ = p
return uintptr(unsafe.Pointer(&_dotlockIoMethods))
}
var _dotlockIoFinder = uintptr(0)
func init() {
p := unsafe.Pointer(&_dotlockIoFinder)
*(*uintptr)(unsafe.Add(p, 0)) = __ccgo_fp(_dotlockIoFinderImpl)
}
/*
** The proxy locking method is a "super-method" in the sense that it
** opens secondary file descriptors for the conch and lock files and
** it uses proxy, dot-file, AFP, and flock() locking methods on those
** secondary files. For this reason, the division that implements
** proxy locking is located much further down in the file. But we need
** to go ahead and define the sqlite3_io_methods and finder function
** for proxy locking here. So we forward declare the I/O methods.
*/
/* nfs lockd on OSX 10.3+ doesn't clear write locks when a read lock is set */
// C documentation
//
// /*
// ** An abstract type for a pointer to an IO method finder function:
// */
type Tfinder_type = uintptr
type finder_type = Tfinder_type
/****************************************************************************
**************************** sqlite3_vfs methods ****************************
**
** This division contains the implementation of methods on the
** sqlite3_vfs object.
*/
// C documentation
//
// /*
// ** Initialize the contents of the unixFile structure pointed to by pId.
// */
func _fillInUnixFile(tls *libc.TLS, pVfs uintptr, h int32, pId uintptr, zFilename uintptr, ctrlFlags int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nFilename, rc int32
var pLockingStyle, pNew, zLockFile, v1, p2, p3 uintptr
_, _, _, _, _, _, _, _ = nFilename, pLockingStyle, pNew, rc, zLockFile, v1, p2, p3
pNew = pId
rc = SQLITE_OK
/* No locking occurs in temporary files */
(*TunixFile)(unsafe.Pointer(pNew)).Fh = h
(*TunixFile)(unsafe.Pointer(pNew)).FpVfs = pVfs
(*TunixFile)(unsafe.Pointer(pNew)).FzPath = zFilename
(*TunixFile)(unsafe.Pointer(pNew)).FctrlFlags = uint16(uint8(ctrlFlags))
(*TunixFile)(unsafe.Pointer(pNew)).FmmapSizeMax = _sqlite3Config.FszMmap
if ctrlFlags&int32(UNIXFILE_URI) != 0 {
v1 = zFilename
} else {
v1 = uintptr(0)
}
if Xsqlite3_uri_boolean(tls, v1, __ccgo_ts+3831, int32(SQLITE_POWERSAFE_OVERWRITE)) != 0 {
p2 = pNew + 30
*(*uint16)(unsafe.Pointer(p2)) = uint16(int32(*(*uint16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(UNIXFILE_PSOW))
}
if libc.Xstrcmp(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FzName, __ccgo_ts+3836) == 0 {
p3 = pNew + 30
*(*uint16)(unsafe.Pointer(p3)) = uint16(int32(*(*uint16)(unsafe.Pointer(p3))) | libc.Int32FromInt32(UNIXFILE_EXCL))
}
if ctrlFlags&int32(UNIXFILE_NOLOCK) != 0 {
pLockingStyle = uintptr(unsafe.Pointer(&_nolockIoMethods))
} else {
pLockingStyle = (*(*func(*libc.TLS, uintptr, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{*(*uintptr)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData))})))(tls, zFilename, pNew)
}
if pLockingStyle == uintptr(unsafe.Pointer(&_posixIoMethods)) {
_unixEnterMutex(tls)
rc = _findInodeInfo(tls, pNew, pNew+16)
if rc != SQLITE_OK {
/* If an error occurred in findInodeInfo(), close the file descriptor
** immediately, before releasing the mutex. findInodeInfo() may fail
** in two scenarios:
**
** (a) A call to fstat() failed.
** (b) A malloc failed.
**
** Scenario (b) may only occur if the process is holding no other
** file descriptors open on the same file. If there were other file
** descriptors on this file, then no malloc would be required by
** findInodeInfo(). If this is the case, it is quite safe to close
** handle h - as it is guaranteed that no posix locks will be released
** by doing so.
**
** If scenario (a) caused the error then things are not so safe. The
** implicit assumption here is that if fstat() fails, things are in
** such bad shape that dropping a lock or two doesn't matter much.
*/
_robust_close(tls, pNew, h, int32(43909))
h = -int32(1)
}
_unixLeaveMutex(tls)
} else {
if pLockingStyle == uintptr(unsafe.Pointer(&_dotlockIoMethods)) {
nFilename = int32(libc.Xstrlen(tls, zFilename)) + int32(6)
zLockFile = Xsqlite3_malloc64(tls, uint64(nFilename))
if zLockFile == uintptr(0) {
rc = int32(SQLITE_NOMEM)
} else {
Xsqlite3_snprintf(tls, nFilename, zLockFile, __ccgo_ts+3846, libc.VaList(bp+8, zFilename))
}
(*TunixFile)(unsafe.Pointer(pNew)).FlockingContext = zLockFile
}
}
_storeLastErrno(tls, pNew, 0)
if rc != SQLITE_OK {
if h >= 0 {
_robust_close(tls, pNew, h, int32(43994))
}
} else {
(*Tsqlite3_file)(unsafe.Pointer(pId)).FpMethods = pLockingStyle
_verifyDbFile(tls, pNew)
}
return rc
}
// C documentation
//
// /*
// ** Directories to consider for temp files.
// */
var _azTempDirs = [6]uintptr{
2: __ccgo_ts + 3854,
3: __ccgo_ts + 3863,
4: __ccgo_ts + 3872,
5: __ccgo_ts + 1663,
}
// C documentation
//
// /*
// ** Initialize first two members of azTempDirs[] array.
// */
func _unixTempFileInit(tls *libc.TLS) {
_azTempDirs[0] = libc.Xgetenv(tls, __ccgo_ts+3877)
_azTempDirs[int32(1)] = libc.Xgetenv(tls, __ccgo_ts+3891)
}
// C documentation
//
// /*
// ** Return the name of a directory in which to put temporary files.
// ** If no suitable temporary file directory can be found, return NULL.
// */
func _unixTempFileDir(tls *libc.TLS) (r uintptr) {
bp := tls.Alloc(224)
defer tls.Free(224)
var i, v1 uint32
var zDir uintptr
var _ /* buf at bp+0 */ Tstat
_, _, _ = i, zDir, v1
i = uint32(0)
zDir = Xsqlite3_temp_directory
for int32(1) != 0 {
if zDir != uintptr(0) && (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(4)].FpCurrent})))(tls, zDir, bp) == 0 && int32((*(*Tstat)(unsafe.Pointer(bp))).Fst_mode)&int32(0170000) == int32(0040000) && (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(2)].FpCurrent})))(tls, zDir, int32(03)) == 0 {
return zDir
}
if uint64(i) >= libc.Uint64FromInt64(48)/libc.Uint64FromInt64(8) {
break
}
v1 = i
i++
zDir = _azTempDirs[v1]
}
return uintptr(0)
}
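// Illustrative sketch, not part of the generated code: the selection logic of
// _unixTempFileDir above expressed over plain Go strings. sqlite3_temp_directory
// is consulted first, then the azTempDirs candidates; the first entry that is a
// writable, searchable directory wins, and NULL (here "") is returned when none
// qualifies. The helper and its isUsableDir callback, which stands in for the
// stat()/access() checks, are hypothetical and exist only for this example.
func _exampleChooseTempDir(candidates []string, isUsableDir func(string) bool) string {
for _, dir := range candidates {
if dir != "" && isUsableDir(dir) {
return dir
}
}
return ""
}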
// C documentation
//
// /*
// ** Create a temporary file name in zBuf. zBuf must be allocated
// ** by the calling process and must be big enough to hold at least
// ** pVfs->mxPathname bytes.
// */
func _unixGetTempname(tls *libc.TLS, nBuf int32, zBuf uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var iLimit, rc, v1 int32
var zDir uintptr
var v2 bool
var _ /* r at bp+0 */ Tu64
_, _, _, _, _ = iLimit, rc, zDir, v1, v2
iLimit = 0
rc = SQLITE_OK
/* It's odd to simulate an io-error here, but really this is just
** using the io-error infrastructure to test that SQLite handles this
** function failing.
*/
*(*int8)(unsafe.Pointer(zBuf)) = 0
Xsqlite3_mutex_enter(tls, _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_VFS1)))
zDir = _unixTempFileDir(tls)
if zDir == uintptr(0) {
rc = libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(25)< int32(10) {
rc = int32(SQLITE_ERROR)
break
}
}
}
Xsqlite3_mutex_leave(tls, _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_VFS1)))
return rc
}
// C documentation
//
// /*
// ** Search for an unused file descriptor that was opened on the database
// ** file (not a journal or super-journal file) identified by pathname
// ** zPath with SQLITE_OPEN_XXX flags matching those passed as the second
// ** argument to this function.
// **
// ** Such a file descriptor may exist if a database connection was closed
// ** but the associated file descriptor could not be closed because some
// ** other file descriptor open on the same file is holding a file-lock.
// ** Refer to comments in the unixClose() function and the lengthy comment
// ** describing "Posix Advisory Locking" at the start of this file for
// ** further details. Also, ticket #4018.
// **
// ** If a suitable file descriptor is found, then it is returned. If no
// ** such file descriptor is located, -1 is returned.
// */
func _findReusableFd(tls *libc.TLS, zPath uintptr, flags int32) (r uintptr) {
bp := tls.Alloc(224)
defer tls.Free(224)
var pInode, pUnused, pp uintptr
var _ /* sStat at bp+0 */ Tstat
_, _, _ = pInode, pUnused, pp
pUnused = uintptr(0) /* Results of stat() call */
_unixEnterMutex(tls)
/* A stat() call may fail for various reasons. If this happens, it is
** almost certain that an open() call on the same path will also fail.
** For this reason, if an error occurs in the stat() call here, it is
** ignored and -1 is returned. The caller will try to open a new file
** descriptor on the same path, fail, and return an error to SQLite.
**
** Even if a subsequent open() call does succeed, the consequences of
** not searching for a reusable file descriptor are not dire. */
if _inodeList != uintptr(0) && 0 == (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(4)].FpCurrent})))(tls, zPath, bp) {
pInode = _inodeList
for pInode != 0 && ((*TunixInodeInfo)(unsafe.Pointer(pInode)).FfileId.Fdev != (*(*Tstat)(unsafe.Pointer(bp))).Fst_dev || (*TunixInodeInfo)(unsafe.Pointer(pInode)).FfileId.Fino != (*(*Tstat)(unsafe.Pointer(bp))).Fst_ino) {
pInode = (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpNext
}
if pInode != 0 {
Xsqlite3_mutex_enter(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
flags &= libc.Int32FromInt32(SQLITE_OPEN_READONLY) | libc.Int32FromInt32(SQLITE_OPEN_READWRITE)
pp = pInode + 40
for {
if !(*(*uintptr)(unsafe.Pointer(pp)) != 0 && (*TUnixUnusedFd)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pp)))).Fflags != flags) {
break
}
goto _1
_1:
;
pp = *(*uintptr)(unsafe.Pointer(pp)) + 8
}
pUnused = *(*uintptr)(unsafe.Pointer(pp))
if pUnused != 0 {
*(*uintptr)(unsafe.Pointer(pp)) = (*TUnixUnusedFd)(unsafe.Pointer(pUnused)).FpNext
}
Xsqlite3_mutex_leave(tls, (*TunixInodeInfo)(unsafe.Pointer(pInode)).FpLockMutex)
}
}
_unixLeaveMutex(tls)
return pUnused
}
// C documentation
//
// /*
// ** Find the mode, uid and gid of file zFile.
// */
func _getFileMode(tls *libc.TLS, zFile uintptr, pMode uintptr, pUid uintptr, pGid uintptr) (r int32) {
bp := tls.Alloc(224)
defer tls.Free(224)
var rc int32
var _ /* sStat at bp+0 */ Tstat
_ = rc /* Output of stat() on database file */
rc = SQLITE_OK
if 0 == (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(4)].FpCurrent})))(tls, zFile, bp) {
*(*Tmode_t)(unsafe.Pointer(pMode)) = uint16(int32((*(*Tstat)(unsafe.Pointer(bp))).Fst_mode) & int32(0777))
*(*Tuid_t)(unsafe.Pointer(pUid)) = (*(*Tstat)(unsafe.Pointer(bp))).Fst_uid
*(*Tgid_t)(unsafe.Pointer(pGid)) = (*(*Tstat)(unsafe.Pointer(bp))).Fst_gid
} else {
rc = libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(7)<-journal"
** "-wal"
** "-journalNN"
** "-walNN"
**
** where NN is a decimal number. The NN naming schemes are
** used by the test_multiplex.c module.
**
** In normal operation, the journal file name will always contain
** a '-' character. However in 8+3 filename mode, or if a corrupt
** rollback journal specifies a super-journal with a goofy name, then
** the '-' might be missing or the '-' might be the first character in
** the filename. In that case, just return SQLITE_OK with *pMode==0.
*/
nDb = _sqlite3Strlen30(tls, zPath) - int32(1)
for nDb > 0 && int32(*(*int8)(unsafe.Pointer(zPath + uintptr(nDb)))) != int32('.') {
if int32(*(*int8)(unsafe.Pointer(zPath + uintptr(nDb)))) == int32('-') {
libc.Xmemcpy(tls, bp, zPath, uint64(nDb))
(*(*[513]int8)(unsafe.Pointer(bp)))[nDb] = int8('\000')
rc = _getFileMode(tls, bp, pMode, pUid, pGid)
break
}
nDb--
}
} else {
if flags&int32(SQLITE_OPEN_DELETEONCLOSE) != 0 {
*(*Tmode_t)(unsafe.Pointer(pMode)) = uint16(0600)
} else {
if flags&int32(SQLITE_OPEN_URI) != 0 {
/* If this is a main database file and the file was opened using a URI
** filename, check for the "modeof" parameter. If present, interpret
** its value as a filename and try to copy the mode, uid and gid from
** that file. */
z = Xsqlite3_uri_parameter(tls, zPath, __ccgo_ts+3915)
if z != 0 {
rc = _getFileMode(tls, z, pMode, pUid, pGid)
}
}
}
}
return rc
}
// C documentation
//
// /*
// ** Open the file zPath.
// **
// ** Previously, the SQLite OS layer used three functions in place of this
// ** one:
// **
// ** sqlite3OsOpenReadWrite();
// ** sqlite3OsOpenReadOnly();
// ** sqlite3OsOpenExclusive();
// **
// ** These calls correspond to the following combinations of flags:
// **
// ** ReadWrite() -> (READWRITE | CREATE)
// ** ReadOnly() -> (READONLY)
// ** OpenExclusive() -> (READWRITE | CREATE | EXCLUSIVE)
// **
// ** The old OpenExclusive() accepted a boolean argument - "delFlag". If
// ** true, the file was configured to be automatically deleted when the
// ** file handle closed. To achieve the same effect using this new
// ** interface, add the DELETEONCLOSE flag to those specified above for
// ** OpenExclusive().
// */
func _unixOpen(tls *libc.TLS, pVfs uintptr, zPath uintptr, pFile uintptr, flags int32, pOutFlags uintptr) (r int32) {
bp := tls.Alloc(528)
defer tls.Free(528)
var ctrlFlags, eType, fd, isCreate, isDelete, isExclusive, isNewJrnl, isReadWrite, isReadonly, noLock, openFlags, rc, rc2 int32
var p, pUnused, zName uintptr
var _ /* gid at bp+520 */ Tgid_t
var _ /* openMode at bp+514 */ Tmode_t
var _ /* uid at bp+516 */ Tuid_t
var _ /* zTmpname at bp+0 */ [514]int8
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = ctrlFlags, eType, fd, isCreate, isDelete, isExclusive, isNewJrnl, isReadWrite, isReadonly, noLock, openFlags, p, pUnused, rc, rc2, zName
p = pFile
fd = -int32(1) /* File descriptor returned by open() */
openFlags = 0 /* Flags to pass to open() */
eType = flags & int32(0x0FFF00) /* True to omit locking primitives */
rc = SQLITE_OK /* Function Return Code */
ctrlFlags = 0 /* UNIXFILE_* flags */
isExclusive = flags & int32(SQLITE_OPEN_EXCLUSIVE)
isDelete = flags & int32(SQLITE_OPEN_DELETEONCLOSE)
isCreate = flags & int32(SQLITE_OPEN_CREATE)
isReadonly = flags & int32(SQLITE_OPEN_READONLY)
isReadWrite = flags & int32(SQLITE_OPEN_READWRITE)
/* If creating a super- or main-file journal, this function will open
** a file-descriptor on the directory too. The first time unixSync()
** is called the directory file descriptor will be fsync()ed and close()d.
*/
isNewJrnl = libc.BoolInt32(isCreate != 0 && (eType == int32(SQLITE_OPEN_SUPER_JOURNAL) || eType == int32(SQLITE_OPEN_MAIN_JOURNAL) || eType == int32(SQLITE_OPEN_WAL)))
zName = zPath
/* Check the following statements are true:
**
** (a) Exactly one of the READWRITE and READONLY flags must be set, and
** (b) if CREATE is set, then READWRITE must also be set, and
** (c) if EXCLUSIVE is set, then CREATE must also be set.
** (d) if DELETEONCLOSE is set, then CREATE must also be set.
*/
/* The main DB, main journal, WAL file and super-journal are never
** automatically deleted. Nor are they ever temporary files. */
/* Assert that the upper layer has set one of the "file-type" flags. */
/* Detect a pid change and reset the PRNG. There is a race condition
** here such that two or more threads all trying to open databases at
** the same instant might all reset the PRNG. But multiple resets
** are harmless.
*/
if libc.AtomicLoadPInt32(uintptr(unsafe.Pointer(&_randomnessPid))) != libc.Xgetpid(tls) {
libc.AtomicStorePInt32(uintptr(unsafe.Pointer(&_randomnessPid)), libc.Xgetpid(tls))
Xsqlite3_randomness(tls, 0, uintptr(0))
}
libc.Xmemset(tls, p, 0, uint64(120))
if eType == int32(SQLITE_OPEN_MAIN_DB) {
pUnused = _findReusableFd(tls, zName, flags)
if pUnused != 0 {
fd = (*TUnixUnusedFd)(unsafe.Pointer(pUnused)).Ffd
} else {
pUnused = Xsqlite3_malloc64(tls, uint64(16))
if !(pUnused != 0) {
return int32(SQLITE_NOMEM)
}
}
(*TunixFile)(unsafe.Pointer(p)).FpPreallocatedUnused = pUnused
/* Database filenames are double-zero terminated if they are not
** URIs with parameters. Hence, they can always be passed into
** sqlite3_uri_parameter(). */
} else {
if !(zName != 0) {
/* If zName is NULL, the upper layer is requesting a temp file. */
rc = _unixGetTempname(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FmxPathname, bp)
if rc != SQLITE_OK {
return rc
}
zName = bp
/* Generated temporary filenames are always double-zero terminated
** for use by sqlite3_uri_parameter(). */
}
}
/* Determine the value of the flags parameter passed to POSIX function
** open(). These must be calculated even if open() is not called, as
** they may be stored as part of the file handle and used by the
** 'conch file' locking functions later on. */
if isReadonly != 0 {
openFlags |= O_RDONLY
}
if isReadWrite != 0 {
openFlags |= int32(O_RDWR)
}
if isCreate != 0 {
openFlags |= int32(O_CREAT)
}
if isExclusive != 0 {
openFlags |= libc.Int32FromInt32(O_EXCL) | libc.Int32FromInt32(O_NOFOLLOW)
}
openFlags |= libc.Int32FromInt32(O_LARGEFILE) | libc.Int32FromInt32(O_BINARY) | libc.Int32FromInt32(O_NOFOLLOW)
if fd < 0 { /* Groupid for the file */
rc = _findCreateFileMode(tls, zName, flags, bp+514, bp+516, bp+520)
if rc != SQLITE_OK {
return rc
}
fd = _robust_open(tls, zName, openFlags, *(*Tmode_t)(unsafe.Pointer(bp + 514)))
if fd < 0 {
if isNewJrnl != 0 && *(*int32)(unsafe.Pointer(libc.X__error(tls))) == int32(EACCES) && (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(2)].FpCurrent})))(tls, zName, F_OK) != 0 {
/* If unable to create a journal because the directory is not
** writable, change the error code to indicate that. */
rc = libc.Int32FromInt32(SQLITE_READONLY) | libc.Int32FromInt32(6)< 0))
} else {
*(*int32)(unsafe.Pointer(pResOut)) = libc.BoolInt32((*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(2)].FpCurrent})))(tls, zPath, libc.Int32FromInt32(W_OK)|libc.Int32FromInt32(R_OK)) == 0)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** A pathname under construction
// */
type TDbPath = struct {
Frc int32
FnSymlink int32
FzOut uintptr
FnOut int32
FnUsed int32
}
type DbPath = TDbPath
type TDbPath1 = struct {
Frc int32
FnSymlink int32
FzOut uintptr
FnOut int32
FnUsed int32
}
type DbPath1 = TDbPath1
// C documentation
//
// /*
// ** Append a single path element to the DbPath under construction
// */
func _appendOnePathElement(tls *libc.TLS, pPath uintptr, zName uintptr, nName int32) {
bp := tls.Alloc(1264)
defer tls.Free(1264)
var got Tssize_t
var zIn, v2, v4, v6 uintptr
var v1, v3, v5 int32
var _ /* buf at bp+0 */ Tstat
var _ /* zLnk at bp+224 */ [1026]int8
_, _, _, _, _, _, _, _ = got, zIn, v1, v2, v3, v4, v5, v6
if int32(*(*int8)(unsafe.Pointer(zName))) == int32('.') {
if nName == int32(1) {
return
}
if int32(*(*int8)(unsafe.Pointer(zName + 1))) == int32('.') && nName == int32(2) {
if (*TDbPath)(unsafe.Pointer(pPath)).FnUsed > int32(1) {
for {
v2 = pPath + 20
*(*int32)(unsafe.Pointer(v2))--
v1 = *(*int32)(unsafe.Pointer(v2))
if !(int32(*(*int8)(unsafe.Pointer((*TDbPath)(unsafe.Pointer(pPath)).FzOut + uintptr(v1)))) != int32('/')) {
break
}
}
}
return
}
}
if (*TDbPath)(unsafe.Pointer(pPath)).FnUsed+nName+int32(2) >= (*TDbPath)(unsafe.Pointer(pPath)).FnOut {
(*TDbPath)(unsafe.Pointer(pPath)).Frc = int32(SQLITE_ERROR)
return
}
v4 = pPath + 20
v3 = *(*int32)(unsafe.Pointer(v4))
*(*int32)(unsafe.Pointer(v4))++
*(*int8)(unsafe.Pointer((*TDbPath)(unsafe.Pointer(pPath)).FzOut + uintptr(v3))) = int8('/')
libc.Xmemcpy(tls, (*TDbPath)(unsafe.Pointer(pPath)).FzOut+uintptr((*TDbPath)(unsafe.Pointer(pPath)).FnUsed), zName, uint64(nName))
*(*int32)(unsafe.Pointer(pPath + 20)) += nName
if (*TDbPath)(unsafe.Pointer(pPath)).Frc == SQLITE_OK {
*(*int8)(unsafe.Pointer((*TDbPath)(unsafe.Pointer(pPath)).FzOut + uintptr((*TDbPath)(unsafe.Pointer(pPath)).FnUsed))) = 0
zIn = (*TDbPath)(unsafe.Pointer(pPath)).FzOut
if (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(27)].FpCurrent})))(tls, zIn, bp) != 0 {
if *(*int32)(unsafe.Pointer(libc.X__error(tls))) != int32(ENOENT) {
(*TDbPath)(unsafe.Pointer(pPath)).Frc = _unixLogErrorAtLine(tls, _sqlite3CantopenError(tls, int32(44680)), __ccgo_ts+3593, zIn, int32(44680))
}
} else {
if int32((*(*Tstat)(unsafe.Pointer(bp))).Fst_mode)&int32(0170000) == int32(0120000) {
v6 = pPath + 4
v5 = *(*int32)(unsafe.Pointer(v6))
*(*int32)(unsafe.Pointer(v6))++
if v5 > int32(SQLITE_MAX_SYMLINK) {
(*TDbPath)(unsafe.Pointer(pPath)).Frc = _sqlite3CantopenError(tls, int32(44686))
return
}
got = (*(*func(*libc.TLS, uintptr, uintptr, Tsize_t) Tssize_t)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(26)].FpCurrent})))(tls, zIn, bp+224, libc.Uint64FromInt64(1026)-libc.Uint64FromInt32(2))
if got <= 0 || got >= libc.Int64FromInt64(1026)-libc.Int64FromInt32(2) {
(*TDbPath)(unsafe.Pointer(pPath)).Frc = _unixLogErrorAtLine(tls, _sqlite3CantopenError(tls, int32(44691)), __ccgo_ts+3584, zIn, int32(44691))
return
}
(*(*[1026]int8)(unsafe.Pointer(bp + 224)))[got] = 0
if int32((*(*[1026]int8)(unsafe.Pointer(bp + 224)))[0]) == int32('/') {
(*TDbPath)(unsafe.Pointer(pPath)).FnUsed = 0
} else {
*(*int32)(unsafe.Pointer(pPath + 20)) -= nName + int32(1)
}
_appendAllPathElements(tls, pPath, bp+224)
}
}
}
}
// C documentation
//
// /*
// ** Append all path elements in zPath to the DbPath under construction.
// */
func _appendAllPathElements(tls *libc.TLS, pPath uintptr, zPath uintptr) {
var i, j, v1 int32
_, _, _ = i, j, v1
i = 0
j = 0
for {
for *(*int8)(unsafe.Pointer(zPath + uintptr(i))) != 0 && int32(*(*int8)(unsafe.Pointer(zPath + uintptr(i)))) != int32('/') {
i++
}
if i > j {
_appendOnePathElement(tls, pPath, zPath+uintptr(j), i-j)
}
j = i + int32(1)
goto _2
_2:
;
v1 = i
i++
if !(*(*int8)(unsafe.Pointer(zPath + uintptr(v1))) != 0) {
break
}
}
}
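// Illustrative sketch, not part of the generated code: the per-element rules
// applied by _appendOnePathElement above, shown on Go string slices. A "."
// element is dropped, ".." pops the previously appended element, and anything
// else is appended; the symlink expansion that the real code performs via
// readlink() is omitted here. The helper name is hypothetical and exists only
// for this example.
func _exampleNormalizePathElements(elems []string) []string {
var out []string
for _, e := range elems {
switch e {
case ".":
// dropped
case "..":
if len(out) > 0 {
out = out[:len(out)-1]
}
default:
out = append(out, e)
}
}
return out
}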
// C documentation
//
// /*
// ** Turn a relative pathname into a full pathname. The relative path
// ** is stored as a nul-terminated string in the buffer pointed to by
// ** zPath.
// **
// ** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes
// ** (in this case, MAX_PATHNAME bytes). The full-path is written to
// ** this buffer before returning.
// */
func _unixFullPathname(tls *libc.TLS, pVfs uintptr, zPath uintptr, nOut int32, zOut uintptr) (r int32) {
bp := tls.Alloc(1056)
defer tls.Free(1056)
var _ /* path at bp+0 */ TDbPath
var _ /* zPwd at bp+24 */ [1026]int8
_ = pVfs
(*(*TDbPath)(unsafe.Pointer(bp))).Frc = 0
(*(*TDbPath)(unsafe.Pointer(bp))).FnUsed = 0
(*(*TDbPath)(unsafe.Pointer(bp))).FnSymlink = 0
(*(*TDbPath)(unsafe.Pointer(bp))).FnOut = nOut
(*(*TDbPath)(unsafe.Pointer(bp))).FzOut = zOut
if int32(*(*int8)(unsafe.Pointer(zPath))) != int32('/') {
if (*(*func(*libc.TLS, uintptr, Tsize_t) uintptr)(unsafe.Pointer(&struct{ uintptr }{_aSyscall[int32(3)].FpCurrent})))(tls, bp+24, libc.Uint64FromInt64(1026)-libc.Uint64FromInt32(2)) == uintptr(0) {
return _unixLogErrorAtLine(tls, _sqlite3CantopenError(tls, int32(44749)), __ccgo_ts+3413, zPath, int32(44749))
}
_appendAllPathElements(tls, bp, bp+24)
}
_appendAllPathElements(tls, bp, zPath)
*(*int8)(unsafe.Pointer(zOut + uintptr((*(*TDbPath)(unsafe.Pointer(bp))).FnUsed))) = 0
if (*(*TDbPath)(unsafe.Pointer(bp))).Frc != 0 || (*(*TDbPath)(unsafe.Pointer(bp))).FnUsed < int32(2) {
return _sqlite3CantopenError(tls, int32(44755))
}
if (*(*TDbPath)(unsafe.Pointer(bp))).FnSymlink != 0 {
return libc.Int32FromInt32(SQLITE_OK) | libc.Int32FromInt32(2)< 0x40000200
**
** This works well on the local file system, but shows a nearly 100x
** slowdown in read performance on AFP because the AFP client disables
** the read cache when byte-range locks are present. Enabling the read
** cache exposes a cache coherency problem that is present on all OS X
** supported network file systems. NFS and AFP both observe the
** close-to-open semantics for ensuring cache coherency
** [http://nfs.sourceforge.net/#faq_a8], which does not effectively
** address the requirements for concurrent database access by multiple
** readers and writers
** [http://www.nabble.com/SQLite-on-NFS-cache-coherency-td15655701.html].
**
** To address the performance and cache coherency issues, proxy file locking
** changes the way database access is controlled by limiting access to a
** single host at a time and moving file locks off of the database file
** and onto a proxy file on the local file system.
**
**
** Using proxy locks
** -----------------
**
** C APIs
**
** sqlite3_file_control(db, dbname, SQLITE_FCNTL_SET_LOCKPROXYFILE,
** | ":auto:");
** sqlite3_file_control(db, dbname, SQLITE_FCNTL_GET_LOCKPROXYFILE,
** &<proxy_path>);
**
**
** SQL pragmas
**
** PRAGMA [database.]lock_proxy_file=<proxy_path> | :auto:
** PRAGMA [database.]lock_proxy_file
**
** Specifying ":auto:" means that if there is a conch file with a matching
** host ID in it, the proxy path in the conch file will be used, otherwise
** a proxy path based on the user's temp dir
** (via confstr(_CS_DARWIN_USER_TEMP_DIR,...)) will be used and the
** actual proxy file name is generated from the name and path of the
** database file. For example:
**
** For database path "/Users/me/foo.db"
** The lock path will be "/sqliteplocks/_Users_me_foo.db:auto:")
**
** Once a lock proxy is configured for a database connection, it cannot
** be removed; however, it may be switched to a different proxy path via
** the above APIs (assuming the conch file is not being held by another
** connection or process).
**
**
** How proxy locking works
** -----------------------
**
** Proxy file locking relies primarily on two new supporting files:
**
** * conch file to limit access to the database file to a single host
** at a time
**
** * proxy file to act as a proxy for the advisory locks normally
** taken on the database
**
** The conch file - to use a proxy file, sqlite must first "hold the conch"
** by taking an sqlite-style shared lock on the conch file, reading the
** contents and comparing the host's unique host ID (see below) and lock
** proxy path against the values stored in the conch. The conch file is
** stored in the same directory as the database file and the file name
** is patterned after the database file name as ".-conch".
** If the conch file does not exist, or its contents do not match the
** host ID and/or proxy path, then the lock is escalated to an exclusive
** lock and the conch file contents are updated with the host ID and proxy
** path and the lock is downgraded to a shared lock again. If the conch
** is held by another process (with a shared lock), the exclusive lock
** will fail and SQLITE_BUSY is returned.
**
** The proxy file - a single-byte file used for all advisory file locks
** normally taken on the database file. This allows for safe sharing
** of the database file for multiple readers and writers on the same
** host (the conch ensures that they all use the same local lock file).
**
** Requesting the lock proxy does not immediately take the conch, it is
** only taken when the first request to lock the database file is made.
** This matches the semantics of the traditional locking behavior, where
** opening a connection to a database file does not take a lock on it.
** The shared lock and an open file descriptor are maintained until
** the connection to the database is closed.
**
** The proxy file and the lock file are never deleted so they only need
** to be created the first time they are used.
**
** Configuration options
** ---------------------
**
** SQLITE_PREFER_PROXY_LOCKING
**
** Database files accessed on non-local file systems are
** automatically configured for proxy locking, lock files are
** named automatically using the same logic as
** PRAGMA lock_proxy_file=":auto:"
**
** SQLITE_PROXY_DEBUG
**
** Enables the logging of error messages during host id file
** retrieval and creation
**
** LOCKPROXYDIR
**
** Overrides the default directory used for lock proxy files that
** are named automatically via the ":auto:" setting
**
** SQLITE_DEFAULT_PROXYDIR_PERMISSIONS
**
** Permissions to use when creating a directory for storing the
** lock proxy files, only used when LOCKPROXYDIR is not set.
**
**
** As mentioned above, when compiled with SQLITE_PREFER_PROXY_LOCKING,
** setting the environment variable SQLITE_FORCE_PROXY_LOCKING to 1 will
** force proxy locking to be used for every database file opened, and 0
** will force automatic proxy locking to be disabled for all database
** files (explicitly calling the SQLITE_FCNTL_SET_LOCKPROXYFILE pragma or
** sqlite3_file_control API is not affected by SQLITE_FORCE_PROXY_LOCKING).
*/
/*
** Proxy locking is only available on MacOSX
*/
/*
** The proxy locking style is intended for use with AFP filesystems.
** And since AFP is only supported on MacOSX, the proxy locking is also
** restricted to MacOSX.
**
**
******************* End of the proxy lock implementation **********************
******************************************************************************/
// C documentation
//
// /*
// ** Initialize the operating system interface.
// **
// ** This routine registers all VFS implementations for unix-like operating
// ** systems. This routine, and the sqlite3_os_end() routine that follows,
// ** should be the only routines in this file that are visible from other
// ** files.
// **
// ** This routine is called once during SQLite initialization and by a
// ** single thread. The memory allocation and mutex subsystems have not
// ** necessarily been initialized when this routine is called, and so they
// ** should not be used.
// */
func Xsqlite3_os_init(tls *libc.TLS) (r int32) {
var i uint32
_ = i
/* Double-check that the aSyscall[] array has been constructed
** correctly. See ticket [bb3a86e890c8e96ab] */
/* Register all VFSes defined in the aVfs[] array */
i = uint32(0)
for {
if !(uint64(i) < libc.Uint64FromInt64(672)/libc.Uint64FromInt64(168)) {
break
}
Xsqlite3_vfs_register(tls, uintptr(unsafe.Pointer(&_aVfs))+uintptr(i)*168, libc.BoolInt32(i == uint32(0)))
goto _1
_1:
;
i++
}
_unixBigLock = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_VFS1))
/* Validate lock assumptions */
/* Number of available locks */
/* Start of locking area */
/* Locks:
** WRITE UNIX_SHM_BASE 120
** CKPT UNIX_SHM_BASE+1 121
** RECOVER UNIX_SHM_BASE+2 122
** READ-0 UNIX_SHM_BASE+3 123
** READ-1 UNIX_SHM_BASE+4 124
** READ-2 UNIX_SHM_BASE+5 125
** READ-3 UNIX_SHM_BASE+6 126
** READ-4 UNIX_SHM_BASE+7 127
** DMS UNIX_SHM_BASE+8 128
*/
/* Byte offset of the deadman-switch */
/* Initialize temp file dir array. */
_unixTempFileInit(tls)
return SQLITE_OK
}
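// Illustrative sketch, not part of the generated code: the lock byte offsets
// listed in the comment above. The locking area starts at
// UNIX_SHM_BASE = (22+SQLITE_SHM_NLOCK)*4 = 120, the same expression that
// _unixShmLock passes to _unixShmSystemLock, so lock slot i (WRITE=0, CKPT=1,
// RECOVER=2, READ-0..READ-4=3..7, DMS=8) lives at byte 120+i of the
// shared-memory file. The helper name is hypothetical and exists only for
// this example.
func _exampleShmLockByteOffset(slot int32) int32 {
return (libc.Int32FromInt32(22)+libc.Int32FromInt32(SQLITE_SHM_NLOCK))*libc.Int32FromInt32(4) + slot
}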
/*
** The following macro defines an initializer for an sqlite3_vfs object.
** The name of the VFS is NAME. The pAppData is a pointer to a pointer
** to the "finder" function. (pAppData is a pointer to a pointer because
** silly C90 rules prohibit a void* from being cast to a function pointer
** and so we have to go through the intermediate pointer to avoid problems
** when compiling with -pedantic-errors on GCC.)
**
** The FINDER parameter to this macro is the name of the pointer to the
** finder-function. The finder-function returns a pointer to the
** sqlite_io_methods object that implements the desired locking
** behaviors. See the division above that contains the IOMETHODS
** macro for additional information on finder-functions.
**
** Most finders simply return a pointer to a fixed sqlite3_io_methods
** object. But the "autolockIoFinder" available on MacOSX does a little
** more than that; it looks at the filesystem type that hosts the
** database file and tries to choose a locking method appropriate for
** that filesystem type.
*/
/*
** All default VFSes for unix are contained in the following array.
**
** Note that the sqlite3_vfs.pNext field of the VFS object is modified
** by the SQLite core when the VFS is registered. So the following
** array cannot be const.
*/
var _aVfs = [4]Tsqlite3_vfs{
0: {
FiVersion: int32(3),
FszOsFile: int32(120),
FmxPathname: int32(MAX_PATHNAME),
FzName: __ccgo_ts + 3941,
},
1: {
FiVersion: int32(3),
FszOsFile: int32(120),
FmxPathname: int32(MAX_PATHNAME),
FzName: __ccgo_ts + 3946,
},
2: {
FiVersion: int32(3),
FszOsFile: int32(120),
FmxPathname: int32(MAX_PATHNAME),
FzName: __ccgo_ts + 3956,
},
3: {
FiVersion: int32(3),
FszOsFile: int32(120),
FmxPathname: int32(MAX_PATHNAME),
FzName: __ccgo_ts + 3836,
},
}
func init() {
p := unsafe.Pointer(&_aVfs)
*(*uintptr)(unsafe.Add(p, 32)) = uintptr(unsafe.Pointer(&_posixIoFinder))
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_unixOpen)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_unixDelete)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(_unixAccess)
*(*uintptr)(unsafe.Add(p, 64)) = __ccgo_fp(_unixFullPathname)
*(*uintptr)(unsafe.Add(p, 72)) = __ccgo_fp(_unixDlOpen)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(_unixDlError)
*(*uintptr)(unsafe.Add(p, 88)) = __ccgo_fp(_unixDlSym)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_unixDlClose)
*(*uintptr)(unsafe.Add(p, 104)) = __ccgo_fp(_unixRandomness)
*(*uintptr)(unsafe.Add(p, 112)) = __ccgo_fp(_unixSleep)
*(*uintptr)(unsafe.Add(p, 120)) = __ccgo_fp(_unixCurrentTime)
*(*uintptr)(unsafe.Add(p, 128)) = __ccgo_fp(_unixGetLastError)
*(*uintptr)(unsafe.Add(p, 136)) = __ccgo_fp(_unixCurrentTimeInt64)
*(*uintptr)(unsafe.Add(p, 144)) = __ccgo_fp(_unixSetSystemCall)
*(*uintptr)(unsafe.Add(p, 152)) = __ccgo_fp(_unixGetSystemCall)
*(*uintptr)(unsafe.Add(p, 160)) = __ccgo_fp(_unixNextSystemCall)
*(*uintptr)(unsafe.Add(p, 200)) = uintptr(unsafe.Pointer(&_nolockIoFinder))
*(*uintptr)(unsafe.Add(p, 208)) = __ccgo_fp(_unixOpen)
*(*uintptr)(unsafe.Add(p, 216)) = __ccgo_fp(_unixDelete)
*(*uintptr)(unsafe.Add(p, 224)) = __ccgo_fp(_unixAccess)
*(*uintptr)(unsafe.Add(p, 232)) = __ccgo_fp(_unixFullPathname)
*(*uintptr)(unsafe.Add(p, 240)) = __ccgo_fp(_unixDlOpen)
*(*uintptr)(unsafe.Add(p, 248)) = __ccgo_fp(_unixDlError)
*(*uintptr)(unsafe.Add(p, 256)) = __ccgo_fp(_unixDlSym)
*(*uintptr)(unsafe.Add(p, 264)) = __ccgo_fp(_unixDlClose)
*(*uintptr)(unsafe.Add(p, 272)) = __ccgo_fp(_unixRandomness)
*(*uintptr)(unsafe.Add(p, 280)) = __ccgo_fp(_unixSleep)
*(*uintptr)(unsafe.Add(p, 288)) = __ccgo_fp(_unixCurrentTime)
*(*uintptr)(unsafe.Add(p, 296)) = __ccgo_fp(_unixGetLastError)
*(*uintptr)(unsafe.Add(p, 304)) = __ccgo_fp(_unixCurrentTimeInt64)
*(*uintptr)(unsafe.Add(p, 312)) = __ccgo_fp(_unixSetSystemCall)
*(*uintptr)(unsafe.Add(p, 320)) = __ccgo_fp(_unixGetSystemCall)
*(*uintptr)(unsafe.Add(p, 328)) = __ccgo_fp(_unixNextSystemCall)
*(*uintptr)(unsafe.Add(p, 368)) = uintptr(unsafe.Pointer(&_dotlockIoFinder))
*(*uintptr)(unsafe.Add(p, 376)) = __ccgo_fp(_unixOpen)
*(*uintptr)(unsafe.Add(p, 384)) = __ccgo_fp(_unixDelete)
*(*uintptr)(unsafe.Add(p, 392)) = __ccgo_fp(_unixAccess)
*(*uintptr)(unsafe.Add(p, 400)) = __ccgo_fp(_unixFullPathname)
*(*uintptr)(unsafe.Add(p, 408)) = __ccgo_fp(_unixDlOpen)
*(*uintptr)(unsafe.Add(p, 416)) = __ccgo_fp(_unixDlError)
*(*uintptr)(unsafe.Add(p, 424)) = __ccgo_fp(_unixDlSym)
*(*uintptr)(unsafe.Add(p, 432)) = __ccgo_fp(_unixDlClose)
*(*uintptr)(unsafe.Add(p, 440)) = __ccgo_fp(_unixRandomness)
*(*uintptr)(unsafe.Add(p, 448)) = __ccgo_fp(_unixSleep)
*(*uintptr)(unsafe.Add(p, 456)) = __ccgo_fp(_unixCurrentTime)
*(*uintptr)(unsafe.Add(p, 464)) = __ccgo_fp(_unixGetLastError)
*(*uintptr)(unsafe.Add(p, 472)) = __ccgo_fp(_unixCurrentTimeInt64)
*(*uintptr)(unsafe.Add(p, 480)) = __ccgo_fp(_unixSetSystemCall)
*(*uintptr)(unsafe.Add(p, 488)) = __ccgo_fp(_unixGetSystemCall)
*(*uintptr)(unsafe.Add(p, 496)) = __ccgo_fp(_unixNextSystemCall)
*(*uintptr)(unsafe.Add(p, 536)) = uintptr(unsafe.Pointer(&_posixIoFinder))
*(*uintptr)(unsafe.Add(p, 544)) = __ccgo_fp(_unixOpen)
*(*uintptr)(unsafe.Add(p, 552)) = __ccgo_fp(_unixDelete)
*(*uintptr)(unsafe.Add(p, 560)) = __ccgo_fp(_unixAccess)
*(*uintptr)(unsafe.Add(p, 568)) = __ccgo_fp(_unixFullPathname)
*(*uintptr)(unsafe.Add(p, 576)) = __ccgo_fp(_unixDlOpen)
*(*uintptr)(unsafe.Add(p, 584)) = __ccgo_fp(_unixDlError)
*(*uintptr)(unsafe.Add(p, 592)) = __ccgo_fp(_unixDlSym)
*(*uintptr)(unsafe.Add(p, 600)) = __ccgo_fp(_unixDlClose)
*(*uintptr)(unsafe.Add(p, 608)) = __ccgo_fp(_unixRandomness)
*(*uintptr)(unsafe.Add(p, 616)) = __ccgo_fp(_unixSleep)
*(*uintptr)(unsafe.Add(p, 624)) = __ccgo_fp(_unixCurrentTime)
*(*uintptr)(unsafe.Add(p, 632)) = __ccgo_fp(_unixGetLastError)
*(*uintptr)(unsafe.Add(p, 640)) = __ccgo_fp(_unixCurrentTimeInt64)
*(*uintptr)(unsafe.Add(p, 648)) = __ccgo_fp(_unixSetSystemCall)
*(*uintptr)(unsafe.Add(p, 656)) = __ccgo_fp(_unixGetSystemCall)
*(*uintptr)(unsafe.Add(p, 664)) = __ccgo_fp(_unixNextSystemCall)
}
// C documentation
//
// /*
// ** Shutdown the operating system interface.
// **
// ** Some operating systems might need to do some cleanup in this routine,
// ** to release dynamically allocated objects. But not on unix.
// ** This routine is a no-op for unix.
// */
func Xsqlite3_os_end(tls *libc.TLS) (r int32) {
_unixBigLock = uintptr(0)
return SQLITE_OK
}
/************** End of os_unix.c *********************************************/
/************** Begin file os_win.c ******************************************/
/*
** 2004 May 22
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
** This file contains code that is specific to Windows.
*/
/* #include "sqliteInt.h" */
/************** End of os_win.c **********************************************/
/************** Begin file memdb.c *******************************************/
/*
** 2016-09-07
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
******************************************************************************
**
** This file implements an in-memory VFS. A database is held as a contiguous
** block of memory.
**
** This file also implements interface sqlite3_serialize() and
** sqlite3_deserialize().
*/
/* #include "sqliteInt.h" */
// C documentation
//
// /*
// ** Forward declaration of objects used by this utility
// */
type TMemVfs = struct {
FiVersion int32
FszOsFile int32
FmxPathname int32
FpNext uintptr
FzName uintptr
FpAppData uintptr
FxOpen uintptr
FxDelete uintptr
FxAccess uintptr
FxFullPathname uintptr
FxDlOpen uintptr
FxDlError uintptr
FxDlSym uintptr
FxDlClose uintptr
FxRandomness uintptr
FxSleep uintptr
FxCurrentTime uintptr
FxGetLastError uintptr
FxCurrentTimeInt64 uintptr
FxSetSystemCall uintptr
FxGetSystemCall uintptr
FxNextSystemCall uintptr
}
type MemVfs = TMemVfs
type TMemFile = struct {
Fbase Tsqlite3_file
FpStore uintptr
FeLock int32
}
type MemFile = TMemFile
type TMemStore = struct {
Fsz Tsqlite3_int64
FszAlloc Tsqlite3_int64
FszMax Tsqlite3_int64
FaData uintptr
FpMutex uintptr
FnMmap int32
FmFlags uint32
FnRdLock int32
FnWrLock int32
FnRef int32
FzFName uintptr
}
type MemStore = TMemStore
/* Access to a lower-level VFS that (might) implement dynamic loading,
** access to randomness, etc.
*/
/* Storage for a memdb file.
**
** An memdb object can be shared or separate. Shared memdb objects can be
** used by more than one database connection. Mutexes are used by shared
** memdb objects to coordinate access. Separate memdb objects are only
** connected to a single database connection and do not require additional
** mutexes.
**
** Shared memdb objects have .zFName!=0 and .pMutex!=0. They are created
** using "file:/name?vfs=memdb". The first character of the name must be
** "/" or else the object will be a separate memdb object. All shared
** memdb objects are stored in memdb_g.apMemStore[] in an arbitrary order.
**
** Separate memdb objects are created using a name that does not begin
** with "/" or using sqlite3_deserialize().
**
** Access rules for shared MemStore objects:
**
** * .zFName is initialized when the object is created and afterwards
** is unchanged until the object is destroyed. So it can be accessed
** at any time as long as we know the object is not being destroyed,
** which means while either the SQLITE_MUTEX_STATIC_VFS1 mutex or
** .pMutex is held or the object is not part of memdb_g.apMemStore[].
**
** * The .pMutex can only be changed while holding the
** SQLITE_MUTEX_STATIC_VFS1 mutex or while the object is not part
** of memdb_g.apMemStore[].
**
** * Other fields can only be changed while holding the .pMutex mutex
** or when the .nRef is less than zero and the object is not part of
** memdb_g.apMemStore[].
**
** * The .aData pointer has the added requirement that it can only
** be changed (for resizing) when nMmap is zero.
**
*/
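// Illustrative sketch (not part of the generated amalgamation): the naming
// rule described above reduces to a simple predicate. A name of more than one
// character that starts with "/" (or "\\") selects a shared MemStore, e.g.
// "file:/name?vfs=memdb"; anything else yields a separate object. The helper
// below mirrors the test performed in _memdbOpen and is purely hypothetical.
func exampleIsSharedMemdbName(zName string) bool {
	return len(zName) > 1 && (zName[0] == '/' || zName[0] == '\\')
}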
type TMemStore1 = struct {
Fsz Tsqlite3_int64
FszAlloc Tsqlite3_int64
FszMax Tsqlite3_int64
FaData uintptr
FpMutex uintptr
FnMmap int32
FmFlags uint32
FnRdLock int32
FnWrLock int32
FnRef int32
FzFName uintptr
}
type MemStore1 = TMemStore1
/* An open file */
type TMemFile1 = struct {
Fbase Tsqlite3_file
FpStore uintptr
FeLock int32
}
type MemFile1 = TMemFile1
// C documentation
//
// /*
// ** File-scope variables for holding the memdb files that are accessible
// ** to multiple database connections in separate threads.
// **
// ** Must hold SQLITE_MUTEX_STATIC_VFS1 to access any part of this object.
// */
type TMemFS = struct {
FnMemStore int32
FapMemStore uintptr
}
type MemFS = TMemFS
// C documentation
//
// /*
// ** File-scope variables for holding the memdb files that are accessible
// ** to multiple database connections in separate threads.
// **
// ** Must hold SQLITE_MUTEX_STATIC_VFS1 to access any part of this object.
// */
var _memdb_g TMemFS
var _memdb_vfs = Tsqlite3_vfs{
FiVersion: int32(2),
FmxPathname: int32(1024),
FzName: __ccgo_ts + 3969,
}
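// The numeric offsets in the init function below are the byte offsets of the
// corresponding function-pointer fields of Tsqlite3_vfs on this target
// (FxOpen at 40, FxAccess at 56, FxFullPathname at 64, ..., FxGetLastError at
// 128, FxCurrentTimeInt64 at 136); slots memdb does not implement, such as
// FxDelete at 48 and FxCurrentTime at 120, are left at zero.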
func init() {
p := unsafe.Pointer(&_memdb_vfs)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_memdbOpen)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(_memdbAccess)
*(*uintptr)(unsafe.Add(p, 64)) = __ccgo_fp(_memdbFullPathname)
*(*uintptr)(unsafe.Add(p, 72)) = __ccgo_fp(_memdbDlOpen)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(_memdbDlError)
*(*uintptr)(unsafe.Add(p, 88)) = __ccgo_fp(_memdbDlSym)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_memdbDlClose)
*(*uintptr)(unsafe.Add(p, 104)) = __ccgo_fp(_memdbRandomness)
*(*uintptr)(unsafe.Add(p, 112)) = __ccgo_fp(_memdbSleep)
*(*uintptr)(unsafe.Add(p, 128)) = __ccgo_fp(_memdbGetLastError)
*(*uintptr)(unsafe.Add(p, 136)) = __ccgo_fp(_memdbCurrentTimeInt64)
}
var _memdb_io_methods = Tsqlite3_io_methods{
FiVersion: int32(3),
}
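// As with _memdb_vfs above, the offsets below are the byte offsets of the
// Tsqlite3_io_methods function pointers (FxClose at 8, FxRead at 16, FxWrite
// at 24, ..., FxFetch at 136, FxUnfetch at 144); the unassigned slots
// (xCheckReservedLock, xSectorSize and the xShm* methods) appear to be left
// at zero deliberately for this VFS.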
func init() {
p := unsafe.Pointer(&_memdb_io_methods)
*(*uintptr)(unsafe.Add(p, 8)) = __ccgo_fp(_memdbClose)
*(*uintptr)(unsafe.Add(p, 16)) = __ccgo_fp(_memdbRead)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_memdbWrite)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(_memdbTruncate)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_memdbSync)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_memdbFileSize)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(_memdbLock)
*(*uintptr)(unsafe.Add(p, 64)) = __ccgo_fp(_memdbUnlock)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(_memdbFileControl)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_memdbDeviceCharacteristics)
*(*uintptr)(unsafe.Add(p, 136)) = __ccgo_fp(_memdbFetch)
*(*uintptr)(unsafe.Add(p, 144)) = __ccgo_fp(_memdbUnfetch)
}
// C documentation
//
// /*
// ** Enter/leave the mutex on a MemStore
// */
func _memdbEnter(tls *libc.TLS, p uintptr) {
Xsqlite3_mutex_enter(tls, (*TMemStore)(unsafe.Pointer(p)).FpMutex)
}
func _memdbLeave(tls *libc.TLS, p uintptr) {
Xsqlite3_mutex_leave(tls, (*TMemStore)(unsafe.Pointer(p)).FpMutex)
}
// C documentation
//
// /*
// ** Close an memdb-file.
// ** Free the underlying MemStore object when its refcount drops to zero
// ** or less.
// */
func _memdbClose(tls *libc.TLS, pFile uintptr) (r int32) {
var i, v2 int32
var p, pVfsMutex, v3 uintptr
_, _, _, _, _ = i, p, pVfsMutex, v2, v3
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
if (*TMemStore)(unsafe.Pointer(p)).FzFName != 0 {
pVfsMutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_VFS1))
Xsqlite3_mutex_enter(tls, pVfsMutex)
i = 0
for {
if !(i < _memdb_g.FnMemStore) {
break
}
if *(*uintptr)(unsafe.Pointer(_memdb_g.FapMemStore + uintptr(i)*8)) == p {
_memdbEnter(tls, p)
if (*TMemStore)(unsafe.Pointer(p)).FnRef == int32(1) {
v3 = uintptr(unsafe.Pointer(&_memdb_g))
*(*int32)(unsafe.Pointer(v3))--
v2 = *(*int32)(unsafe.Pointer(v3))
*(*uintptr)(unsafe.Pointer(_memdb_g.FapMemStore + uintptr(i)*8)) = *(*uintptr)(unsafe.Pointer(_memdb_g.FapMemStore + uintptr(v2)*8))
if _memdb_g.FnMemStore == 0 {
Xsqlite3_free(tls, _memdb_g.FapMemStore)
_memdb_g.FapMemStore = uintptr(0)
}
}
break
}
goto _1
_1:
;
i++
}
Xsqlite3_mutex_leave(tls, pVfsMutex)
} else {
_memdbEnter(tls, p)
}
(*TMemStore)(unsafe.Pointer(p)).FnRef--
if (*TMemStore)(unsafe.Pointer(p)).FnRef <= 0 {
if (*TMemStore)(unsafe.Pointer(p)).FmFlags&uint32(SQLITE_DESERIALIZE_FREEONCLOSE) != 0 {
Xsqlite3_free(tls, (*TMemStore)(unsafe.Pointer(p)).FaData)
}
_memdbLeave(tls, p)
Xsqlite3_mutex_free(tls, (*TMemStore)(unsafe.Pointer(p)).FpMutex)
Xsqlite3_free(tls, p)
} else {
_memdbLeave(tls, p)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Read data from an memdb-file.
// */
func _memdbRead(tls *libc.TLS, pFile uintptr, zBuf uintptr, iAmt int32, iOfst Tsqlite_int64) (r int32) {
var p uintptr
_ = p
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
_memdbEnter(tls, p)
if iOfst+int64(iAmt) > (*TMemStore)(unsafe.Pointer(p)).Fsz {
libc.Xmemset(tls, zBuf, 0, uint64(iAmt))
if iOfst < (*TMemStore)(unsafe.Pointer(p)).Fsz {
libc.Xmemcpy(tls, zBuf, (*TMemStore)(unsafe.Pointer(p)).FaData+uintptr(iOfst), uint64((*TMemStore)(unsafe.Pointer(p)).Fsz-iOfst))
}
_memdbLeave(tls, p)
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(2)<<libc.Int32FromInt32(8)
}
libc.Xmemcpy(tls, zBuf, (*TMemStore)(unsafe.Pointer(p)).FaData+uintptr(iOfst), uint64(iAmt))
_memdbLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Try to enlarge the memory allocation to hold at least sz bytes
// */
func _memdbEnlarge(tls *libc.TLS, p uintptr, newSz Tsqlite3_int64) (r int32) {
var pNew uintptr
_ = pNew
if (*TMemStore)(unsafe.Pointer(p)).FmFlags&uint32(SQLITE_DESERIALIZE_RESIZEABLE) == uint32(0) || (*TMemStore)(unsafe.Pointer(p)).FnMmap > 0 {
return int32(SQLITE_FULL)
}
if newSz > (*TMemStore)(unsafe.Pointer(p)).FszMax {
return int32(SQLITE_FULL)
}
newSz *= int64(2)
if newSz > (*TMemStore)(unsafe.Pointer(p)).FszMax {
newSz = (*TMemStore)(unsafe.Pointer(p)).FszMax
}
pNew = _sqlite3Realloc(tls, (*TMemStore)(unsafe.Pointer(p)).FaData, uint64(newSz))
if pNew == uintptr(0) {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(12)<<libc.Int32FromInt32(8)
}
(*TMemStore)(unsafe.Pointer(p)).FaData = pNew
(*TMemStore)(unsafe.Pointer(p)).FszAlloc = newSz
return SQLITE_OK
}
// C documentation
//
// /*
// ** Write data to an memdb-file.
// */
func _memdbWrite(tls *libc.TLS, pFile uintptr, z uintptr, iAmt int32, iOfst Tsqlite_int64) (r int32) {
var p uintptr
var rc, v1 int32
var v2 bool
_, _, _, _ = p, rc, v1, v2
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
_memdbEnter(tls, p)
if (*TMemStore)(unsafe.Pointer(p)).FmFlags&uint32(SQLITE_DESERIALIZE_READONLY) != 0 {
/* Can't happen: memdbLock() will return SQLITE_READONLY before reaching this point */
_memdbLeave(tls, p)
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(3)<<libc.Int32FromInt32(8)
}
if iOfst+int64(iAmt) > (*TMemStore)(unsafe.Pointer(p)).Fsz {
if v2 = iOfst+int64(iAmt) > (*TMemStore)(unsafe.Pointer(p)).FszAlloc; v2 {
v1 = _memdbEnlarge(tls, p, iOfst+int64(iAmt))
rc = v1
}
if v2 && v1 != SQLITE_OK {
_memdbLeave(tls, p)
return rc
}
if iOfst > (*TMemStore)(unsafe.Pointer(p)).Fsz {
libc.Xmemset(tls, (*TMemStore)(unsafe.Pointer(p)).FaData+uintptr((*TMemStore)(unsafe.Pointer(p)).Fsz), 0, uint64(iOfst-(*TMemStore)(unsafe.Pointer(p)).Fsz))
}
(*TMemStore)(unsafe.Pointer(p)).Fsz = iOfst + int64(iAmt)
}
libc.Xmemcpy(tls, (*TMemStore)(unsafe.Pointer(p)).FaData+uintptr(iOfst), z, uint64(iAmt))
_memdbLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Truncate an memdb-file.
// **
// ** In rollback mode (which is always the case for memdb, as it does not
// ** support WAL mode) the truncate() method is only used to reduce
// ** the size of a file, never to increase the size.
// */
func _memdbTruncate(tls *libc.TLS, pFile uintptr, size Tsqlite_int64) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
rc = SQLITE_OK
_memdbEnter(tls, p)
if size > (*TMemStore)(unsafe.Pointer(p)).Fsz {
/* This can only happen with a corrupt wal mode db */
rc = int32(SQLITE_CORRUPT)
} else {
(*TMemStore)(unsafe.Pointer(p)).Fsz = size
}
_memdbLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** Sync an memdb-file.
// */
func _memdbSync(tls *libc.TLS, pFile uintptr, flags int32) (r int32) {
_ = pFile
_ = flags
return SQLITE_OK
}
// C documentation
//
// /*
// ** Return the current file-size of an memdb-file.
// */
func _memdbFileSize(tls *libc.TLS, pFile uintptr, pSize uintptr) (r int32) {
var p uintptr
_ = p
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
_memdbEnter(tls, p)
*(*Tsqlite_int64)(unsafe.Pointer(pSize)) = (*TMemStore)(unsafe.Pointer(p)).Fsz
_memdbLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Lock an memdb-file.
// */
func _memdbLock(tls *libc.TLS, pFile uintptr, eLock int32) (r int32) {
var p, pThis uintptr
var rc int32
_, _, _ = p, pThis, rc
pThis = pFile
p = (*TMemFile)(unsafe.Pointer(pThis)).FpStore
rc = SQLITE_OK
if eLock <= (*TMemFile)(unsafe.Pointer(pThis)).FeLock {
return SQLITE_OK
}
_memdbEnter(tls, p)
if eLock > int32(SQLITE_LOCK_SHARED) && (*TMemStore)(unsafe.Pointer(p)).FmFlags&uint32(SQLITE_DESERIALIZE_READONLY) != 0 {
rc = int32(SQLITE_READONLY)
} else {
switch eLock {
case int32(SQLITE_LOCK_SHARED):
if (*TMemStore)(unsafe.Pointer(p)).FnWrLock > 0 {
rc = int32(SQLITE_BUSY)
} else {
(*TMemStore)(unsafe.Pointer(p)).FnRdLock++
}
case int32(SQLITE_LOCK_RESERVED):
fallthrough
case int32(SQLITE_LOCK_PENDING):
if (*TMemFile)(unsafe.Pointer(pThis)).FeLock == int32(SQLITE_LOCK_SHARED) {
if (*TMemStore)(unsafe.Pointer(p)).FnWrLock > 0 {
rc = int32(SQLITE_BUSY)
} else {
(*TMemStore)(unsafe.Pointer(p)).FnWrLock = int32(1)
}
}
default:
if (*TMemStore)(unsafe.Pointer(p)).FnRdLock > int32(1) {
rc = int32(SQLITE_BUSY)
} else {
if (*TMemFile)(unsafe.Pointer(pThis)).FeLock == int32(SQLITE_LOCK_SHARED) {
(*TMemStore)(unsafe.Pointer(p)).FnWrLock = int32(1)
}
}
break
}
}
if rc == SQLITE_OK {
(*TMemFile)(unsafe.Pointer(pThis)).FeLock = eLock
}
_memdbLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** Unlock an memdb-file.
// */
func _memdbUnlock(tls *libc.TLS, pFile uintptr, eLock int32) (r int32) {
var p, pThis uintptr
_, _ = p, pThis
pThis = pFile
p = (*TMemFile)(unsafe.Pointer(pThis)).FpStore
if eLock >= (*TMemFile)(unsafe.Pointer(pThis)).FeLock {
return SQLITE_OK
}
_memdbEnter(tls, p)
if eLock == int32(SQLITE_LOCK_SHARED) {
if (*TMemFile)(unsafe.Pointer(pThis)).FeLock > int32(SQLITE_LOCK_SHARED) {
(*TMemStore)(unsafe.Pointer(p)).FnWrLock--
}
} else {
if (*TMemFile)(unsafe.Pointer(pThis)).FeLock > int32(SQLITE_LOCK_SHARED) {
(*TMemStore)(unsafe.Pointer(p)).FnWrLock--
}
(*TMemStore)(unsafe.Pointer(p)).FnRdLock--
}
(*TMemFile)(unsafe.Pointer(pThis)).FeLock = eLock
_memdbLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** File control method. For custom operations on an memdb-file.
// */
func _memdbFileControl(tls *libc.TLS, pFile uintptr, op int32, pArg uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var iLimit Tsqlite3_int64
var p uintptr
var rc int32
_, _, _ = iLimit, p, rc
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
rc = int32(SQLITE_NOTFOUND)
_memdbEnter(tls, p)
if op == int32(SQLITE_FCNTL_VFSNAME) {
*(*uintptr)(unsafe.Pointer(pArg)) = Xsqlite3_mprintf(tls, __ccgo_ts+3975, libc.VaList(bp+8, (*TMemStore)(unsafe.Pointer(p)).FaData, (*TMemStore)(unsafe.Pointer(p)).Fsz))
rc = SQLITE_OK
}
if op == int32(SQLITE_FCNTL_SIZE_LIMIT) {
iLimit = *(*Tsqlite3_int64)(unsafe.Pointer(pArg))
if iLimit < (*TMemStore)(unsafe.Pointer(p)).Fsz {
if iLimit < 0 {
iLimit = (*TMemStore)(unsafe.Pointer(p)).FszMax
} else {
iLimit = (*TMemStore)(unsafe.Pointer(p)).Fsz
}
}
(*TMemStore)(unsafe.Pointer(p)).FszMax = iLimit
*(*Tsqlite3_int64)(unsafe.Pointer(pArg)) = iLimit
rc = SQLITE_OK
}
_memdbLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** Return the device characteristic flags supported by an memdb-file.
// */
func _memdbDeviceCharacteristics(tls *libc.TLS, pFile uintptr) (r int32) {
_ = pFile
return libc.Int32FromInt32(SQLITE_IOCAP_ATOMIC) | libc.Int32FromInt32(SQLITE_IOCAP_POWERSAFE_OVERWRITE) | libc.Int32FromInt32(SQLITE_IOCAP_SAFE_APPEND) | libc.Int32FromInt32(SQLITE_IOCAP_SEQUENTIAL)
}
// C documentation
//
// /* Fetch a page of a memory-mapped file */
func _memdbFetch(tls *libc.TLS, pFile uintptr, iOfst Tsqlite3_int64, iAmt int32, pp uintptr) (r int32) {
var p uintptr
_ = p
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
_memdbEnter(tls, p)
if iOfst+int64(iAmt) > (*TMemStore)(unsafe.Pointer(p)).Fsz || (*TMemStore)(unsafe.Pointer(p)).FmFlags&uint32(SQLITE_DESERIALIZE_RESIZEABLE) != uint32(0) {
*(*uintptr)(unsafe.Pointer(pp)) = uintptr(0)
} else {
(*TMemStore)(unsafe.Pointer(p)).FnMmap++
*(*uintptr)(unsafe.Pointer(pp)) = (*TMemStore)(unsafe.Pointer(p)).FaData + uintptr(iOfst)
}
_memdbLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /* Release a memory-mapped page */
func _memdbUnfetch(tls *libc.TLS, pFile uintptr, iOfst Tsqlite3_int64, pPage uintptr) (r int32) {
var p uintptr
_ = p
p = (*TMemFile)(unsafe.Pointer(pFile)).FpStore
_ = iOfst
_ = pPage
_memdbEnter(tls, p)
(*TMemStore)(unsafe.Pointer(p)).FnMmap--
_memdbLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Open an mem file handle.
// */
func _memdbOpen(tls *libc.TLS, pVfs uintptr, zName uintptr, pFd uintptr, flags int32, pOutFlags uintptr) (r int32) {
var apNew, p, pFile, pVfsMutex, v3 uintptr
var i, szName, v2 int32
_, _, _, _, _, _, _, _ = apNew, i, p, pFile, pVfsMutex, szName, v2, v3
pFile = pFd
p = uintptr(0)
_ = pVfs
libc.Xmemset(tls, pFile, 0, uint64(24))
szName = _sqlite3Strlen30(tls, zName)
if szName > int32(1) && (int32(*(*int8)(unsafe.Pointer(zName))) == int32('/') || int32(*(*int8)(unsafe.Pointer(zName))) == int32('\\')) {
pVfsMutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_VFS1))
Xsqlite3_mutex_enter(tls, pVfsMutex)
i = 0
for {
if !(i < _memdb_g.FnMemStore) {
break
}
if libc.Xstrcmp(tls, (*TMemStore)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(_memdb_g.FapMemStore + uintptr(i)*8)))).FzFName, zName) == 0 {
p = *(*uintptr)(unsafe.Pointer(_memdb_g.FapMemStore + uintptr(i)*8))
break
}
goto _1
_1:
;
i++
}
if p == uintptr(0) {
p = _sqlite3Malloc(tls, uint64(72)+uint64(szName)+uint64(3))
if p == uintptr(0) {
Xsqlite3_mutex_leave(tls, pVfsMutex)
return int32(SQLITE_NOMEM)
}
apNew = _sqlite3Realloc(tls, _memdb_g.FapMemStore, uint64(8)*uint64(_memdb_g.FnMemStore+libc.Int32FromInt32(1)))
if apNew == uintptr(0) {
Xsqlite3_free(tls, p)
Xsqlite3_mutex_leave(tls, pVfsMutex)
return int32(SQLITE_NOMEM)
}
v3 = uintptr(unsafe.Pointer(&_memdb_g))
v2 = *(*int32)(unsafe.Pointer(v3))
*(*int32)(unsafe.Pointer(v3))++
*(*uintptr)(unsafe.Pointer(apNew + uintptr(v2)*8)) = p
_memdb_g.FapMemStore = apNew
libc.Xmemset(tls, p, 0, uint64(72))
(*TMemStore)(unsafe.Pointer(p)).FmFlags = uint32(libc.Int32FromInt32(SQLITE_DESERIALIZE_RESIZEABLE) | libc.Int32FromInt32(SQLITE_DESERIALIZE_FREEONCLOSE))
(*TMemStore)(unsafe.Pointer(p)).FszMax = _sqlite3Config.FmxMemdbSize
(*TMemStore)(unsafe.Pointer(p)).FzFName = p + 1*72
libc.Xmemcpy(tls, (*TMemStore)(unsafe.Pointer(p)).FzFName, zName, uint64(szName+int32(1)))
(*TMemStore)(unsafe.Pointer(p)).FpMutex = Xsqlite3_mutex_alloc(tls, SQLITE_MUTEX_FAST)
if (*TMemStore)(unsafe.Pointer(p)).FpMutex == uintptr(0) {
_memdb_g.FnMemStore--
Xsqlite3_free(tls, p)
Xsqlite3_mutex_leave(tls, pVfsMutex)
return int32(SQLITE_NOMEM)
}
(*TMemStore)(unsafe.Pointer(p)).FnRef = int32(1)
_memdbEnter(tls, p)
} else {
_memdbEnter(tls, p)
(*TMemStore)(unsafe.Pointer(p)).FnRef++
}
Xsqlite3_mutex_leave(tls, pVfsMutex)
} else {
p = _sqlite3Malloc(tls, uint64(72))
if p == uintptr(0) {
return int32(SQLITE_NOMEM)
}
libc.Xmemset(tls, p, 0, uint64(72))
(*TMemStore)(unsafe.Pointer(p)).FmFlags = uint32(libc.Int32FromInt32(SQLITE_DESERIALIZE_RESIZEABLE) | libc.Int32FromInt32(SQLITE_DESERIALIZE_FREEONCLOSE))
(*TMemStore)(unsafe.Pointer(p)).FszMax = _sqlite3Config.FmxMemdbSize
}
(*TMemFile)(unsafe.Pointer(pFile)).FpStore = p
if pOutFlags != uintptr(0) {
*(*int32)(unsafe.Pointer(pOutFlags)) = flags | int32(SQLITE_OPEN_MEMORY)
}
(*Tsqlite3_file)(unsafe.Pointer(pFd)).FpMethods = uintptr(unsafe.Pointer(&_memdb_io_methods))
_memdbLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Test for access permissions. Return true if the requested permission
// ** is available, or false otherwise.
// **
// ** With memdb, no files ever exist on disk. So always return false.
// */
func _memdbAccess(tls *libc.TLS, pVfs uintptr, zPath uintptr, flags int32, pResOut uintptr) (r int32) {
_ = pVfs
_ = zPath
_ = flags
*(*int32)(unsafe.Pointer(pResOut)) = 0
return SQLITE_OK
}
// C documentation
//
// /*
// ** Populate buffer zOut with the full canonical pathname corresponding
// ** to the pathname in zPath. zOut is guaranteed to point to a buffer
// ** of at least (INST_MAX_PATHNAME+1) bytes.
// */
func _memdbFullPathname(tls *libc.TLS, pVfs uintptr, zPath uintptr, nOut int32, zOut uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
_ = pVfs
Xsqlite3_snprintf(tls, nOut, zOut, __ccgo_ts+3797, libc.VaList(bp+8, zPath))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Open the dynamic library located at zPath and return a handle.
// */
func _memdbDlOpen(tls *libc.TLS, pVfs uintptr, zPath uintptr) (r uintptr) {
return (*(*func(*libc.TLS, uintptr, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxDlOpen})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, zPath)
}
// C documentation
//
// /*
// ** Populate the buffer zErrMsg (size nByte bytes) with a human readable
// ** utf-8 string describing the most recent error encountered associated
// ** with dynamic libraries.
// */
func _memdbDlError(tls *libc.TLS, pVfs uintptr, nByte int32, zErrMsg uintptr) {
(*(*func(*libc.TLS, uintptr, int32, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxDlError})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, nByte, zErrMsg)
}
// C documentation
//
// /*
// ** Return a pointer to the symbol zSymbol in the dynamic library pHandle.
// */
func _memdbDlSym(tls *libc.TLS, pVfs uintptr, p uintptr, zSym uintptr) (r uintptr) {
return (*(*func(*libc.TLS, uintptr, uintptr, uintptr) uintptr)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxDlSym})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, p, zSym)
}
// C documentation
//
// /*
// ** Close the dynamic library handle pHandle.
// */
func _memdbDlClose(tls *libc.TLS, pVfs uintptr, pHandle uintptr) {
(*(*func(*libc.TLS, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxDlClose})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, pHandle)
}
// C documentation
//
// /*
// ** Populate the buffer pointed to by zBufOut with nByte bytes of
// ** random data.
// */
func _memdbRandomness(tls *libc.TLS, pVfs uintptr, nByte int32, zBufOut uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxRandomness})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, nByte, zBufOut)
}
// C documentation
//
// /*
// ** Sleep for nMicro microseconds. Return the number of microseconds
// ** actually slept.
// */
func _memdbSleep(tls *libc.TLS, pVfs uintptr, nMicro int32) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxSleep})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, nMicro)
}
func _memdbGetLastError(tls *libc.TLS, pVfs uintptr, a int32, b uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxGetLastError})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, a, b)
}
func _memdbCurrentTimeInt64(tls *libc.TLS, pVfs uintptr, p uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_vfs)(unsafe.Pointer((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData)).FxCurrentTimeInt64})))(tls, (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FpAppData, p)
}
// C documentation
//
// /*
// ** Translate a database connection pointer and schema name into a
// ** MemFile pointer.
// */
func _memdbFromDbSchema(tls *libc.TLS, db uintptr, zSchema uintptr) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pStore uintptr
var rc int32
var _ /* p at bp+0 */ uintptr
_, _ = pStore, rc
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
rc = Xsqlite3_file_control(tls, db, zSchema, int32(SQLITE_FCNTL_FILE_POINTER), bp)
if rc != 0 {
return uintptr(0)
}
if (*TMemFile)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fbase.FpMethods != uintptr(unsafe.Pointer(&_memdb_io_methods)) {
return uintptr(0)
}
pStore = (*TMemFile)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpStore
_memdbEnter(tls, pStore)
if (*TMemStore)(unsafe.Pointer(pStore)).FzFName != uintptr(0) {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
}
_memdbLeave(tls, pStore)
return *(*uintptr)(unsafe.Pointer(bp))
}
// C documentation
//
// /*
// ** Return the serialization of a database
// */
func Xsqlite3_serialize(tls *libc.TLS, db uintptr, zSchema uintptr, piSize uintptr, mFlags uint32) (r uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var iDb, nPage, pgno, rc, szPage, v1 int32
var p, pBt, pOut, pPager, pStore, pTo, zSql uintptr
var sz Tsqlite3_int64
var _ /* pPage at bp+8 */ uintptr
var _ /* pStmt at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _ = iDb, nPage, p, pBt, pOut, pPager, pStore, pTo, pgno, rc, sz, szPage, zSql, v1
szPage = 0
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
if zSchema == uintptr(0) {
zSchema = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb))).FzDbSName
}
p = _memdbFromDbSchema(tls, db, zSchema)
iDb = _sqlite3FindDbName(tls, db, zSchema)
if piSize != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(piSize)) = int64(-int32(1))
}
if iDb < 0 {
return uintptr(0)
}
if p != 0 {
pStore = (*TMemFile)(unsafe.Pointer(p)).FpStore
if piSize != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(piSize)) = (*TMemStore)(unsafe.Pointer(pStore)).Fsz
}
if mFlags&uint32(SQLITE_SERIALIZE_NOCOPY) != 0 {
pOut = (*TMemStore)(unsafe.Pointer(pStore)).FaData
} else {
pOut = Xsqlite3_malloc64(tls, uint64((*TMemStore)(unsafe.Pointer(pStore)).Fsz))
if pOut != 0 {
libc.Xmemcpy(tls, pOut, (*TMemStore)(unsafe.Pointer(pStore)).FaData, uint64((*TMemStore)(unsafe.Pointer(pStore)).Fsz))
}
}
return pOut
}
pBt = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32))).FpBt
if pBt == uintptr(0) {
return uintptr(0)
}
szPage = _sqlite3BtreeGetPageSize(tls, pBt)
zSql = Xsqlite3_mprintf(tls, __ccgo_ts+3990, libc.VaList(bp+24, zSchema))
if zSql != 0 {
v1 = Xsqlite3_prepare_v2(tls, db, zSql, -int32(1), bp, uintptr(0))
} else {
v1 = int32(SQLITE_NOMEM)
}
rc = v1
Xsqlite3_free(tls, zSql)
if rc != 0 {
return uintptr(0)
}
rc = Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc != int32(SQLITE_ROW) {
pOut = uintptr(0)
} else {
sz = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) * int64(szPage)
if sz == 0 {
Xsqlite3_reset(tls, *(*uintptr)(unsafe.Pointer(bp)))
Xsqlite3_exec(tls, db, __ccgo_ts+4013, uintptr(0), uintptr(0), uintptr(0))
rc = Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc == int32(SQLITE_ROW) {
sz = Xsqlite3_column_int64(tls, *(*uintptr)(unsafe.Pointer(bp)), 0) * int64(szPage)
}
}
if piSize != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(piSize)) = sz
}
if mFlags&uint32(SQLITE_SERIALIZE_NOCOPY) != 0 {
pOut = uintptr(0)
} else {
pOut = Xsqlite3_malloc64(tls, uint64(sz))
if pOut != 0 {
nPage = Xsqlite3_column_int(tls, *(*uintptr)(unsafe.Pointer(bp)), 0)
pPager = _sqlite3BtreePager(tls, pBt)
pgno = int32(1)
for {
if !(pgno <= nPage) {
break
}
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0)
pTo = pOut + uintptr(int64(szPage)*int64(pgno-libc.Int32FromInt32(1)))
rc = _sqlite3PagerGet(tls, pPager, uint32(pgno), bp+8, 0)
if rc == SQLITE_OK {
libc.Xmemcpy(tls, pTo, _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp + 8))), uint64(szPage))
} else {
libc.Xmemset(tls, pTo, 0, uint64(szPage))
}
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
goto _2
_2:
;
pgno++
}
}
}
}
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
return pOut
}
// C documentation
//
// /* Convert zSchema to a MemDB and initialize its content.
// */
func Xsqlite3_deserialize(tls *libc.TLS, db uintptr, zSchema uintptr, pData uintptr, szDb Tsqlite3_int64, szBuf Tsqlite3_int64, mFlags uint32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var iDb, rc int32
var p, pStore, zSql uintptr
var _ /* pStmt at bp+0 */ uintptr
_, _, _, _, _ = iDb, p, pStore, rc, zSql
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
if zSchema == uintptr(0) {
zSchema = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb))).FzDbSName
}
iDb = _sqlite3FindDbName(tls, db, zSchema)
if iDb < int32(2) && iDb != 0 {
rc = int32(SQLITE_ERROR)
goto end_deserialize
}
zSql = Xsqlite3_mprintf(tls, __ccgo_ts+4038, libc.VaList(bp+16, zSchema))
if zSql == uintptr(0) {
rc = int32(SQLITE_NOMEM)
} else {
rc = Xsqlite3_prepare_v2(tls, db, zSql, -int32(1), bp, uintptr(0))
Xsqlite3_free(tls, zSql)
}
if rc != 0 {
goto end_deserialize
}
(*Tsqlite3)(unsafe.Pointer(db)).Finit1.FiDb = uint8(iDb)
libc.SetBitFieldPtr8Uint32(db+192+8, libc.Uint32FromInt32(1), 2, 0x4)
rc = Xsqlite3_step(tls, *(*uintptr)(unsafe.Pointer(bp)))
libc.SetBitFieldPtr8Uint32(db+192+8, libc.Uint32FromInt32(0), 2, 0x4)
if rc != int32(SQLITE_DONE) {
rc = int32(SQLITE_ERROR)
goto end_deserialize
}
p = _memdbFromDbSchema(tls, db, zSchema)
if p == uintptr(0) {
rc = int32(SQLITE_ERROR)
} else {
pStore = (*TMemFile)(unsafe.Pointer(p)).FpStore
(*TMemStore)(unsafe.Pointer(pStore)).FaData = pData
pData = uintptr(0)
(*TMemStore)(unsafe.Pointer(pStore)).Fsz = szDb
(*TMemStore)(unsafe.Pointer(pStore)).FszAlloc = szBuf
(*TMemStore)(unsafe.Pointer(pStore)).FszMax = szBuf
if (*TMemStore)(unsafe.Pointer(pStore)).FszMax < _sqlite3Config.FmxMemdbSize {
(*TMemStore)(unsafe.Pointer(pStore)).FszMax = _sqlite3Config.FmxMemdbSize
}
(*TMemStore)(unsafe.Pointer(pStore)).FmFlags = mFlags
rc = SQLITE_OK
}
goto end_deserialize
end_deserialize:
;
Xsqlite3_finalize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if pData != 0 && mFlags&uint32(SQLITE_DESERIALIZE_FREEONCLOSE) != uint32(0) {
Xsqlite3_free(tls, pData)
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
return rc
}
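// A minimal sketch (not part of the generated amalgamation) of how the
// serialize/deserialize pair above can be driven from Go. It assumes the
// caller already owns an open connection handle db (obtained elsewhere, via
// open APIs not shown in this excerpt) and a *libc.TLS. Passing zSchema==0
// selects the "main" schema, as handled at the top of both functions; the
// function name itself is hypothetical.
func exampleSerializeRoundTrip(tls *libc.TLS, db uintptr) int32 {
	// 8-byte out-parameter that receives the size of the serialization.
	piSize := Xsqlite3_malloc64(tls, uint64(8))
	if piSize == 0 {
		return int32(SQLITE_NOMEM)
	}
	defer Xsqlite3_free(tls, piSize)
	// mFlags==0 asks for a malloc'ed copy of the database image.
	pData := Xsqlite3_serialize(tls, db, 0, piSize, 0)
	if pData == 0 {
		return int32(SQLITE_ERROR)
	}
	sz := *(*Tsqlite3_int64)(unsafe.Pointer(piSize))
	// Hand the image back to the same connection. FREEONCLOSE transfers
	// ownership of pData to SQLite (it is freed even if the call fails),
	// and RESIZEABLE allows the in-memory database to grow afterwards.
	return Xsqlite3_deserialize(tls, db, 0, pData, sz, sz,
		uint32(SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE))
}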
// C documentation
//
// /*
// ** Return true if the VFS is the memvfs.
// */
func _sqlite3IsMemdb(tls *libc.TLS, pVfs uintptr) (r int32) {
return libc.BoolInt32(pVfs == uintptr(unsafe.Pointer(&_memdb_vfs)))
}
// C documentation
//
// /*
// ** This routine is called when the extension is loaded.
// ** Register the new VFS.
// */
func _sqlite3MemdbInit(tls *libc.TLS) (r int32) {
var pLower uintptr
var sz uint32
_, _ = pLower, sz
pLower = Xsqlite3_vfs_find(tls, uintptr(0))
if pLower == uintptr(0) {
return int32(SQLITE_ERROR)
}
sz = uint32((*Tsqlite3_vfs)(unsafe.Pointer(pLower)).FszOsFile)
_memdb_vfs.FpAppData = pLower
/* The following conditional can only be true when compiled for
** Windows x86 and SQLITE_MAX_MMAP_SIZE=0. We always leave
** it in, to be safe, but it is marked as NO_TEST since there
** is no way to reach it under most builds. */
if uint64(sz) < uint64(24) {
sz = uint32(24)
} /*NO_TEST*/
_memdb_vfs.FszOsFile = int32(sz)
return Xsqlite3_vfs_register(tls, uintptr(unsafe.Pointer(&_memdb_vfs)), 0)
}
/************** End of memdb.c ***********************************************/
/************** Begin file bitvec.c ******************************************/
/*
** 2008 February 16
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file implements an object that represents a fixed-length
** bitmap. Bits are numbered starting with 1.
**
** A bitmap is used to record which pages of a database file have been
** journalled during a transaction, or which pages have the "dont-write"
** property. Usually only a few pages meet either condition.
** So the bitmap is usually sparse and has low cardinality.
** But sometimes (for example, during a DROP of a large table) most
** or all of the pages in a database can get journalled. In those cases,
** the bitmap becomes dense with high cardinality. The algorithm needs
** to handle both cases well.
**
** The size of the bitmap is fixed when the object is created.
**
** All bits are clear when the bitmap is created. Individual bits
** may be set or cleared one at a time.
**
** Test operations are about 100 times more common than set operations.
** Clear operations are exceedingly rare. There are usually between
** 5 and 500 set operations per Bitvec object, though the number of sets can
** sometimes grow into tens of thousands or larger. The size of the
** Bitvec object is the number of pages in the database file at the
** start of a transaction, and is thus usually less than a few thousand,
** but can be as large as 2 billion for a really big database.
*/
/* #include "sqliteInt.h" */
/* Size of the Bitvec structure in bytes. */
/* Round the union size down to the nearest pointer boundary, since that's how
** it will be aligned within the Bitvec struct. */
/* Type of the array "element" for the bitmap representation.
** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE.
** Setting this to the "natural word" size of your CPU may improve
** performance. */
/* Size, in bits, of the bitmap element. */
/* Number of elements in a bitmap array. */
/* Number of bits in the bitmap array. */
/* Number of u32 values in hash table. */
/* Maximum number of entries in hash table before
** sub-dividing and re-hashing. */
/* Hashing function for the aHash representation.
** Empirical testing showed that the *37 multiplier
** (an arbitrary prime) in the hash function provided
** no fewer collisions than the no-op *1. */
/*
** A bitmap is an instance of the following structure.
**
** This bitmap records the existence of zero or more bits
** with values between 1 and iSize, inclusive.
**
** There are three possible representations of the bitmap.
** If iSize<=BITVEC_NBIT, then Bitvec.u.aBitmap[] is a straight
** bitmap. The least significant bit is bit 1.
**
** If iSize>BITVEC_NBIT and iDivisor==0 then Bitvec.u.aHash[] is
** a hash table that will hold up to BITVEC_MXHASH distinct values.
**
** Otherwise, the value i is redirected into one of BITVEC_NPTR
** sub-bitmaps pointed to by Bitvec.u.apSub[]. Each subbitmap
** handles up to iDivisor separate values of i. apSub[0] holds
** values between 1 and iDivisor. apSub[1] holds values between
** iDivisor+1 and 2*iDivisor. apSub[N] holds values between
** N*iDivisor+1 and (N+1)*iDivisor. Each subbitmap is normalized
** to deal with values between 1 and iDivisor.
*/
type TBitvec1 = struct {
FiSize Tu32
FnSet Tu32
FiDivisor Tu32
Fu struct {
FaHash [0][124]Tu32
FapSub [0][62]uintptr
FaBitmap [496]Tu8
}
}
type Bitvec1 = TBitvec1
// C documentation
//
// /*
// ** Create a new bitmap object able to handle bits between 0 and iSize,
// ** inclusive. Return a pointer to the new object. Return NULL if
// ** malloc fails.
// */
func _sqlite3BitvecCreate(tls *libc.TLS, iSize Tu32) (r uintptr) {
var p uintptr
_ = p
p = _sqlite3MallocZero(tls, uint64(512))
if p != 0 {
(*TBitvec)(unsafe.Pointer(p)).FiSize = iSize
}
return p
}
// C documentation
//
// /*
// ** Check to see if the i-th bit is set. Return true or false.
// ** If p is NULL (if the bitmap has not been created) or if
// ** i is out of range, then return false.
// */
func _sqlite3BitvecTestNotNull(tls *libc.TLS, p uintptr, i Tu32) (r int32) {
var bin, h, v1 Tu32
_, _, _ = bin, h, v1
i--
if i >= (*TBitvec)(unsafe.Pointer(p)).FiSize {
return 0
}
for (*TBitvec)(unsafe.Pointer(p)).FiDivisor != 0 {
bin = i / (*TBitvec)(unsafe.Pointer(p)).FiDivisor
i = i % (*TBitvec)(unsafe.Pointer(p)).FiDivisor
p = *(*uintptr)(unsafe.Pointer(p + 16 + uintptr(bin)*8))
if !(p != 0) {
return 0
}
}
if uint64((*TBitvec)(unsafe.Pointer(p)).FiSize) <= (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(1)*libc.Uint64FromInt32(BITVEC_SZELEM) {
return libc.BoolInt32(int32(*(*Tu8)(unsafe.Pointer(p + 16 + uintptr(i/uint32(BITVEC_SZELEM)))))&(int32(1)<<(i&uint32(libc.Int32FromInt32(BITVEC_SZELEM)-libc.Int32FromInt32(1)))) != 0)
} else {
v1 = i
i++
h = uint32(uint64(v1*libc.Uint32FromInt32(1)) % ((libc.Uint64FromInt32(BITVEC_SZ) - libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4)) / libc.Uint64FromInt64(8) * libc.Uint64FromInt64(8) / libc.Uint64FromInt64(4)))
for *(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) != 0 {
if *(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) == i {
return int32(1)
}
h = uint32(uint64(h+libc.Uint32FromInt32(1)) % ((libc.Uint64FromInt32(BITVEC_SZ) - libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4)) / libc.Uint64FromInt64(8) * libc.Uint64FromInt64(8) / libc.Uint64FromInt64(4)))
}
return 0
}
return r
}
func _sqlite3BitvecTest(tls *libc.TLS, p uintptr, i Tu32) (r int32) {
return libc.BoolInt32(p != uintptr(0) && _sqlite3BitvecTestNotNull(tls, p, i) != 0)
}
// C documentation
//
// /*
// ** Set the i-th bit. Return 0 on success and an error code if
// ** anything goes wrong.
// **
// ** This routine might cause sub-bitmaps to be allocated. Failing
// ** to get the memory needed to hold the sub-bitmap is the only
// ** thing that can go wrong with an insert, assuming p and i are valid.
// **
// ** The calling function must ensure that p is a valid Bitvec object
// ** and that the value for "i" is within range of the Bitvec object.
// ** Otherwise the behavior is undefined.
// */
func _sqlite3BitvecSet(tls *libc.TLS, p uintptr, i Tu32) (r int32) {
var aiValues, p1 uintptr
var bin, h, v2 Tu32
var j uint32
var rc int32
_, _, _, _, _, _, _ = aiValues, bin, h, j, rc, v2, p1
if p == uintptr(0) {
return SQLITE_OK
}
i--
for uint64((*TBitvec)(unsafe.Pointer(p)).FiSize) > (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(1)*libc.Uint64FromInt32(BITVEC_SZELEM) && (*TBitvec)(unsafe.Pointer(p)).FiDivisor != 0 {
bin = i / (*TBitvec)(unsafe.Pointer(p)).FiDivisor
i = i % (*TBitvec)(unsafe.Pointer(p)).FiDivisor
if *(*uintptr)(unsafe.Pointer(p + 16 + uintptr(bin)*8)) == uintptr(0) {
*(*uintptr)(unsafe.Pointer(p + 16 + uintptr(bin)*8)) = _sqlite3BitvecCreate(tls, (*TBitvec)(unsafe.Pointer(p)).FiDivisor)
if *(*uintptr)(unsafe.Pointer(p + 16 + uintptr(bin)*8)) == uintptr(0) {
return int32(SQLITE_NOMEM)
}
}
p = *(*uintptr)(unsafe.Pointer(p + 16 + uintptr(bin)*8))
}
if uint64((*TBitvec)(unsafe.Pointer(p)).FiSize) <= (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(1)*libc.Uint64FromInt32(BITVEC_SZELEM) {
p1 = p + 16 + uintptr(i/uint32(BITVEC_SZELEM))
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(1)<<(i&uint32(libc.Int32FromInt32(BITVEC_SZELEM)-libc.Int32FromInt32(1))))
return SQLITE_OK
}
v2 = i
i++
h = uint32(uint64(v2*libc.Uint32FromInt32(1)) % ((libc.Uint64FromInt32(BITVEC_SZ) - libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4)) / libc.Uint64FromInt64(8) * libc.Uint64FromInt64(8) / libc.Uint64FromInt64(4)))
/* if there wasn't a hash collision, and this doesn't */
/* completely fill the hash, then just add it without */
/* worrying about sub-dividing and re-hashing. */
if !(*(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) != 0) {
if uint64((*TBitvec)(unsafe.Pointer(p)).FnSet) < (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(4)-libc.Uint64FromInt32(1) {
goto bitvec_set_end
} else {
goto bitvec_set_rehash
}
}
/* there was a collision, check to see if it's already */
/* in hash, if not, try to find a spot for it */
for cond := true; cond; cond = *(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) != 0 {
if *(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) == i {
return SQLITE_OK
}
h++
if uint64(h) >= (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(4) {
h = uint32(0)
}
}
/* we didn't find it in the hash. h points to the first */
/* available free spot. check to see if this is going to */
/* make our hash too "full". */
goto bitvec_set_rehash
bitvec_set_rehash:
;
if uint64((*TBitvec)(unsafe.Pointer(p)).FnSet) >= (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(4)/libc.Uint64FromInt32(2) {
aiValues = _sqlite3DbMallocRaw(tls, uintptr(0), uint64(496))
if aiValues == uintptr(0) {
return int32(SQLITE_NOMEM)
} else {
libc.Xmemcpy(tls, aiValues, p+16, uint64(496))
libc.Xmemset(tls, p+16, 0, uint64(496))
(*TBitvec)(unsafe.Pointer(p)).FiDivisor = uint32((uint64((*TBitvec)(unsafe.Pointer(p)).FiSize) + (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(8) - uint64(1)) / ((libc.Uint64FromInt32(BITVEC_SZ) - libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4)) / libc.Uint64FromInt64(8) * libc.Uint64FromInt64(8) / libc.Uint64FromInt64(8)))
rc = _sqlite3BitvecSet(tls, p, i)
j = uint32(0)
for {
if !(uint64(j) < (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(4)) {
break
}
if *(*Tu32)(unsafe.Pointer(aiValues + uintptr(j)*4)) != 0 {
rc |= _sqlite3BitvecSet(tls, p, *(*Tu32)(unsafe.Pointer(aiValues + uintptr(j)*4)))
}
goto _3
_3:
;
j++
}
_sqlite3DbFree(tls, uintptr(0), aiValues)
return rc
}
}
goto bitvec_set_end
bitvec_set_end:
;
(*TBitvec)(unsafe.Pointer(p)).FnSet++
*(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) = i
return SQLITE_OK
}
// C documentation
//
// /*
// ** Clear the i-th bit.
// **
// ** pBuf must be a pointer to at least BITVEC_SZ bytes of temporary storage
// ** that BitvecClear can use to rebuild its hash table.
// */
func _sqlite3BitvecClear(tls *libc.TLS, p uintptr, i Tu32, pBuf uintptr) {
var aiValues, p1 uintptr
var bin, h Tu32
var j uint32
_, _, _, _, _ = aiValues, bin, h, j, p1
if p == uintptr(0) {
return
}
i--
for (*TBitvec)(unsafe.Pointer(p)).FiDivisor != 0 {
bin = i / (*TBitvec)(unsafe.Pointer(p)).FiDivisor
i = i % (*TBitvec)(unsafe.Pointer(p)).FiDivisor
p = *(*uintptr)(unsafe.Pointer(p + 16 + uintptr(bin)*8))
if !(p != 0) {
return
}
}
if uint64((*TBitvec)(unsafe.Pointer(p)).FiSize) <= (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(1)*libc.Uint64FromInt32(BITVEC_SZELEM) {
p1 = p + 16 + uintptr(i/uint32(BITVEC_SZELEM))
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(1) << (i & uint32(libc.Int32FromInt32(BITVEC_SZELEM)-libc.Int32FromInt32(1)))))
} else {
aiValues = pBuf
libc.Xmemcpy(tls, aiValues, p+16, uint64(496))
libc.Xmemset(tls, p+16, 0, uint64(496))
(*TBitvec)(unsafe.Pointer(p)).FnSet = uint32(0)
j = uint32(0)
for {
if !(uint64(j) < (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(4)) {
break
}
if *(*Tu32)(unsafe.Pointer(aiValues + uintptr(j)*4)) != 0 && *(*Tu32)(unsafe.Pointer(aiValues + uintptr(j)*4)) != i+uint32(1) {
h = uint32(uint64((*(*Tu32)(unsafe.Pointer(aiValues + uintptr(j)*4))-libc.Uint32FromInt32(1))*libc.Uint32FromInt32(1)) % ((libc.Uint64FromInt32(BITVEC_SZ) - libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4)) / libc.Uint64FromInt64(8) * libc.Uint64FromInt64(8) / libc.Uint64FromInt64(4)))
(*TBitvec)(unsafe.Pointer(p)).FnSet++
for *(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) != 0 {
h++
if uint64(h) >= (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(4) {
h = uint32(0)
}
}
*(*Tu32)(unsafe.Pointer(p + 16 + uintptr(h)*4)) = *(*Tu32)(unsafe.Pointer(aiValues + uintptr(j)*4))
}
goto _2
_2:
;
j++
}
}
}
// C documentation
//
// /*
// ** Destroy a bitmap object. Reclaim all memory used.
// */
func _sqlite3BitvecDestroy(tls *libc.TLS, p uintptr) {
var i uint32
_ = i
if p == uintptr(0) {
return
}
if (*TBitvec)(unsafe.Pointer(p)).FiDivisor != 0 {
i = uint32(0)
for {
if !(uint64(i) < (libc.Uint64FromInt32(BITVEC_SZ)-libc.Uint64FromInt32(3)*libc.Uint64FromInt64(4))/libc.Uint64FromInt64(8)*libc.Uint64FromInt64(8)/libc.Uint64FromInt64(8)) {
break
}
_sqlite3BitvecDestroy(tls, *(*uintptr)(unsafe.Pointer(p + 16 + uintptr(i)*8)))
goto _1
_1:
;
i++
}
}
Xsqlite3_free(tls, p)
}
// C documentation
//
// /*
// ** Return the value of the iSize parameter specified when Bitvec *p
// ** was created.
// */
func _sqlite3BitvecSize(tls *libc.TLS, p uintptr) (r Tu32) {
return (*TBitvec)(unsafe.Pointer(p)).FiSize
}
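// Illustrative sketch (not part of the generated amalgamation) of the Bitvec
// primitives defined above: create a bitmap for nBit bits, set and test bit 1,
// clear it again (clearing needs a BITVEC_SZ-byte scratch buffer) and destroy
// the object. The function name is hypothetical and tls is assumed to be
// supplied by the caller.
func exampleBitvecRoundTrip(tls *libc.TLS, nBit Tu32) int32 {
	p := _sqlite3BitvecCreate(tls, nBit)
	if p == uintptr(0) {
		return int32(SQLITE_NOMEM)
	}
	defer _sqlite3BitvecDestroy(tls, p)
	// Bits are numbered starting with 1.
	if rc := _sqlite3BitvecSet(tls, p, uint32(1)); rc != SQLITE_OK {
		return rc
	}
	if _sqlite3BitvecTest(tls, p, uint32(1)) == 0 {
		return int32(SQLITE_ERROR) // bit 1 should now read back as set
	}
	// Clearing may have to rebuild the internal hash, hence the temp buffer.
	pBuf := Xsqlite3_malloc64(tls, uint64(BITVEC_SZ))
	if pBuf == uintptr(0) {
		return int32(SQLITE_NOMEM)
	}
	_sqlite3BitvecClear(tls, p, uint32(1), pBuf)
	Xsqlite3_free(tls, pBuf)
	return SQLITE_OK
}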
/*
** Let V[] be an array of unsigned characters sufficient to hold
** up to N bits. Let I be an integer between 0 and N. 0<=I<N.
** Then the following macros can be used to set, clear, or test
** individual bits within V.
*/
/* Decrement the repeat counter aOp[pc+1]; while it remains positive the same
** opcode is executed again (nx is forced to 0 so pc does not advance). */
*(*int32)(unsafe.Pointer(aOp + uintptr(pc+int32(1))*4))--
if *(*int32)(unsafe.Pointer(aOp + uintptr(pc+int32(1))*4)) > 0 {
nx = 0
}
pc += nx
*(*int32)(unsafe.Pointer(bp)) = *(*int32)(unsafe.Pointer(bp)) & int32(0x7fffffff) % sz
if op&int32(1) != 0 {
p5 = pV + uintptr((*(*int32)(unsafe.Pointer(bp))+int32(1))>>int32(3))
*(*uint8)(unsafe.Pointer(p5)) = uint8(int32(*(*uint8)(unsafe.Pointer(p5))) | libc.Int32FromInt32(1)<<((*(*int32)(unsafe.Pointer(bp))+libc.Int32FromInt32(1))&libc.Int32FromInt32(7)))
if op != int32(5) {
if _sqlite3BitvecSet(tls, pBitvec, uint32(*(*int32)(unsafe.Pointer(bp))+int32(1))) != 0 {
goto bitvec_end
}
}
} else {
p6 = pV + uintptr((*(*int32)(unsafe.Pointer(bp))+int32(1))>>int32(3))
*(*uint8)(unsafe.Pointer(p6)) = uint8(int32(*(*uint8)(unsafe.Pointer(p6))) & ^(libc.Int32FromInt32(1) << ((*(*int32)(unsafe.Pointer(bp)) + libc.Int32FromInt32(1)) & libc.Int32FromInt32(7))))
_sqlite3BitvecClear(tls, pBitvec, uint32(*(*int32)(unsafe.Pointer(bp))+int32(1)), pTmpSpace)
}
}
/* Test to make sure the linear array exactly matches the
** Bitvec object. Start with the assumption that they do
** match (rc==0). Change rc to non-zero if a discrepancy
** is found.
*/
rc = int32(uint32(_sqlite3BitvecTest(tls, uintptr(0), uint32(0))+_sqlite3BitvecTest(tls, pBitvec, uint32(sz+int32(1)))+_sqlite3BitvecTest(tls, pBitvec, uint32(0))) + (_sqlite3BitvecSize(tls, pBitvec) - uint32(sz)))
*(*int32)(unsafe.Pointer(bp)) = int32(1)
for {
if !(*(*int32)(unsafe.Pointer(bp)) <= sz) {
break
}
if libc.BoolInt32(int32(*(*uint8)(unsafe.Pointer(pV + uintptr(*(*int32)(unsafe.Pointer(bp))>>int32(3)))))&(int32(1)<<(*(*int32)(unsafe.Pointer(bp))&int32(7))) != 0) != _sqlite3BitvecTest(tls, pBitvec, uint32(*(*int32)(unsafe.Pointer(bp)))) {
rc = *(*int32)(unsafe.Pointer(bp))
break
}
goto _7
_7:
;
*(*int32)(unsafe.Pointer(bp))++
}
/* Free allocated structure */
goto bitvec_end
bitvec_end:
;
Xsqlite3_free(tls, pTmpSpace)
Xsqlite3_free(tls, pV)
_sqlite3BitvecDestroy(tls, pBitvec)
return rc
}
/************** End of bitvec.c **********************************************/
/************** Begin file pcache.c ******************************************/
/*
** 2008 August 05
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file implements the page cache.
*/
/* #include "sqliteInt.h" */
/*
** A complete page cache is an instance of this structure. Every
** entry in the cache holds a single page of the database file. The
** btree layer only operates on the cached copy of the database pages.
**
** A page cache entry is "clean" if it exactly matches what is currently
** on disk. A page is "dirty" if it has been modified and needs to be
** persisted to disk.
**
** pDirty, pDirtyTail, pSynced:
** All dirty pages are linked into the doubly linked list using
** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order
** such that p was added to the list more recently than p->pDirtyNext.
** PCache.pDirty points to the first (newest) element in the list and
** pDirtyTail to the last (oldest).
**
** The PCache.pSynced variable is used to optimize searching for a dirty
** page to eject from the cache mid-transaction. It is better to eject
** a page that does not require a journal sync than one that does.
** Therefore, pSynced is maintained so that it *almost* always points
** to either the oldest page in the pDirty/pDirtyTail list that has a
** clear PGHDR_NEED_SYNC flag or to a page that is older than this one
** (so that the right page to eject can be found by following pDirtyPrev
** pointers).
*/
type TPCache2 = struct {
FpDirty uintptr
FpDirtyTail uintptr
FpSynced uintptr
FnRefSum Ti64
FszCache int32
FszSpill int32
FszPage int32
FszExtra int32
FbPurgeable Tu8
FeCreate Tu8
FxStress uintptr
FpStress uintptr
FpCache uintptr
}
type PCache2 = TPCache2
/********************************** Test and Debug Logic **********************/
/*
** Debug tracing macros. Enable by changing the "0" to "1" and
** recompiling.
**
** When sqlite3PcacheTrace is 1, single line trace messages are issued.
** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries
** is displayed for many operations, resulting in a lot of output.
*/
/*
** Return 1 if pPg is on the dirty list for pCache. Return 0 if not.
** This routine runs inside of assert() statements only.
*/
/*
** Check invariants on a PgHdr entry. Return true if everything is OK.
** Return false if any invariant is violated.
**
** This routine is for use inside of assert() statements only. For
** example:
**
** assert( sqlite3PcachePageSanity(pPg) );
*/
/********************************** Linked List Management ********************/
/* Allowed values for second argument to pcacheManageDirtyList() */
// C documentation
//
// /*
// ** Manage pPage's participation on the dirty list. Bits of the addRemove
// ** argument determine what operation to do. The 0x01 bit means first
// ** remove pPage from the dirty list. The 0x02 bit means add pPage back to
// ** the dirty list. Doing both moves pPage to the front of the dirty list.
// */
func _pcacheManageDirtyList(tls *libc.TLS, pPage uintptr, addRemove Tu8) {
var p uintptr
_ = p
p = (*TPgHdr)(unsafe.Pointer(pPage)).FpCache
if int32(addRemove)&int32(PCACHE_DIRTYLIST_REMOVE) != 0 {
/* Update the PCache1.pSynced variable if necessary. */
if (*TPCache)(unsafe.Pointer(p)).FpSynced == pPage {
(*TPCache)(unsafe.Pointer(p)).FpSynced = (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyPrev
}
if (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyNext != 0 {
(*TPgHdr)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyNext)).FpDirtyPrev = (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyPrev
} else {
(*TPCache)(unsafe.Pointer(p)).FpDirtyTail = (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyPrev
}
if (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyPrev != 0 {
(*TPgHdr)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyPrev)).FpDirtyNext = (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyNext
} else {
/* If there are now no dirty pages in the cache, set eCreate to 2.
** This is an optimization that allows sqlite3PcacheFetch() to skip
** searching for a dirty page to eject from the cache when it might
** otherwise have to. */
(*TPCache)(unsafe.Pointer(p)).FpDirty = (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyNext
if (*TPCache)(unsafe.Pointer(p)).FpDirty == uintptr(0) { /*OPTIMIZATION-IF-TRUE*/
(*TPCache)(unsafe.Pointer(p)).FeCreate = uint8(2)
}
}
}
if int32(addRemove)&int32(PCACHE_DIRTYLIST_ADD) != 0 {
(*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyPrev = uintptr(0)
(*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyNext = (*TPCache)(unsafe.Pointer(p)).FpDirty
if (*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyNext != 0 {
(*TPgHdr)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(pPage)).FpDirtyNext)).FpDirtyPrev = pPage
} else {
(*TPCache)(unsafe.Pointer(p)).FpDirtyTail = pPage
if (*TPCache)(unsafe.Pointer(p)).FbPurgeable != 0 {
(*TPCache)(unsafe.Pointer(p)).FeCreate = uint8(1)
}
}
(*TPCache)(unsafe.Pointer(p)).FpDirty = pPage
/* If pSynced is NULL and this page has a clear NEED_SYNC flag, set
** pSynced to point to it. Checking the NEED_SYNC flag is an
** optimization, as if pSynced points to a page with the NEED_SYNC
** flag set sqlite3PcacheFetchStress() searches through all newer
** entries of the dirty-list for a page with NEED_SYNC clear anyway. */
if !((*TPCache)(unsafe.Pointer(p)).FpSynced != 0) && 0 == int32((*TPgHdr)(unsafe.Pointer(pPage)).Fflags)&int32(PGHDR_NEED_SYNC) {
(*TPCache)(unsafe.Pointer(p)).FpSynced = pPage
}
}
}
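// Standalone sketch (hypothetical names, not the generated structures above)
// of the dirty-list discipline that _pcacheManageDirtyList implements: the
// 0x01 bit unlinks a page from the doubly linked list, the 0x02 bit pushes it
// at the head (most recently dirtied), and 0x01|0x02 therefore moves an
// already-listed page to the front.
type exampleDirtyPage struct {
	prev, next *exampleDirtyPage
}

type exampleDirtyList struct {
	head, tail *exampleDirtyPage
}

func (l *exampleDirtyList) manage(p *exampleDirtyPage, addRemove int) {
	if addRemove&0x01 != 0 { // remove p from the list
		if p.next != nil {
			p.next.prev = p.prev
		} else {
			l.tail = p.prev
		}
		if p.prev != nil {
			p.prev.next = p.next
		} else {
			l.head = p.next
		}
	}
	if addRemove&0x02 != 0 { // add p at the head (newest end)
		p.prev = nil
		p.next = l.head
		if p.next != nil {
			p.next.prev = p
		} else {
			l.tail = p
		}
		l.head = p
	}
}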
// C documentation
//
// /*
// ** Wrapper around the pluggable caches xUnpin method. If the cache is
// ** being used for an in-memory database, this function is a no-op.
// */
func _pcacheUnpin(tls *libc.TLS, p uintptr) {
if (*TPCache)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(p)).FpCache)).FbPurgeable != 0 {
(*(*func(*libc.TLS, uintptr, uintptr, int32))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxUnpin})))(tls, (*TPCache)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(p)).FpCache)).FpCache, (*TPgHdr)(unsafe.Pointer(p)).FpPage, 0)
}
}
// C documentation
//
// /*
// ** Compute the number of pages of cache requested. p->szCache is the
// ** cache size requested by the "PRAGMA cache_size" statement.
// */
func _numberOfCachePages(tls *libc.TLS, p uintptr) (r int32) {
var n Ti64
_ = n
if (*TPCache)(unsafe.Pointer(p)).FszCache >= 0 {
/* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
** suggested cache size is set to N. */
return (*TPCache)(unsafe.Pointer(p)).FszCache
} else {
/* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the
** number of cache pages is adjusted to be a number of pages that would
** use approximately abs(N*1024) bytes of memory based on the current
** page size. */
n = int64(-libc.Int32FromInt32(1024)) * int64((*TPCache)(unsafe.Pointer(p)).FszCache) / int64((*TPCache)(unsafe.Pointer(p)).FszPage+(*TPCache)(unsafe.Pointer(p)).FszExtra)
if n > int64(1000000000) {
n = int64(1000000000)
}
return int32(n)
}
return r
}
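// Worked example (illustrative only) of the mapping implemented by
// _numberOfCachePages: a non-negative "PRAGMA cache_size" value N is already a
// page count, while a negative N requests roughly abs(N)*1024 bytes, so the
// page count becomes -1024*N/(szPage+szExtra), capped at 1e9. For instance,
// with N=-2000, szPage=4096 and szExtra=96 this gives 2048000/4192 = 488 pages.
func exampleCachePages(szCache, szPage, szExtra int32) int32 {
	if szCache >= 0 {
		return szCache
	}
	n := int64(-1024) * int64(szCache) / int64(szPage+szExtra)
	if n > 1000000000 {
		n = 1000000000
	}
	return int32(n)
}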
// C documentation
//
// /*************************************************** General Interfaces ******
// **
// ** Initialize and shutdown the page cache subsystem. Neither of these
// ** functions are threadsafe.
// */
func _sqlite3PcacheInitialize(tls *libc.TLS) (r int32) {
if _sqlite3Config.Fpcache2.FxInit == uintptr(0) {
/* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the
** built-in default page cache is used instead of the application defined
** page cache. */
_sqlite3PCacheSetDefault(tls)
}
return (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxInit})))(tls, _sqlite3Config.Fpcache2.FpArg)
}
func _sqlite3PcacheShutdown(tls *libc.TLS) {
if _sqlite3Config.Fpcache2.FxShutdown != 0 {
/* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxShutdown})))(tls, _sqlite3Config.Fpcache2.FpArg)
}
}
// C documentation
//
// /*
// ** Return the size in bytes of a PCache object.
// */
func _sqlite3PcacheSize(tls *libc.TLS) (r int32) {
return int32(80)
}
// C documentation
//
// /*
// ** Create a new PCache object. Storage space to hold the object
// ** has already been allocated and is passed in as the p pointer.
// ** The caller discovers how much space needs to be allocated by
// ** calling sqlite3PcacheSize().
// **
// ** szExtra is some extra space allocated for each page. The first
// ** 8 bytes of the extra space will be zeroed as the page is allocated,
// ** but remaining content will be uninitialized. Though it is opaque
// ** to this module, the extra space really ends up being the MemPage
// ** structure in the pager.
// */
func _sqlite3PcacheOpen(tls *libc.TLS, szPage int32, szExtra int32, bPurgeable int32, xStress uintptr, pStress uintptr, p uintptr) (r int32) {
libc.Xmemset(tls, p, 0, uint64(80))
(*TPCache)(unsafe.Pointer(p)).FszPage = int32(1)
(*TPCache)(unsafe.Pointer(p)).FszExtra = szExtra
/* First 8 bytes will be zeroed */
(*TPCache)(unsafe.Pointer(p)).FbPurgeable = uint8(bPurgeable)
(*TPCache)(unsafe.Pointer(p)).FeCreate = uint8(2)
(*TPCache)(unsafe.Pointer(p)).FxStress = xStress
(*TPCache)(unsafe.Pointer(p)).FpStress = pStress
(*TPCache)(unsafe.Pointer(p)).FszCache = int32(100)
(*TPCache)(unsafe.Pointer(p)).FszSpill = int32(1)
return _sqlite3PcacheSetPageSize(tls, p, szPage)
}
// C documentation
//
// /*
// ** Change the page size for PCache object. The caller must ensure that there
// ** are no outstanding page references when this function is called.
// */
func _sqlite3PcacheSetPageSize(tls *libc.TLS, pCache uintptr, szPage int32) (r int32) {
var pNew uintptr
_ = pNew
if (*TPCache)(unsafe.Pointer(pCache)).FszPage != 0 {
pNew = (*(*func(*libc.TLS, int32, int32, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxCreate})))(tls, szPage, int32(uint64((*TPCache)(unsafe.Pointer(pCache)).FszExtra)+(libc.Uint64FromInt64(80)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7))), int32((*TPCache)(unsafe.Pointer(pCache)).FbPurgeable))
if pNew == uintptr(0) {
return int32(SQLITE_NOMEM)
}
(*(*func(*libc.TLS, uintptr, int32))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxCachesize})))(tls, pNew, _numberOfCachePages(tls, pCache))
if (*TPCache)(unsafe.Pointer(pCache)).FpCache != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxDestroy})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache)
}
(*TPCache)(unsafe.Pointer(pCache)).FpCache = pNew
(*TPCache)(unsafe.Pointer(pCache)).FszPage = szPage
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Try to obtain a page from the cache.
// **
// ** This routine returns a pointer to an sqlite3_pcache_page object if
// ** such an object is already in cache, or if a new one is created.
// ** This routine returns a NULL pointer if the object was not in cache
// ** and could not be created.
// **
// ** The createFlag should be 0 to check for existing pages and should
// ** be 3 (not 1, but 3) to try to create a new page.
// **
// ** If the createFlag is 0, then NULL is always returned if the page
// ** is not already in the cache. If createFlag is 1, then a new page
// ** is created only if that can be done without spilling dirty pages
// ** and without exceeding the cache size limit.
// **
// ** The caller needs to invoke sqlite3PcacheFetchFinish() to properly
// ** initialize the sqlite3_pcache_page object and convert it into a
// ** PgHdr object. The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
// ** routines are split this way for performance reasons. When separated
// ** they can both (usually) operate without having to push values to
// ** the stack on entry and pop them back off on exit, which saves a
// ** lot of pushing and popping.
// */
func _sqlite3PcacheFetch(tls *libc.TLS, pCache uintptr, pgno TPgno, createFlag int32) (r uintptr) {
var eCreate int32
var pRes uintptr
_, _ = eCreate, pRes
/* eCreate defines what to do if the page does not exist.
** 0 Do not allocate a new page. (createFlag==0)
** 1 Allocate a new page if doing so is inexpensive.
** (createFlag==1 AND bPurgeable AND pDirty)
** 2 Allocate a new page even if doing so is difficult.
** (createFlag==1 AND !(bPurgeable AND pDirty))
*/
eCreate = createFlag & int32((*TPCache)(unsafe.Pointer(pCache)).FeCreate)
pRes = (*(*func(*libc.TLS, uintptr, uint32, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxFetch})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache, pgno, eCreate)
return pRes
}
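// Illustrative note on the mask above: callers pass createFlag as 0 or 3
// (see the header comment), and pCache->eCreate normally holds 1 or 2, so
// the AND yields 0 when no allocation is wanted and otherwise selects
// between "allocate only if inexpensive" (1) and "allocate even if
// difficult" (2), exactly as enumerated in the comment preceding this line.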
// C documentation
//
// /*
// ** If the sqlite3PcacheFetch() routine is unable to allocate a new
// ** page because no clean pages are available for reuse and the cache
// ** size limit has been reached, then this routine can be invoked to
// ** try harder to allocate a page. This routine might invoke the stress
// ** callback to spill dirty pages to the journal. It will then try to
// ** allocate the new page and will only fail to allocate a new page on
// ** an OOM error.
// **
// ** This routine should be invoked only after sqlite3PcacheFetch() fails.
// */
func _sqlite3PcacheFetchStress(tls *libc.TLS, pCache uintptr, pgno TPgno, ppPage uintptr) (r int32) {
var pPg uintptr
var rc, v3 int32
_, _, _ = pPg, rc, v3
if int32((*TPCache)(unsafe.Pointer(pCache)).FeCreate) == int32(2) {
return 0
}
if _sqlite3PcachePagecount(tls, pCache) > (*TPCache)(unsafe.Pointer(pCache)).FszSpill {
/* Find a dirty page to write-out and recycle. First try to find a
** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
** cleared), but if that is not possible settle for any other
** unreferenced dirty page.
**
** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC
** flag is currently referenced, then the following may leave pSynced
** set incorrectly (pointing to other than the LRU page with NEED_SYNC
** cleared). This is Ok, as pSynced is just an optimization. */
pPg = (*TPCache)(unsafe.Pointer(pCache)).FpSynced
for {
if !(pPg != 0 && ((*TPgHdr)(unsafe.Pointer(pPg)).FnRef != 0 || int32((*TPgHdr)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_NEED_SYNC) != 0)) {
break
}
goto _1
_1:
;
pPg = (*TPgHdr)(unsafe.Pointer(pPg)).FpDirtyPrev
}
(*TPCache)(unsafe.Pointer(pCache)).FpSynced = pPg
if !(pPg != 0) {
pPg = (*TPCache)(unsafe.Pointer(pCache)).FpDirtyTail
for {
if !(pPg != 0 && (*TPgHdr)(unsafe.Pointer(pPg)).FnRef != 0) {
break
}
goto _2
_2:
;
pPg = (*TPgHdr)(unsafe.Pointer(pPg)).FpDirtyPrev
}
}
if pPg != 0 {
rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*TPCache)(unsafe.Pointer(pCache)).FxStress})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpStress, pPg)
if rc != SQLITE_OK && rc != int32(SQLITE_BUSY) {
return rc
}
}
}
*(*uintptr)(unsafe.Pointer(ppPage)) = (*(*func(*libc.TLS, uintptr, uint32, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxFetch})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache, pgno, int32(2))
if *(*uintptr)(unsafe.Pointer(ppPage)) == uintptr(0) {
v3 = int32(SQLITE_NOMEM)
} else {
v3 = SQLITE_OK
}
return v3
}
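// Note on the spill threshold above: sqlite3PcacheOpen() initializes szSpill
// to 1, so until the pager raises it via sqlite3PcacheSetSpillsize() this
// stress path considers spilling as soon as more than one page is cached.
// The xStress callback is what actually writes a dirty page out so that its
// buffer can be recycled.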
// C documentation
//
// /*
// ** This is a helper routine for sqlite3PcacheFetchFinish()
// **
// ** In the uncommon case where the page being fetched has not been
// ** initialized, this routine is invoked to do the initialization.
// ** This routine is broken out into a separate function since it
// ** requires extra stack manipulation that can be avoided in the common
// ** case.
// */
func _pcacheFetchFinishWithInit(tls *libc.TLS, pCache uintptr, pgno TPgno, pPage uintptr) (r uintptr) {
var pPgHdr uintptr
_ = pPgHdr
pPgHdr = (*Tsqlite3_pcache_page)(unsafe.Pointer(pPage)).FpExtra
libc.Xmemset(tls, pPgHdr+32, 0, libc.Uint64FromInt64(80)-uint64(libc.UintptrFromInt32(0)+32))
(*TPgHdr)(unsafe.Pointer(pPgHdr)).FpPage = pPage
(*TPgHdr)(unsafe.Pointer(pPgHdr)).FpData = (*Tsqlite3_pcache_page)(unsafe.Pointer(pPage)).FpBuf
(*TPgHdr)(unsafe.Pointer(pPgHdr)).FpExtra = pPgHdr + 1*80
libc.Xmemset(tls, (*TPgHdr)(unsafe.Pointer(pPgHdr)).FpExtra, 0, uint64(8))
(*TPgHdr)(unsafe.Pointer(pPgHdr)).FpCache = pCache
(*TPgHdr)(unsafe.Pointer(pPgHdr)).Fpgno = pgno
(*TPgHdr)(unsafe.Pointer(pPgHdr)).Fflags = uint16(PGHDR_CLEAN)
return _sqlite3PcacheFetchFinish(tls, pCache, pgno, pPage)
}
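// Layout note for the initialization above (offsets are specific to this
// amd64 build): the PgHdr occupies the 80 bytes of extra space requested
// from the pluggable cache, the memset at pPgHdr+32 clears only the fields
// that follow the first four pointer-sized members, and pPgHdr+1*80 is the
// caller-supplied extra area (the pager's MemPage) whose first 8 bytes are
// zeroed here, as promised by the sqlite3PcacheOpen() comment.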
// C documentation
//
// /*
// ** This routine converts the sqlite3_pcache_page object returned by
// ** sqlite3PcacheFetch() into an initialized PgHdr object. This routine
// ** must be called after sqlite3PcacheFetch() in order to get a usable
// ** result.
// */
func _sqlite3PcacheFetchFinish(tls *libc.TLS, pCache uintptr, pgno TPgno, pPage uintptr) (r uintptr) {
var pPgHdr uintptr
_ = pPgHdr
pPgHdr = (*Tsqlite3_pcache_page)(unsafe.Pointer(pPage)).FpExtra
if !((*TPgHdr)(unsafe.Pointer(pPgHdr)).FpPage != 0) {
return _pcacheFetchFinishWithInit(tls, pCache, pgno, pPage)
}
(*TPCache)(unsafe.Pointer(pCache)).FnRefSum++
(*TPgHdr)(unsafe.Pointer(pPgHdr)).FnRef++
return pPgHdr
}
// C documentation
//
// /*
// ** Decrement the reference count on a page. If the page is clean and the
// ** reference count drops to 0, then it is made eligible for recycling.
// */
func _sqlite3PcacheRelease(tls *libc.TLS, p uintptr) {
var v1 Ti64
var v2 uintptr
_, _ = v1, v2
(*TPCache)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum--
v2 = p + 56
*(*Ti64)(unsafe.Pointer(v2))--
v1 = *(*Ti64)(unsafe.Pointer(v2))
if v1 == 0 {
if int32((*TPgHdr)(unsafe.Pointer(p)).Fflags)&int32(PGHDR_CLEAN) != 0 {
_pcacheUnpin(tls, p)
} else {
_pcacheManageDirtyList(tls, p, uint8(PCACHE_DIRTYLIST_FRONT))
}
}
}
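// Offset note for the decrement above (this build's PgHdr layout): p+56 is
// the 64-bit nRef field and p+52 is the 16-bit flags field used elsewhere in
// this file, so the sequence is simply "--p->nRef; if (p->nRef==0) ..." from
// the original C, with clean pages unpinned and dirty ones moved to the
// front of the dirty list.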
// C documentation
//
// /*
// ** Increase the reference count of a supplied page by 1.
// */
func _sqlite3PcacheRef(tls *libc.TLS, p uintptr) {
(*TPgHdr)(unsafe.Pointer(p)).FnRef++
(*TPCache)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum++
}
// C documentation
//
// /*
// ** Drop a page from the cache. There must be exactly one reference to the
// ** page. This function deletes that reference, so after it returns the
// ** page pointed to by p is invalid.
// */
func _sqlite3PcacheDrop(tls *libc.TLS, p uintptr) {
if int32((*TPgHdr)(unsafe.Pointer(p)).Fflags)&int32(PGHDR_DIRTY) != 0 {
_pcacheManageDirtyList(tls, p, uint8(PCACHE_DIRTYLIST_REMOVE))
}
(*TPCache)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(p)).FpCache)).FnRefSum--
(*(*func(*libc.TLS, uintptr, uintptr, int32))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxUnpin})))(tls, (*TPCache)(unsafe.Pointer((*TPgHdr)(unsafe.Pointer(p)).FpCache)).FpCache, (*TPgHdr)(unsafe.Pointer(p)).FpPage, int32(1))
}
// C documentation
//
// /*
// ** Make sure the page is marked as dirty. If it isn't dirty already,
// ** make it so.
// */
func _sqlite3PcacheMakeDirty(tls *libc.TLS, p uintptr) {
var p1, p2 uintptr
_, _ = p1, p2
if int32((*TPgHdr)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(PGHDR_CLEAN)|libc.Int32FromInt32(PGHDR_DONT_WRITE)) != 0 { /*OPTIMIZATION-IF-FALSE*/
p1 = p + 52
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(PGHDR_DONT_WRITE))
if int32((*TPgHdr)(unsafe.Pointer(p)).Fflags)&int32(PGHDR_CLEAN) != 0 {
p2 = p + 52
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) ^ (libc.Int32FromInt32(PGHDR_DIRTY) | libc.Int32FromInt32(PGHDR_CLEAN)))
_pcacheManageDirtyList(tls, p, uint8(PCACHE_DIRTYLIST_ADD))
}
}
}
// C documentation
//
// /*
// ** Make sure the page is marked as clean. If it isn't clean already,
// ** make it so.
// */
func _sqlite3PcacheMakeClean(tls *libc.TLS, p uintptr) {
var p1, p2 uintptr
_, _ = p1, p2
_pcacheManageDirtyList(tls, p, uint8(PCACHE_DIRTYLIST_REMOVE))
p1 = p + 52
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(PGHDR_DIRTY) | libc.Int32FromInt32(PGHDR_NEED_SYNC) | libc.Int32FromInt32(PGHDR_WRITEABLE)))
p2 = p + 52
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(PGHDR_CLEAN))
if (*TPgHdr)(unsafe.Pointer(p)).FnRef == 0 {
_pcacheUnpin(tls, p)
}
}
// C documentation
//
// /*
// ** Make every page in the cache clean.
// */
func _sqlite3PcacheCleanAll(tls *libc.TLS, pCache uintptr) {
var p, v1 uintptr
_, _ = p, v1
for {
v1 = (*TPCache)(unsafe.Pointer(pCache)).FpDirty
p = v1
if !(v1 != uintptr(0)) {
break
}
_sqlite3PcacheMakeClean(tls, p)
}
}
// C documentation
//
// /*
// ** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages.
// */
func _sqlite3PcacheClearWritable(tls *libc.TLS, pCache uintptr) {
var p, p2 uintptr
_, _ = p, p2
p = (*TPCache)(unsafe.Pointer(pCache)).FpDirty
for {
if !(p != 0) {
break
}
p2 = p + 52
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^(libc.Int32FromInt32(PGHDR_NEED_SYNC) | libc.Int32FromInt32(PGHDR_WRITEABLE)))
goto _1
_1:
;
p = (*TPgHdr)(unsafe.Pointer(p)).FpDirtyNext
}
(*TPCache)(unsafe.Pointer(pCache)).FpSynced = (*TPCache)(unsafe.Pointer(pCache)).FpDirtyTail
}
// C documentation
//
// /*
// ** Clear the PGHDR_NEED_SYNC flag from all dirty pages.
// */
func _sqlite3PcacheClearSyncFlags(tls *libc.TLS, pCache uintptr) {
var p, p2 uintptr
_, _ = p, p2
p = (*TPCache)(unsafe.Pointer(pCache)).FpDirty
for {
if !(p != 0) {
break
}
p2 = p + 52
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^libc.Int32FromInt32(PGHDR_NEED_SYNC))
goto _1
_1:
;
p = (*TPgHdr)(unsafe.Pointer(p)).FpDirtyNext
}
(*TPCache)(unsafe.Pointer(pCache)).FpSynced = (*TPCache)(unsafe.Pointer(pCache)).FpDirtyTail
}
// C documentation
//
// /*
// ** Change the page number of page p to newPgno.
// */
func _sqlite3PcacheMove(tls *libc.TLS, p uintptr, newPgno TPgno) {
var pCache, pOther, pXPage uintptr
_, _, _ = pCache, pOther, pXPage
pCache = (*TPgHdr)(unsafe.Pointer(p)).FpCache
pOther = (*(*func(*libc.TLS, uintptr, uint32, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxFetch})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache, newPgno, 0)
if pOther != 0 {
pXPage = (*Tsqlite3_pcache_page)(unsafe.Pointer(pOther)).FpExtra
(*TPgHdr)(unsafe.Pointer(pXPage)).FnRef++
(*TPCache)(unsafe.Pointer(pCache)).FnRefSum++
_sqlite3PcacheDrop(tls, pXPage)
}
(*(*func(*libc.TLS, uintptr, uintptr, uint32, uint32))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxRekey})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache, (*TPgHdr)(unsafe.Pointer(p)).FpPage, (*TPgHdr)(unsafe.Pointer(p)).Fpgno, newPgno)
(*TPgHdr)(unsafe.Pointer(p)).Fpgno = newPgno
if int32((*TPgHdr)(unsafe.Pointer(p)).Fflags)&int32(PGHDR_DIRTY) != 0 && int32((*TPgHdr)(unsafe.Pointer(p)).Fflags)&int32(PGHDR_NEED_SYNC) != 0 {
_pcacheManageDirtyList(tls, p, uint8(PCACHE_DIRTYLIST_FRONT))
}
}
// C documentation
//
// /*
// ** Drop every cache entry whose page number is greater than "pgno". The
// ** caller must ensure that there are no outstanding references to any pages
// ** other than page 1 with a page number greater than pgno.
// **
// ** If there is a reference to page 1 and the pgno parameter passed to this
// ** function is 0, then the data area associated with page 1 is zeroed, but
// ** the page object is not dropped.
// */
func _sqlite3PcacheTruncate(tls *libc.TLS, pCache uintptr, pgno TPgno) {
var p, pNext, pPage1 uintptr
_, _, _ = p, pNext, pPage1
if (*TPCache)(unsafe.Pointer(pCache)).FpCache != 0 {
p = (*TPCache)(unsafe.Pointer(pCache)).FpDirty
for {
if !(p != 0) {
break
}
pNext = (*TPgHdr)(unsafe.Pointer(p)).FpDirtyNext
/* This routine never gets called with a positive pgno except right
** after sqlite3PcacheCleanAll(). So if there are dirty pages,
** it must be that pgno==0.
*/
if (*TPgHdr)(unsafe.Pointer(p)).Fpgno > pgno {
_sqlite3PcacheMakeClean(tls, p)
}
goto _1
_1:
;
p = pNext
}
if pgno == uint32(0) && (*TPCache)(unsafe.Pointer(pCache)).FnRefSum != 0 {
pPage1 = (*(*func(*libc.TLS, uintptr, uint32, int32) uintptr)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxFetch})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache, uint32(1), 0)
if pPage1 != 0 { /* Page 1 is always available in cache, because
** pCache->nRefSum>0 */
libc.Xmemset(tls, (*Tsqlite3_pcache_page)(unsafe.Pointer(pPage1)).FpBuf, 0, uint64((*TPCache)(unsafe.Pointer(pCache)).FszPage))
pgno = uint32(1)
}
}
(*(*func(*libc.TLS, uintptr, uint32))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxTruncate})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache, pgno+uint32(1))
}
}
// C documentation
//
// /*
// ** Close a cache.
// */
func _sqlite3PcacheClose(tls *libc.TLS, pCache uintptr) {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxDestroy})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache)
}
// C documentation
//
// /*
// ** Discard the contents of the cache.
// */
func _sqlite3PcacheClear(tls *libc.TLS, pCache uintptr) {
_sqlite3PcacheTruncate(tls, pCache, uint32(0))
}
// C documentation
//
// /*
// ** Merge two lists of pages connected by pDirty and in pgno order.
// ** Do not bother fixing the pDirtyPrev pointers.
// */
func _pcacheMergeDirtyList(tls *libc.TLS, pA uintptr, pB uintptr) (r uintptr) {
bp := tls.Alloc(80)
defer tls.Free(80)
var pTail uintptr
var _ /* result at bp+0 */ TPgHdr
_ = pTail
pTail = bp
for {
if (*TPgHdr)(unsafe.Pointer(pA)).Fpgno < (*TPgHdr)(unsafe.Pointer(pB)).Fpgno {
(*TPgHdr)(unsafe.Pointer(pTail)).FpDirty = pA
pTail = pA
pA = (*TPgHdr)(unsafe.Pointer(pA)).FpDirty
if pA == uintptr(0) {
(*TPgHdr)(unsafe.Pointer(pTail)).FpDirty = pB
break
}
} else {
(*TPgHdr)(unsafe.Pointer(pTail)).FpDirty = pB
pTail = pB
pB = (*TPgHdr)(unsafe.Pointer(pB)).FpDirty
if pB == uintptr(0) {
(*TPgHdr)(unsafe.Pointer(pTail)).FpDirty = pA
break
}
}
goto _1
_1:
}
return (*(*TPgHdr)(unsafe.Pointer(bp))).FpDirty
}
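// Illustrative example of the merge above: given pA = {1,4,9} and
// pB = {2,3,7} (page numbers linked through pDirty), the loop repeatedly
// appends the smaller head to pTail, producing {1,2,3,4,7,9}. The dummy
// PgHdr at bp only supplies the initial pDirty slot; its other fields are
// never read.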
// C documentation
//
// /*
// ** Sort the list of pages in ascending order by pgno. Pages are
// ** connected by pDirty pointers. The pDirtyPrev pointers are
// ** corrupted by this sort.
// **
// ** Since there cannot be more than 2^31 distinct pages in a database,
// ** there cannot be more than 31 buckets required by the merge sorter.
// ** One extra bucket is added to catch overflow in case something
// ** ever changes to make the previous sentence incorrect.
// */
func _pcacheSortDirtyList(tls *libc.TLS, pIn uintptr) (r uintptr) {
bp := tls.Alloc(256)
defer tls.Free(256)
var i int32
var p, v3 uintptr
var _ /* a at bp+0 */ [32]uintptr
_, _, _ = i, p, v3
libc.Xmemset(tls, bp, 0, uint64(256))
for pIn != 0 {
p = pIn
pIn = (*TPgHdr)(unsafe.Pointer(p)).FpDirty
(*TPgHdr)(unsafe.Pointer(p)).FpDirty = uintptr(0)
i = 0
for {
if !(i < libc.Int32FromInt32(N_SORT_BUCKET)-libc.Int32FromInt32(1)) {
break
}
if (*(*[32]uintptr)(unsafe.Pointer(bp)))[i] == uintptr(0) {
(*(*[32]uintptr)(unsafe.Pointer(bp)))[i] = p
break
} else {
p = _pcacheMergeDirtyList(tls, (*(*[32]uintptr)(unsafe.Pointer(bp)))[i], p)
(*(*[32]uintptr)(unsafe.Pointer(bp)))[i] = uintptr(0)
}
goto _1
_1:
;
i++
}
if i == libc.Int32FromInt32(N_SORT_BUCKET)-libc.Int32FromInt32(1) {
/* To get here, there need to be 2^(N_SORT_BUCKET) elements in
** the input list. But that is impossible.
*/
(*(*[32]uintptr)(unsafe.Pointer(bp)))[i] = _pcacheMergeDirtyList(tls, (*(*[32]uintptr)(unsafe.Pointer(bp)))[i], p)
}
}
p = (*(*[32]uintptr)(unsafe.Pointer(bp)))[0]
i = int32(1)
for {
if !(i < int32(N_SORT_BUCKET)) {
break
}
if (*(*[32]uintptr)(unsafe.Pointer(bp)))[i] == uintptr(0) {
goto _2
}
if p != 0 {
v3 = _pcacheMergeDirtyList(tls, p, (*(*[32]uintptr)(unsafe.Pointer(bp)))[i])
} else {
v3 = (*(*[32]uintptr)(unsafe.Pointer(bp)))[i]
}
p = v3
goto _2
_2:
;
i++
}
return p
}
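// Illustrative note on the bucket scheme above: bucket a[i] holds a sorted
// run of 2^i pages, and inserting one page behaves like incrementing a
// binary counter. For example, after 5 pages have been consumed the state is
// a[0] = one page and a[2] = a run of four; the final loop then merges all
// non-empty buckets into a single sorted list.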
// C documentation
//
// /*
// ** Return a list of all dirty pages in the cache, sorted by page number.
// */
func _sqlite3PcacheDirtyList(tls *libc.TLS, pCache uintptr) (r uintptr) {
var p uintptr
_ = p
p = (*TPCache)(unsafe.Pointer(pCache)).FpDirty
for {
if !(p != 0) {
break
}
(*TPgHdr)(unsafe.Pointer(p)).FpDirty = (*TPgHdr)(unsafe.Pointer(p)).FpDirtyNext
goto _1
_1:
;
p = (*TPgHdr)(unsafe.Pointer(p)).FpDirtyNext
}
return _pcacheSortDirtyList(tls, (*TPCache)(unsafe.Pointer(pCache)).FpDirty)
}
// C documentation
//
// /*
// ** Return the total number of references to all pages held by the cache.
// **
// ** This is not the total number of pages referenced, but the sum of the
// ** reference count for all pages.
// */
func _sqlite3PcacheRefCount(tls *libc.TLS, pCache uintptr) (r Ti64) {
return (*TPCache)(unsafe.Pointer(pCache)).FnRefSum
}
// C documentation
//
// /*
// ** Return the number of references to the page supplied as an argument.
// */
func _sqlite3PcachePageRefcount(tls *libc.TLS, p uintptr) (r Ti64) {
return (*TPgHdr)(unsafe.Pointer(p)).FnRef
}
// C documentation
//
// /*
// ** Return the total number of pages in the cache.
// */
func _sqlite3PcachePagecount(tls *libc.TLS, pCache uintptr) (r int32) {
return (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxPagecount})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache)
}
// C documentation
//
// /*
// ** Set the suggested cache-size value.
// */
func _sqlite3PcacheSetCachesize(tls *libc.TLS, pCache uintptr, mxPage int32) {
(*TPCache)(unsafe.Pointer(pCache)).FszCache = mxPage
(*(*func(*libc.TLS, uintptr, int32))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxCachesize})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache, _numberOfCachePages(tls, pCache))
}
// C documentation
//
// /*
// ** Set the suggested cache-spill value. Make no changes if the
// ** argument is zero. Return the effective cache-spill size, which will
// ** be the larger of szSpill and szCache.
// */
func _sqlite3PcacheSetSpillsize(tls *libc.TLS, p uintptr, mxPage int32) (r int32) {
var res int32
_ = res
if mxPage != 0 {
if mxPage < 0 {
mxPage = int32(int64(-libc.Int32FromInt32(1024)) * int64(mxPage) / int64((*TPCache)(unsafe.Pointer(p)).FszPage+(*TPCache)(unsafe.Pointer(p)).FszExtra))
}
(*TPCache)(unsafe.Pointer(p)).FszSpill = mxPage
}
res = _numberOfCachePages(tls, p)
if res < (*TPCache)(unsafe.Pointer(p)).FszSpill {
res = (*TPCache)(unsafe.Pointer(p)).FszSpill
}
return res
}
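// Worked example for the negative-argument case above: with szPage=4096 and
// szExtra=96 (example values only), mxPage = -8 is converted to
// (-1024 * -8) / (4096+96) = 8192/4192 = 1 page; that is, a negative value
// requests roughly -1024*mxPage bytes of spill headroom rather than a page
// count.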
// C documentation
//
// /*
// ** Free up as much memory as possible from the page cache.
// */
func _sqlite3PcacheShrink(tls *libc.TLS, pCache uintptr) {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{_sqlite3Config.Fpcache2.FxShrink})))(tls, (*TPCache)(unsafe.Pointer(pCache)).FpCache)
}
// C documentation
//
// /*
// ** Return the size of the header added by this middleware layer
// ** in the page-cache hierarchy.
// */
func _sqlite3HeaderSizePcache(tls *libc.TLS) (r int32) {
return int32((libc.Uint64FromInt64(80) + libc.Uint64FromInt32(7)) & uint64(^libc.Int32FromInt32(7)))
}
// C documentation
//
// /*
// ** Return the number of dirty pages currently in the cache, as a percentage
// ** of the configured cache size.
// */
func _sqlite3PCachePercentDirty(tls *libc.TLS, pCache uintptr) (r int32) {
var nCache, nDirty, v2 int32
var pDirty uintptr
_, _, _, _ = nCache, nDirty, pDirty, v2
nDirty = 0
nCache = _numberOfCachePages(tls, pCache)
pDirty = (*TPCache)(unsafe.Pointer(pCache)).FpDirty
for {
if !(pDirty != 0) {
break
}
nDirty++
goto _1
_1:
;
pDirty = (*TPgHdr)(unsafe.Pointer(pDirty)).FpDirtyNext
}
if nCache != 0 {
v2 = int32(int64(nDirty) * libc.Int64FromInt32(100) / int64(nCache))
} else {
v2 = 0
}
return v2
}
// C documentation
//
// /*
// ** Return true if there are one or more dirty pages in the cache. Else false.
// */
func _sqlite3PCacheIsDirty(tls *libc.TLS, pCache uintptr) (r int32) {
return libc.BoolInt32((*TPCache)(unsafe.Pointer(pCache)).FpDirty != uintptr(0))
}
/************** End of pcache.c **********************************************/
/************** Begin file pcache1.c *****************************************/
/*
** 2008 November 05
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file implements the default page cache implementation (the
** sqlite3_pcache interface). It also contains part of the implementation
** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features.
** If the default page cache implementation is overridden, then neither of
** these two features is available.
**
** A Page cache line looks like this:
**
** -------------------------------------------------------------
** | database page content | PgHdr1 | MemPage | PgHdr |
** -------------------------------------------------------------
**
** The database page content is up front (so that buffer overreads tend to
** flow harmlessly into the PgHdr1, MemPage, and PgHdr extensions). MemPage
** is the extension added by the btree.c module containing information such
** as the database page number and how that database page is used. PgHdr
** is added by the pcache.c layer and contains information used to keep track
** of which pages are "dirty". PgHdr1 is an extension added by this
** module (pcache1.c). The PgHdr1 header is a subclass of sqlite3_pcache_page.
** PgHdr1 contains information needed to look up a page by its page number.
** The superclass sqlite3_pcache_page.pBuf points to the start of the
** database page content and sqlite3_pcache_page.pExtra points to PgHdr.
**
** The size of the extension (MemPage+PgHdr+PgHdr1) can be determined at
** runtime using sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &size). The
** sizes of the extensions sum to 272 bytes on x64 for 3.8.10, but this
** size can vary according to architecture, compile-time options, and
** SQLite library version number.
**
** Historical note: It used to be that if the SQLITE_PCACHE_SEPARATE_HEADER
** was defined, then the page content would be held in a separate memory
** allocation from the PgHdr1. This was intended to avoid clownshoe memory
** allocations. However, the btree layer needs a small (16-byte) overrun
** area after the page content buffer. The header serves as that overrun
** area. Therefore SQLITE_PCACHE_SEPARATE_HEADER was discontinued to avoid
** any possibility of a memory error.
**
** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates
** with this module. Information is passed back and forth as PgHdr1 pointers.
**
** The pcache.c and pager.c modules deal with pointers to PgHdr objects.
** The btree.c module deals with pointers to MemPage objects.
**
** SOURCE OF PAGE CACHE MEMORY:
**
** Memory for a page might come from any of three sources:
**
** (1) The general-purpose memory allocator - sqlite3Malloc()
** (2) Global page-cache memory provided using sqlite3_config() with
** SQLITE_CONFIG_PAGECACHE.
** (3) PCache-local bulk allocation.
**
** The third case is a chunk of heap memory (defaulting to 100 pages worth)
** that is allocated when the page cache is created. The size of the local
** bulk allocation can be adjusted using
**
** sqlite3_config(SQLITE_CONFIG_PAGECACHE, (void*)0, 0, N).
**
** If N is positive, then N pages worth of memory are allocated using a single
** sqlite3Malloc() call and that memory is used for the first N pages allocated.
** Or if N is negative, then -1024*N bytes of memory are allocated and used
** for as many pages as can be accommodated.
**
** Only one of (2) or (3) can be used. Once the memory available to (2) or
** (3) is exhausted, subsequent allocations fail over to the general-purpose
** memory allocator (1).
**
** Earlier versions of SQLite used only methods (1) and (2). But experiments
** show that method (3) with N==100 provides about a 5% performance boost for
** common workloads.
*/
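// Sizes on this particular build, for orientation: the PgHdr1 header defined
// below occupies 56 bytes (hence the "+ 1*56" and (56+7)&^7 expressions in
// this file) and the pcache.c PgHdr occupies 80 bytes; the MemPage portion
// is whatever szExtra the b-tree/pager layer requests. As the comment above
// notes, the combined size varies between builds and SQLite versions.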
/* #include "sqliteInt.h" */
type TPCache1 = struct {
FpGroup uintptr
FpnPurgeable uintptr
FszPage int32
FszExtra int32
FszAlloc int32
FbPurgeable int32
FnMin uint32
FnMax uint32
Fn90pct uint32
FiMaxKey uint32
FnPurgeableDummy uint32
FnRecyclable uint32
FnPage uint32
FnHash uint32
FapHash uintptr
FpFree uintptr
FpBulk uintptr
}
type PCache1 = TPCache1
type TPgHdr1 = struct {
Fpage Tsqlite3_pcache_page
FiKey uint32
FisBulkLocal Tu16
FisAnchor Tu16
FpNext uintptr
FpCache uintptr
FpLruNext uintptr
FpLruPrev uintptr
}
type PgHdr1 = TPgHdr1
type TPgFreeslot = struct {
FpNext uintptr
}
type PgFreeslot = TPgFreeslot
type TPGroup = struct {
Fmutex uintptr
FnMaxPage uint32
FnMinPage uint32
FmxPinned uint32
FnPurgeable uint32
Flru TPgHdr1
}
type PGroup = TPGroup
/*
** Each cache entry is represented by an instance of the following
** structure. A buffer of PgHdr1.pCache->szPage bytes is allocated
** directly before this structure and is used to cache the page content.
**
** When reading a corrupt database file, it is possible that SQLite might
** read a few bytes (no more than 16 bytes) past the end of the page buffer.
** It will only read past the end of the page buffer, never write. This
** object is positioned immediately after the page buffer to serve as an
** overrun area, so that overreads are harmless.
**
** Variables isBulkLocal and isAnchor were once type "u8". That works,
** but causes a 2-byte gap in the structure for most architectures (since
** pointers must be either 4 or 8-byte aligned). As this structure is located
** in memory directly after the associated page data, if the database is
** corrupt, code at the b-tree layer may overread the page buffer and
** read part of this structure before the corruption is detected. This
** can cause a valgrind error if the uninitialized gap is accessed. Using u16
** ensures there is no such gap, and therefore no bytes of uninitialized
** memory in the structure.
**
** The pLruNext and pLruPrev pointers form a double-linked circular list
** of all pages that are unpinned. The PGroup.lru element (which should be
** the only element on the list with PgHdr1.isAnchor set to 1) forms the
** beginning and the end of the list.
*/
type TPgHdr11 = struct {
Fpage Tsqlite3_pcache_page
FiKey uint32
FisBulkLocal Tu16
FisAnchor Tu16
FpNext uintptr
FpCache uintptr
FpLruNext uintptr
FpLruPrev uintptr
}
type PgHdr11 = TPgHdr11
/*
** A page is pinned if it is not on the LRU list. To be "pinned" means
** that the page is in active use and must not be deallocated.
*/
/* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set
** of one or more PCaches that are able to recycle each other's unpinned
** pages when they are under memory pressure. A PGroup is an instance of
** the following object.
**
** This page cache implementation works in one of two modes:
**
** (1) Every PCache is the sole member of its own PGroup. There is
** one PGroup per PCache.
**
** (2) There is a single global PGroup that all PCaches are a member
** of.
**
** Mode 1 uses more memory (since PCache instances are not able to rob
** unused pages from other PCaches) but it also operates without a mutex,
** and is therefore often faster. Mode 2 requires a mutex in order to be
** threadsafe, but recycles pages more efficiently.
**
** For mode (1), PGroup.mutex is NULL. For mode (2) there is only a single
** PGroup which is the pcache1.grp global variable and its mutex is
** SQLITE_MUTEX_STATIC_LRU.
*/
type TPGroup1 = struct {
Fmutex uintptr
FnMaxPage uint32
FnMinPage uint32
FmxPinned uint32
FnPurgeable uint32
Flru TPgHdr1
}
type PGroup1 = TPGroup1
/* Each page cache is an instance of the following object. Every
** open database file (including each in-memory database and each
** temporary or transient database) has a single page cache which
** is an instance of this object.
**
** Pointers to structures of this type are cast and returned as
** opaque sqlite3_pcache* handles.
*/
type TPCache11 = struct {
FpGroup uintptr
FpnPurgeable uintptr
FszPage int32
FszExtra int32
FszAlloc int32
FbPurgeable int32
FnMin uint32
FnMax uint32
Fn90pct uint32
FiMaxKey uint32
FnPurgeableDummy uint32
FnRecyclable uint32
FnPage uint32
FnHash uint32
FapHash uintptr
FpFree uintptr
FpBulk uintptr
}
type PCache11 = TPCache11
/*
** Free slots in the allocator used to divide up the global page cache
** buffer provided using the SQLITE_CONFIG_PAGECACHE mechanism.
*/
type TPgFreeslot1 = struct {
FpNext uintptr
}
type PgFreeslot1 = TPgFreeslot1
// C documentation
//
// /*
// ** Global data used by this cache.
// */
type TPCacheGlobal = struct {
Fgrp TPGroup
FisInit int32
FseparateCache int32
FnInitPage int32
FszSlot int32
FnSlot int32
FnReserve int32
FpStart uintptr
FpEnd uintptr
Fmutex uintptr
FpFree uintptr
FnFreeSlot int32
FbUnderPressure int32
}
type PCacheGlobal = TPCacheGlobal
// C documentation
//
// /*
// ** Global data used by this cache.
// */
var _pcache1_g TPCacheGlobal
/*
** All code in this file should access the global structure above via the
** alias "pcache1". This ensures that the WSD emulation is used when
** compiling for systems that do not support real WSD.
*/
/*
** Macros to enter and leave the PCache LRU mutex.
*/
/******************************************************************************/
/******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/
// C documentation
//
// /*
// ** This function is called during initialization if a static buffer is
// ** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE
// ** verb to sqlite3_config(). Parameter pBuf points to an allocation large
// ** enough to contain 'n' buffers of 'sz' bytes each.
// **
// ** This routine is called from sqlite3_initialize() and so it is guaranteed
// ** to be serialized already. There is no need for further mutexing.
// */
func _sqlite3PCacheBufferSetup(tls *libc.TLS, pBuf uintptr, sz int32, n int32) {
var p uintptr
var v1, v2, v3, v4 int32
_, _, _, _, _ = p, v1, v2, v3, v4
if libc.AtomicLoadPInt32(uintptr(unsafe.Pointer(&_pcache1_g))+80) != 0 {
if pBuf == uintptr(0) {
v1 = libc.Int32FromInt32(0)
n = v1
sz = v1
}
if n == 0 {
sz = 0
}
sz = sz & ^libc.Int32FromInt32(7)
_pcache1_g.FszSlot = sz
v2 = n
_pcache1_g.FnFreeSlot = v2
_pcache1_g.FnSlot = v2
if n > int32(90) {
v3 = int32(10)
} else {
v3 = n/int32(10) + int32(1)
}
_pcache1_g.FnReserve = v3
_pcache1_g.FpStart = pBuf
_pcache1_g.FpFree = uintptr(0)
_pcache1_g.FbUnderPressure = 0
for {
v4 = n
n--
if !(v4 != 0) {
break
}
p = pBuf
(*TPgFreeslot)(unsafe.Pointer(p)).FpNext = _pcache1_g.FpFree
_pcache1_g.FpFree = p
pBuf = pBuf + uintptr(sz)
}
_pcache1_g.FpEnd = pBuf
}
}
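// Worked example of the slot setup above (illustrative numbers): calling
// sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, 4100, 100) rounds sz down to
// a multiple of 8 giving szSlot=4096, records nSlot=nFreeSlot=100, reserves
// nReserve=10 slots (since n > 90), and threads all 100 slots onto the free
// list, with pEnd marking the first byte past the supplied buffer.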
// C documentation
//
// /*
// ** Try to initialize the pCache->pFree and pCache->pBulk fields. Return
// ** true if pCache->pFree ends up containing one or more free pages.
// */
func _pcache1InitBulk(tls *libc.TLS, pCache uintptr) (r int32) {
var nBulk, v2 int32
var pX, zBulk, v1 uintptr
var szBulk Ti64
_, _, _, _, _, _ = nBulk, pX, szBulk, zBulk, v1, v2
if _pcache1_g.FnInitPage == 0 {
return 0
}
/* Do not bother with a bulk allocation if the cache size is very small */
if (*TPCache1)(unsafe.Pointer(pCache)).FnMax < uint32(3) {
return 0
}
_sqlite3BeginBenignMalloc(tls)
if _pcache1_g.FnInitPage > 0 {
szBulk = int64((*TPCache1)(unsafe.Pointer(pCache)).FszAlloc) * int64(_pcache1_g.FnInitPage)
} else {
szBulk = int64(-int32(1024)) * int64(_pcache1_g.FnInitPage)
}
if szBulk > int64((*TPCache1)(unsafe.Pointer(pCache)).FszAlloc)*int64((*TPCache1)(unsafe.Pointer(pCache)).FnMax) {
szBulk = int64((*TPCache1)(unsafe.Pointer(pCache)).FszAlloc) * int64((*TPCache1)(unsafe.Pointer(pCache)).FnMax)
}
v1 = _sqlite3Malloc(tls, uint64(szBulk))
(*TPCache1)(unsafe.Pointer(pCache)).FpBulk = v1
zBulk = v1
_sqlite3EndBenignMalloc(tls)
if zBulk != 0 {
nBulk = _sqlite3MallocSize(tls, zBulk) / (*TPCache1)(unsafe.Pointer(pCache)).FszAlloc
for {
pX = zBulk + uintptr((*TPCache1)(unsafe.Pointer(pCache)).FszPage)
(*TPgHdr1)(unsafe.Pointer(pX)).Fpage.FpBuf = zBulk
(*TPgHdr1)(unsafe.Pointer(pX)).Fpage.FpExtra = pX + 1*56
(*TPgHdr1)(unsafe.Pointer(pX)).FisBulkLocal = uint16(1)
(*TPgHdr1)(unsafe.Pointer(pX)).FisAnchor = uint16(0)
(*TPgHdr1)(unsafe.Pointer(pX)).FpNext = (*TPCache1)(unsafe.Pointer(pCache)).FpFree
(*TPgHdr1)(unsafe.Pointer(pX)).FpLruPrev = uintptr(0) /* Initializing this saves a valgrind error */
(*TPCache1)(unsafe.Pointer(pCache)).FpFree = pX
zBulk += uintptr((*TPCache1)(unsafe.Pointer(pCache)).FszAlloc)
goto _3
_3:
;
nBulk--
v2 = nBulk
if !(v2 != 0) {
break
}
}
}
return libc.BoolInt32((*TPCache1)(unsafe.Pointer(pCache)).FpFree != uintptr(0))
}
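// Worked example for the bulk sizing above (illustrative numbers): with
// szAlloc=4256 and a configured nInitPage of 100, the bulk request is
// 100*4256 = 425600 bytes, capped at szAlloc*nMax; with a negative nInitPage
// of -1000 it would instead be -1024 * -1000 = 1024000 bytes, carved into as
// many szAlloc-sized pages as fit.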
// C documentation
//
// /*
// ** Malloc function used within this file to allocate space from the buffer
// ** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no
// ** such buffer exists or there is no space left in it, this function falls
// ** back to sqlite3Malloc().
// **
// ** Multiple threads can run this routine at the same time. Global variables
// ** in pcache1 need to be protected via mutex.
// */
func _pcache1Alloc(tls *libc.TLS, nByte int32) (r uintptr) {
var p uintptr
var sz int32
_, _ = p, sz
p = uintptr(0)
if nByte <= _pcache1_g.FszSlot {
Xsqlite3_mutex_enter(tls, _pcache1_g.Fmutex)
p = _pcache1_g.FpFree
if p != 0 {
_pcache1_g.FpFree = (*TPgFreeslot)(unsafe.Pointer(_pcache1_g.FpFree)).FpNext
_pcache1_g.FnFreeSlot--
_pcache1_g.FbUnderPressure = libc.BoolInt32(_pcache1_g.FnFreeSlot < _pcache1_g.FnReserve)
_sqlite3StatusHighwater(tls, int32(SQLITE_STATUS_PAGECACHE_SIZE), nByte)
_sqlite3StatusUp(tls, int32(SQLITE_STATUS_PAGECACHE_USED), int32(1))
}
Xsqlite3_mutex_leave(tls, _pcache1_g.Fmutex)
}
if p == uintptr(0) {
/* Memory is not available in the SQLITE_CONFIG_PAGECACHE pool. Get
** it from sqlite3Malloc instead.
*/
p = _sqlite3Malloc(tls, uint64(nByte))
if p != 0 {
sz = _sqlite3MallocSize(tls, p)
Xsqlite3_mutex_enter(tls, _pcache1_g.Fmutex)
_sqlite3StatusHighwater(tls, int32(SQLITE_STATUS_PAGECACHE_SIZE), nByte)
_sqlite3StatusUp(tls, int32(SQLITE_STATUS_PAGECACHE_OVERFLOW), sz)
Xsqlite3_mutex_leave(tls, _pcache1_g.Fmutex)
}
}
return p
}
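// Note on the fallback above: a request larger than szSlot, or any request
// made after the SQLITE_CONFIG_PAGECACHE pool is exhausted, is satisfied by
// sqlite3Malloc() and accounted under SQLITE_STATUS_PAGECACHE_OVERFLOW
// rather than SQLITE_STATUS_PAGECACHE_USED.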
// C documentation
//
// /*
// ** Free an allocated buffer obtained from pcache1Alloc().
// */
func _pcache1Free(tls *libc.TLS, p uintptr) {
var nFreed int32
var pSlot uintptr
_, _ = nFreed, pSlot
if p == uintptr(0) {
return
}
if uint64(p) >= uint64(_pcache1_g.FpStart) && uint64(p) < uint64(_pcache1_g.FpEnd) {
Xsqlite3_mutex_enter(tls, _pcache1_g.Fmutex)
_sqlite3StatusDown(tls, int32(SQLITE_STATUS_PAGECACHE_USED), int32(1))
pSlot = p
(*TPgFreeslot)(unsafe.Pointer(pSlot)).FpNext = _pcache1_g.FpFree
_pcache1_g.FpFree = pSlot
_pcache1_g.FnFreeSlot++
_pcache1_g.FbUnderPressure = libc.BoolInt32(_pcache1_g.FnFreeSlot < _pcache1_g.FnReserve)
Xsqlite3_mutex_leave(tls, _pcache1_g.Fmutex)
} else {
nFreed = 0
nFreed = _sqlite3MallocSize(tls, p)
Xsqlite3_mutex_enter(tls, _pcache1_g.Fmutex)
_sqlite3StatusDown(tls, int32(SQLITE_STATUS_PAGECACHE_OVERFLOW), nFreed)
Xsqlite3_mutex_leave(tls, _pcache1_g.Fmutex)
Xsqlite3_free(tls, p)
}
}
// C documentation
//
// /*
// ** Return the size of a pcache allocation
// */
func _pcache1MemSize(tls *libc.TLS, p uintptr) (r int32) {
var iSize int32
_ = iSize
if p >= _pcache1_g.FpStart && p < _pcache1_g.FpEnd {
return _pcache1_g.FszSlot
} else {
iSize = _sqlite3MallocSize(tls, p)
return iSize
}
return r
}
// C documentation
//
// /*
// ** Allocate a new page object initially associated with cache pCache.
// */
func _pcache1AllocPage(tls *libc.TLS, pCache uintptr, benignMalloc int32) (r uintptr) {
var p, pPg uintptr
_, _ = p, pPg
p = uintptr(0)
if (*TPCache1)(unsafe.Pointer(pCache)).FpFree != 0 || (*TPCache1)(unsafe.Pointer(pCache)).FnPage == uint32(0) && _pcache1InitBulk(tls, pCache) != 0 {
p = (*TPCache1)(unsafe.Pointer(pCache)).FpFree
(*TPCache1)(unsafe.Pointer(pCache)).FpFree = (*TPgHdr1)(unsafe.Pointer(p)).FpNext
(*TPgHdr1)(unsafe.Pointer(p)).FpNext = uintptr(0)
} else {
/* The group mutex must be released before pcache1Alloc() is called. This
** is because it might call sqlite3_release_memory(), which assumes that
** this mutex is not held. */
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
if benignMalloc != 0 {
_sqlite3BeginBenignMalloc(tls)
}
pPg = _pcache1Alloc(tls, (*TPCache1)(unsafe.Pointer(pCache)).FszAlloc)
if benignMalloc != 0 {
_sqlite3EndBenignMalloc(tls)
}
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
if pPg == uintptr(0) {
return uintptr(0)
}
p = pPg + uintptr((*TPCache1)(unsafe.Pointer(pCache)).FszPage)
(*TPgHdr1)(unsafe.Pointer(p)).Fpage.FpBuf = pPg
(*TPgHdr1)(unsafe.Pointer(p)).Fpage.FpExtra = p + 1*56
(*TPgHdr1)(unsafe.Pointer(p)).FisBulkLocal = uint16(0)
(*TPgHdr1)(unsafe.Pointer(p)).FisAnchor = uint16(0)
(*TPgHdr1)(unsafe.Pointer(p)).FpLruPrev = uintptr(0) /* Initializing this saves a valgrind error */
}
*(*uint32)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpnPurgeable))++
return p
}
// C documentation
//
// /*
// ** Free a page object allocated by pcache1AllocPage().
// */
func _pcache1FreePage(tls *libc.TLS, p uintptr) {
var pCache uintptr
_ = pCache
pCache = (*TPgHdr1)(unsafe.Pointer(p)).FpCache
if (*TPgHdr1)(unsafe.Pointer(p)).FisBulkLocal != 0 {
(*TPgHdr1)(unsafe.Pointer(p)).FpNext = (*TPCache1)(unsafe.Pointer(pCache)).FpFree
(*TPCache1)(unsafe.Pointer(pCache)).FpFree = p
} else {
_pcache1Free(tls, (*TPgHdr1)(unsafe.Pointer(p)).Fpage.FpBuf)
}
*(*uint32)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpnPurgeable))--
}
// C documentation
//
// /*
// ** Malloc function used by SQLite to obtain space from the buffer configured
// ** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer
// ** exists, this function falls back to sqlite3Malloc().
// */
func _sqlite3PageMalloc(tls *libc.TLS, sz int32) (r uintptr) {
/* These allocations are never very large */
return _pcache1Alloc(tls, sz)
}
// C documentation
//
// /*
// ** Free an allocated buffer obtained from sqlite3PageMalloc().
// */
func _sqlite3PageFree(tls *libc.TLS, p uintptr) {
_pcache1Free(tls, p)
}
// C documentation
//
// /*
// ** Return true if it is desirable to avoid allocating a new page cache
// ** entry.
// **
// ** If memory was allocated specifically to the page cache using
// ** SQLITE_CONFIG_PAGECACHE but that memory has all been used, then
// ** it is desirable to avoid allocating a new page cache entry because
// ** presumably SQLITE_CONFIG_PAGECACHE was supposed to be sufficient
// ** for all page cache needs and we should not need to spill the
// ** allocation onto the heap.
// **
// ** Or, if the heap is used for all page cache memory and the heap is
// ** under memory pressure, then again it is desirable to avoid
// ** allocating a new page cache entry in order to avoid stressing
// ** the heap even further.
// */
func _pcache1UnderMemoryPressure(tls *libc.TLS, pCache uintptr) (r int32) {
if _pcache1_g.FnSlot != 0 && (*TPCache1)(unsafe.Pointer(pCache)).FszPage+(*TPCache1)(unsafe.Pointer(pCache)).FszExtra <= _pcache1_g.FszSlot {
return _pcache1_g.FbUnderPressure
} else {
return _sqlite3HeapNearlyFull(tls)
}
return r
}
/******************************************************************************/
/******** General Implementation Functions ************************************/
// C documentation
//
// /*
// ** This function is used to resize the hash table used by the cache passed
// ** as the first argument.
// **
// ** The PCache mutex must be held when this function is called.
// */
func _pcache1ResizeHash(tls *libc.TLS, p uintptr) {
var apNew, pNext, pPage, v2 uintptr
var h, i, nNew uint32
_, _, _, _, _, _, _ = apNew, h, i, nNew, pNext, pPage, v2
nNew = (*TPCache1)(unsafe.Pointer(p)).FnHash * uint32(2)
if nNew < uint32(256) {
nNew = uint32(256)
}
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(p)).FpGroup)).Fmutex)
if (*TPCache1)(unsafe.Pointer(p)).FnHash != 0 {
_sqlite3BeginBenignMalloc(tls)
}
apNew = _sqlite3MallocZero(tls, uint64(8)*uint64(nNew))
if (*TPCache1)(unsafe.Pointer(p)).FnHash != 0 {
_sqlite3EndBenignMalloc(tls)
}
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(p)).FpGroup)).Fmutex)
if apNew != 0 {
i = uint32(0)
for {
if !(i < (*TPCache1)(unsafe.Pointer(p)).FnHash) {
break
}
pNext = *(*uintptr)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(p)).FapHash + uintptr(i)*8))
for {
v2 = pNext
pPage = v2
if !(v2 != uintptr(0)) {
break
}
h = (*TPgHdr1)(unsafe.Pointer(pPage)).FiKey % nNew
pNext = (*TPgHdr1)(unsafe.Pointer(pPage)).FpNext
(*TPgHdr1)(unsafe.Pointer(pPage)).FpNext = *(*uintptr)(unsafe.Pointer(apNew + uintptr(h)*8))
*(*uintptr)(unsafe.Pointer(apNew + uintptr(h)*8)) = pPage
}
goto _1
_1:
;
i++
}
Xsqlite3_free(tls, (*TPCache1)(unsafe.Pointer(p)).FapHash)
(*TPCache1)(unsafe.Pointer(p)).FapHash = apNew
(*TPCache1)(unsafe.Pointer(p)).FnHash = nNew
}
}
// C documentation
//
// /*
// ** This function is used internally to remove the page pPage from the
// ** PGroup LRU list, if it is part of it. If pPage is not part of the PGroup
// ** LRU list, then this function is a no-op.
// **
// ** The PGroup mutex must be held when this function is called.
// */
func _pcache1PinPage(tls *libc.TLS, pPage uintptr) (r uintptr) {
(*TPgHdr1)(unsafe.Pointer((*TPgHdr1)(unsafe.Pointer(pPage)).FpLruPrev)).FpLruNext = (*TPgHdr1)(unsafe.Pointer(pPage)).FpLruNext
(*TPgHdr1)(unsafe.Pointer((*TPgHdr1)(unsafe.Pointer(pPage)).FpLruNext)).FpLruPrev = (*TPgHdr1)(unsafe.Pointer(pPage)).FpLruPrev
(*TPgHdr1)(unsafe.Pointer(pPage)).FpLruNext = uintptr(0)
/* pPage->pLruPrev = 0;
** No need to clear pLruPrev as it is never accessed if pLruNext is 0 */
(*TPCache1)(unsafe.Pointer((*TPgHdr1)(unsafe.Pointer(pPage)).FpCache)).FnRecyclable--
return pPage
}
// C documentation
//
// /*
// ** Remove the page supplied as an argument from the hash table
// ** (PCache1.apHash structure) that it is currently stored in.
// ** Also free the page if freeFlag is true.
// **
// ** The PGroup mutex must be held when this function is called.
// */
func _pcache1RemoveFromHash(tls *libc.TLS, pPage uintptr, freeFlag int32) {
var h uint32
var pCache, pp uintptr
_, _, _ = h, pCache, pp
pCache = (*TPgHdr1)(unsafe.Pointer(pPage)).FpCache
h = (*TPgHdr1)(unsafe.Pointer(pPage)).FiKey % (*TPCache1)(unsafe.Pointer(pCache)).FnHash
pp = (*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(h)*8
for {
if !(*(*uintptr)(unsafe.Pointer(pp)) != pPage) {
break
}
goto _1
_1:
;
pp = *(*uintptr)(unsafe.Pointer(pp)) + 24
}
*(*uintptr)(unsafe.Pointer(pp)) = (*TPgHdr1)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pp)))).FpNext
(*TPCache1)(unsafe.Pointer(pCache)).FnPage--
if freeFlag != 0 {
_pcache1FreePage(tls, pPage)
}
}
// C documentation
//
// /*
// ** If there are currently more than nMaxPage pages allocated, try
// ** to recycle pages to reduce the number allocated to nMaxPage.
// */
func _pcache1EnforceMaxPage(tls *libc.TLS, pCache uintptr) {
var p, pGroup, v1, v3 uintptr
var v2 bool
_, _, _, _, _ = p, pGroup, v1, v2, v3
pGroup = (*TPCache1)(unsafe.Pointer(pCache)).FpGroup
for {
if v2 = (*TPGroup)(unsafe.Pointer(pGroup)).FnPurgeable > (*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage; v2 {
v1 = (*TPGroup)(unsafe.Pointer(pGroup)).Flru.FpLruPrev
p = v1
}
if !(v2 && int32((*TPgHdr1)(unsafe.Pointer(v1)).FisAnchor) == 0) {
break
}
_pcache1PinPage(tls, p)
_pcache1RemoveFromHash(tls, p, int32(1))
}
if (*TPCache1)(unsafe.Pointer(pCache)).FnPage == uint32(0) && (*TPCache1)(unsafe.Pointer(pCache)).FpBulk != 0 {
Xsqlite3_free(tls, (*TPCache1)(unsafe.Pointer(pCache)).FpBulk)
v3 = libc.UintptrFromInt32(0)
(*TPCache1)(unsafe.Pointer(pCache)).FpFree = v3
(*TPCache1)(unsafe.Pointer(pCache)).FpBulk = v3
}
}
// C documentation
//
// /*
// ** Discard all pages from cache pCache with a page number (key value)
// ** greater than or equal to iLimit. Any pinned pages that meet this
// ** criterion are unpinned before they are discarded.
// **
// ** The PCache mutex must be held when this function is called.
// */
func _pcache1TruncateUnsafe(tls *libc.TLS, pCache uintptr, iLimit uint32) {
var h, iStop uint32
var pPage, pp, v2 uintptr
_, _, _, _, _ = h, iStop, pPage, pp, v2
if (*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey-iLimit < (*TPCache1)(unsafe.Pointer(pCache)).FnHash {
/* If we are just shaving the last few pages off the end of the
** cache, then there is no point in scanning the entire hash table.
** Only scan those hash slots that might contain pages that need to
** be removed. */
h = iLimit % (*TPCache1)(unsafe.Pointer(pCache)).FnHash
iStop = (*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey % (*TPCache1)(unsafe.Pointer(pCache)).FnHash
/* Disable the pCache->nPage validity check */
} else {
/* This is the general case where many pages are being removed.
** It is necessary to scan the entire hash table */
h = (*TPCache1)(unsafe.Pointer(pCache)).FnHash / uint32(2)
iStop = h - uint32(1)
}
for {
pp = (*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(h)*8
for {
v2 = *(*uintptr)(unsafe.Pointer(pp))
pPage = v2
if !(v2 != uintptr(0)) {
break
}
if (*TPgHdr1)(unsafe.Pointer(pPage)).FiKey >= iLimit {
(*TPCache1)(unsafe.Pointer(pCache)).FnPage--
*(*uintptr)(unsafe.Pointer(pp)) = (*TPgHdr1)(unsafe.Pointer(pPage)).FpNext
if (*TPgHdr1)(unsafe.Pointer(pPage)).FpLruNext != uintptr(0) {
_pcache1PinPage(tls, pPage)
}
_pcache1FreePage(tls, pPage)
} else {
pp = pPage + 24
}
}
if h == iStop {
break
}
h = (h + uint32(1)) % (*TPCache1)(unsafe.Pointer(pCache)).FnHash
goto _1
_1:
}
}
/******************************************************************************/
/******** sqlite3_pcache Methods **********************************************/
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xInit method.
// */
func _pcache1Init(tls *libc.TLS, NotUsed uintptr) (r int32) {
_ = NotUsed
libc.Xmemset(tls, uintptr(unsafe.Pointer(&_pcache1_g)), 0, uint64(144))
/*
** The pcache1.separateCache variable is true if each PCache has its own
** private PGroup (mode-1). pcache1.separateCache is false if the single
** PGroup in pcache1.grp is used for all page caches (mode-2).
**
** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT
**
** * Use a unified cache in single-threaded applications that have
** configured a start-time buffer for use as page-cache memory using
** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL
** pBuf argument.
**
** * Otherwise use separate caches (mode-1)
*/
_pcache1_g.FseparateCache = 0
if _sqlite3Config.FbCoreMutex != 0 {
_pcache1_g.Fgrp.Fmutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_LRU))
_pcache1_g.Fmutex = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_PMEM))
}
if _pcache1_g.FseparateCache != 0 && _sqlite3Config.FnPage != 0 && _sqlite3Config.FpPage == uintptr(0) {
_pcache1_g.FnInitPage = _sqlite3Config.FnPage
} else {
_pcache1_g.FnInitPage = 0
}
_pcache1_g.Fgrp.FmxPinned = uint32(10)
libc.AtomicStorePInt32(uintptr(unsafe.Pointer(&_pcache1_g))+80, int32(1))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xShutdown method.
// ** Note that the static mutex allocated in xInit does
// ** not need to be freed.
// */
func _pcache1Shutdown(tls *libc.TLS, NotUsed uintptr) {
_ = NotUsed
libc.Xmemset(tls, uintptr(unsafe.Pointer(&_pcache1_g)), 0, uint64(144))
}
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xCreate method.
// **
// ** Allocate a new cache.
// */
func _pcache1Create(tls *libc.TLS, szPage int32, szExtra int32, bPurgeable int32) (r uintptr) {
var pCache, pGroup, v1 uintptr
var sz, v2 int32
_, _, _, _, _ = pCache, pGroup, sz, v1, v2 /* Bytes of memory required to allocate the new cache */
sz = int32(uint64(88) + uint64(80)*uint64(_pcache1_g.FseparateCache))
pCache = _sqlite3MallocZero(tls, uint64(sz))
if pCache != 0 {
if _pcache1_g.FseparateCache != 0 {
pGroup = pCache + 1*88
(*TPGroup)(unsafe.Pointer(pGroup)).FmxPinned = uint32(10)
} else {
pGroup = uintptr(unsafe.Pointer(&_pcache1_g))
}
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
if int32((*TPGroup)(unsafe.Pointer(pGroup)).Flru.FisAnchor) == 0 {
(*TPGroup)(unsafe.Pointer(pGroup)).Flru.FisAnchor = uint16(1)
v1 = pGroup + 24
(*TPGroup)(unsafe.Pointer(pGroup)).Flru.FpLruNext = v1
(*TPGroup)(unsafe.Pointer(pGroup)).Flru.FpLruPrev = v1
}
(*TPCache1)(unsafe.Pointer(pCache)).FpGroup = pGroup
(*TPCache1)(unsafe.Pointer(pCache)).FszPage = szPage
(*TPCache1)(unsafe.Pointer(pCache)).FszExtra = szExtra
(*TPCache1)(unsafe.Pointer(pCache)).FszAlloc = int32(uint64(szPage+szExtra) + (libc.Uint64FromInt64(56)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7)))
if bPurgeable != 0 {
v2 = int32(1)
} else {
v2 = 0
}
(*TPCache1)(unsafe.Pointer(pCache)).FbPurgeable = v2
_pcache1ResizeHash(tls, pCache)
if bPurgeable != 0 {
(*TPCache1)(unsafe.Pointer(pCache)).FnMin = uint32(10)
*(*uint32)(unsafe.Pointer(pGroup + 12)) += (*TPCache1)(unsafe.Pointer(pCache)).FnMin
(*TPGroup)(unsafe.Pointer(pGroup)).FmxPinned = (*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage + uint32(10) - (*TPGroup)(unsafe.Pointer(pGroup)).FnMinPage
(*TPCache1)(unsafe.Pointer(pCache)).FpnPurgeable = pGroup + 20
} else {
(*TPCache1)(unsafe.Pointer(pCache)).FpnPurgeable = pCache + 48
}
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
if (*TPCache1)(unsafe.Pointer(pCache)).FnHash == uint32(0) {
_pcache1Destroy(tls, pCache)
pCache = uintptr(0)
}
}
return pCache
}
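// Illustrative arithmetic for szAlloc above: ROUND8(sizeof(PgHdr1)) is
// (56+7)&^7 = 56, so for szPage=4096 and szExtra=104 (example values only)
// each cache line is allocated as 4096+104+56 = 4256 bytes, with the PgHdr1
// placed at offset szPage and the extra area immediately after it.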
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xCachesize method.
// **
// ** Configure the cache_size limit for a cache.
// */
func _pcache1Cachesize(tls *libc.TLS, p uintptr, nMax int32) {
var n Tu32
var pCache, pGroup uintptr
_, _, _ = n, pCache, pGroup
pCache = p
if (*TPCache1)(unsafe.Pointer(pCache)).FbPurgeable != 0 {
pGroup = (*TPCache1)(unsafe.Pointer(pCache)).FpGroup
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
n = uint32(nMax)
if n > uint32(0x7fff0000)-(*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage+(*TPCache1)(unsafe.Pointer(pCache)).FnMax {
n = uint32(0x7fff0000) - (*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage + (*TPCache1)(unsafe.Pointer(pCache)).FnMax
}
*(*uint32)(unsafe.Pointer(pGroup + 8)) += n - (*TPCache1)(unsafe.Pointer(pCache)).FnMax
(*TPGroup)(unsafe.Pointer(pGroup)).FmxPinned = (*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage + uint32(10) - (*TPGroup)(unsafe.Pointer(pGroup)).FnMinPage
(*TPCache1)(unsafe.Pointer(pCache)).FnMax = n
(*TPCache1)(unsafe.Pointer(pCache)).Fn90pct = (*TPCache1)(unsafe.Pointer(pCache)).FnMax * uint32(9) / uint32(10)
_pcache1EnforceMaxPage(tls, pCache)
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
}
}
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xShrink method.
// **
// ** Free up as much memory as possible.
// */
func _pcache1Shrink(tls *libc.TLS, p uintptr) {
var pCache, pGroup uintptr
var savedMaxPage uint32
_, _, _ = pCache, pGroup, savedMaxPage
pCache = p
if (*TPCache1)(unsafe.Pointer(pCache)).FbPurgeable != 0 {
pGroup = (*TPCache1)(unsafe.Pointer(pCache)).FpGroup
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
savedMaxPage = (*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage
(*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage = uint32(0)
_pcache1EnforceMaxPage(tls, pCache)
(*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage = savedMaxPage
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
}
}
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xPagecount method.
// */
func _pcache1Pagecount(tls *libc.TLS, p uintptr) (r int32) {
var n int32
var pCache uintptr
_, _ = n, pCache
pCache = p
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
n = int32((*TPCache1)(unsafe.Pointer(pCache)).FnPage)
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
return n
}
// C documentation
//
// /*
// ** Implement steps 3, 4, and 5 of the pcache1Fetch() algorithm described
// ** in the header of the pcache1Fetch() procedure.
// **
// ** These steps are broken out into a separate procedure because they are
// ** usually not needed, and by avoiding the stack initialization required
// ** for these steps, the main pcache1Fetch() procedure can run faster.
// */
func _pcache1FetchStage2(tls *libc.TLS, pCache uintptr, iKey uint32, createFlag int32) (r uintptr) {
var h, nPinned uint32
var pGroup, pOther, pPage uintptr
_, _, _, _, _ = h, nPinned, pGroup, pOther, pPage
pGroup = (*TPCache1)(unsafe.Pointer(pCache)).FpGroup
pPage = uintptr(0)
/* Step 3: Abort if createFlag is 1 but the cache is nearly full */
nPinned = (*TPCache1)(unsafe.Pointer(pCache)).FnPage - (*TPCache1)(unsafe.Pointer(pCache)).FnRecyclable
if createFlag == int32(1) && (nPinned >= (*TPGroup)(unsafe.Pointer(pGroup)).FmxPinned || nPinned >= (*TPCache1)(unsafe.Pointer(pCache)).Fn90pct || _pcache1UnderMemoryPressure(tls, pCache) != 0 && (*TPCache1)(unsafe.Pointer(pCache)).FnRecyclable < nPinned) {
return uintptr(0)
}
if (*TPCache1)(unsafe.Pointer(pCache)).FnPage >= (*TPCache1)(unsafe.Pointer(pCache)).FnHash {
_pcache1ResizeHash(tls, pCache)
}
/* Step 4. Try to recycle a page. */
if (*TPCache1)(unsafe.Pointer(pCache)).FbPurgeable != 0 && !((*TPgHdr1)(unsafe.Pointer((*TPGroup)(unsafe.Pointer(pGroup)).Flru.FpLruPrev)).FisAnchor != 0) && ((*TPCache1)(unsafe.Pointer(pCache)).FnPage+uint32(1) >= (*TPCache1)(unsafe.Pointer(pCache)).FnMax || _pcache1UnderMemoryPressure(tls, pCache) != 0) {
pPage = (*TPGroup)(unsafe.Pointer(pGroup)).Flru.FpLruPrev
_pcache1RemoveFromHash(tls, pPage, 0)
_pcache1PinPage(tls, pPage)
pOther = (*TPgHdr1)(unsafe.Pointer(pPage)).FpCache
if (*TPCache1)(unsafe.Pointer(pOther)).FszAlloc != (*TPCache1)(unsafe.Pointer(pCache)).FszAlloc {
_pcache1FreePage(tls, pPage)
pPage = uintptr(0)
} else {
*(*uint32)(unsafe.Pointer(pGroup + 20)) -= uint32((*TPCache1)(unsafe.Pointer(pOther)).FbPurgeable - (*TPCache1)(unsafe.Pointer(pCache)).FbPurgeable)
}
}
/* Step 5. If a usable page buffer has still not been found,
** attempt to allocate a new one.
*/
if !(pPage != 0) {
pPage = _pcache1AllocPage(tls, pCache, libc.BoolInt32(createFlag == int32(1)))
}
if pPage != 0 {
h = iKey % (*TPCache1)(unsafe.Pointer(pCache)).FnHash
(*TPCache1)(unsafe.Pointer(pCache)).FnPage++
(*TPgHdr1)(unsafe.Pointer(pPage)).FiKey = iKey
(*TPgHdr1)(unsafe.Pointer(pPage)).FpNext = *(*uintptr)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(h)*8))
(*TPgHdr1)(unsafe.Pointer(pPage)).FpCache = pCache
(*TPgHdr1)(unsafe.Pointer(pPage)).FpLruNext = uintptr(0)
/* pPage->pLruPrev = 0;
** No need to clear pLruPrev since it is not accessed when pLruNext==0 */
*(*uintptr)(unsafe.Pointer((*TPgHdr1)(unsafe.Pointer(pPage)).Fpage.FpExtra)) = uintptr(0)
*(*uintptr)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(h)*8)) = pPage
if iKey > (*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey {
(*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey = iKey
}
}
return pPage
}
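// Note on the pin limit used in step 3 above: PGroup.mxPinned is maintained
// as nMaxPage + 10 - nMinPage (see pcache1Cachesize()), so a createFlag==1
// allocation is refused once the cache's pinned-page count approaches either
// that group-wide bound or its own 90% mark, or when the allocator reports
// memory pressure.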
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xFetch method.
// **
// ** Fetch a page by key value.
// **
// ** Whether or not a new page may be allocated by this function depends on
// ** the value of the createFlag argument. 0 means do not allocate a new
// ** page. 1 means allocate a new page if space is easily available. 2
// ** means to try really hard to allocate a new page.
// **
// ** For a non-purgeable cache (a cache used as the storage for an in-memory
// ** database) there is really no difference between createFlag 1 and 2. So
// ** the calling function (pcache.c) will never have a createFlag of 1 on
// ** a non-purgeable cache.
// **
// ** There are three different approaches to obtaining space for a page,
// ** depending on the value of parameter createFlag (which may be 0, 1 or 2).
// **
// ** 1. Regardless of the value of createFlag, the cache is searched for a
// ** copy of the requested page. If one is found, it is returned.
// **
// ** 2. If createFlag==0 and the page is not already in the cache, NULL is
// ** returned.
// **
// ** 3. If createFlag is 1, and the page is not already in the cache, then
// ** return NULL (do not allocate a new page) if any of the following
// ** conditions are true:
// **
// ** (a) the number of pages pinned by the cache is greater than
// ** PCache1.nMax, or
// **
// ** (b) the number of pages pinned by the cache is greater than
// ** the sum of nMax for all purgeable caches, less the sum of
// ** nMin for all other purgeable caches, or
// **
// ** 4. If none of the first three conditions apply and the cache is marked
// ** as purgeable, and if one of the following is true:
// **
// ** (a) The number of pages allocated for the cache is already
// ** PCache1.nMax, or
// **
// ** (b) The number of pages allocated for all purgeable caches is
// ** already equal to or greater than the sum of nMax for all
// ** purgeable caches,
// **
// ** (c) The system is under memory pressure and wants to avoid
// ** unnecessary page cache entry allocations
// **
// ** then attempt to recycle a page from the LRU list. If it is the right
// ** size, return the recycled buffer. Otherwise, free the buffer and
// ** proceed to step 5.
// **
// ** 5. Otherwise, allocate and return a new page buffer.
// **
// ** There are two versions of this routine. pcache1FetchWithMutex() is
// ** the general case. pcache1FetchNoMutex() is a faster implementation for
// ** the common case where pGroup->mutex is NULL. The pcache1Fetch() wrapper
// ** invokes the appropriate routine.
// */
func _pcache1FetchNoMutex(tls *libc.TLS, p uintptr, iKey uint32, createFlag int32) (r uintptr) {
var pCache, pPage uintptr
_, _ = pCache, pPage
pCache = p
pPage = uintptr(0)
/* Step 1: Search the hash table for an existing entry. */
pPage = *(*uintptr)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(iKey%(*TPCache1)(unsafe.Pointer(pCache)).FnHash)*8))
for pPage != 0 && (*TPgHdr1)(unsafe.Pointer(pPage)).FiKey != iKey {
pPage = (*TPgHdr1)(unsafe.Pointer(pPage)).FpNext
}
/* Step 2: If the page was found in the hash table, then return it.
** If the page was not in the hash table and createFlag is 0, abort.
** Otherwise (page not in hash and createFlag!=0) continue with
** subsequent steps to try to create the page. */
if pPage != 0 {
if (*TPgHdr1)(unsafe.Pointer(pPage)).FpLruNext != uintptr(0) {
return _pcache1PinPage(tls, pPage)
} else {
return pPage
}
} else {
if createFlag != 0 {
/* Steps 3, 4, and 5 implemented by this subroutine */
return _pcache1FetchStage2(tls, pCache, iKey, createFlag)
} else {
return uintptr(0)
}
}
return r
}
func _pcache1FetchWithMutex(tls *libc.TLS, p uintptr, iKey uint32, createFlag int32) (r uintptr) {
var pCache, pPage uintptr
_, _ = pCache, pPage
pCache = p
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
pPage = _pcache1FetchNoMutex(tls, p, iKey, createFlag)
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
return pPage
}
func _pcache1Fetch(tls *libc.TLS, p uintptr, iKey uint32, createFlag int32) (r uintptr) {
var pCache uintptr
_ = pCache
pCache = p
if (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex != 0 {
return _pcache1FetchWithMutex(tls, p, iKey, createFlag)
} else {
return _pcache1FetchNoMutex(tls, p, iKey, createFlag)
}
return r
}
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xUnpin method.
// **
// ** Mark a page as unpinned (eligible for asynchronous recycling).
// */
func _pcache1Unpin(tls *libc.TLS, p uintptr, pPg uintptr, reuseUnlikely int32) {
var pCache, pGroup, pPage, ppFirst, v1 uintptr
_, _, _, _, _ = pCache, pGroup, pPage, ppFirst, v1
pCache = p
pPage = pPg
pGroup = (*TPCache1)(unsafe.Pointer(pCache)).FpGroup
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
/* It is an error to call this function if the page is already
** part of the PGroup LRU list.
*/
if reuseUnlikely != 0 || (*TPGroup)(unsafe.Pointer(pGroup)).FnPurgeable > (*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage {
_pcache1RemoveFromHash(tls, pPage, int32(1))
} else {
/* Add the page to the PGroup LRU list. */
ppFirst = pGroup + 24 + 40
(*TPgHdr1)(unsafe.Pointer(pPage)).FpLruPrev = pGroup + 24
v1 = *(*uintptr)(unsafe.Pointer(ppFirst))
(*TPgHdr1)(unsafe.Pointer(pPage)).FpLruNext = v1
(*TPgHdr1)(unsafe.Pointer(v1)).FpLruPrev = pPage
*(*uintptr)(unsafe.Pointer(ppFirst)) = pPage
(*TPCache1)(unsafe.Pointer(pCache)).FnRecyclable++
}
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
}
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xRekey method.
// */
func _pcache1Rekey(tls *libc.TLS, p uintptr, pPg uintptr, iOld uint32, iNew uint32) {
var hNew, hOld uint32
var pCache, pPage, pp uintptr
_, _, _, _, _ = hNew, hOld, pCache, pPage, pp
pCache = p
pPage = pPg
/* The page number really is changing */
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
/* pPg really is iOld */
hOld = iOld % (*TPCache1)(unsafe.Pointer(pCache)).FnHash
pp = (*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(hOld)*8
for *(*uintptr)(unsafe.Pointer(pp)) != pPage {
pp = *(*uintptr)(unsafe.Pointer(pp)) + 24
}
*(*uintptr)(unsafe.Pointer(pp)) = (*TPgHdr1)(unsafe.Pointer(pPage)).FpNext
/* iNew not in cache */
hNew = iNew % (*TPCache1)(unsafe.Pointer(pCache)).FnHash
(*TPgHdr1)(unsafe.Pointer(pPage)).FiKey = iNew
(*TPgHdr1)(unsafe.Pointer(pPage)).FpNext = *(*uintptr)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(hNew)*8))
*(*uintptr)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FapHash + uintptr(hNew)*8)) = pPage
if iNew > (*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey {
(*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey = iNew
}
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
}
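// Illustrative sketch only: _pcache1Rekey above unlinks the page from the
// hash bucket of its old key and pushes it onto the head of the bucket for
// the new key. The hypothetical exampleNode/exampleRekey pair shows the same
// pointer surgery on a plain Go chained hash table; like the real code, it
// assumes the node is actually present in the table.
type exampleNode struct {
	key  uint32
	next *exampleNode
}

func exampleRekey(buckets []*exampleNode, n *exampleNode, newKey uint32) {
	// Unlink n from its current bucket (mirrors "*pp = pPage->pNext").
	pp := &buckets[n.key%uint32(len(buckets))]
	for *pp != n {
		pp = &(*pp).next
	}
	*pp = n.next
	// Re-key the node and push it onto the head of the new bucket.
	n.key = newKey
	h := newKey % uint32(len(buckets))
	n.next = buckets[h]
	buckets[h] = n
}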
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xTruncate method.
// **
// ** Discard all unpinned pages in the cache with a page number equal to
// ** or greater than parameter iLimit. Any pinned pages with a page number
// ** equal to or greater than iLimit are implicitly unpinned.
// */
func _pcache1Truncate(tls *libc.TLS, p uintptr, iLimit uint32) {
var pCache uintptr
_ = pCache
pCache = p
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
if iLimit <= (*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey {
_pcache1TruncateUnsafe(tls, pCache, iLimit)
(*TPCache1)(unsafe.Pointer(pCache)).FiMaxKey = iLimit - uint32(1)
}
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer((*TPCache1)(unsafe.Pointer(pCache)).FpGroup)).Fmutex)
}
// C documentation
//
// /*
// ** Implementation of the sqlite3_pcache.xDestroy method.
// **
// ** Destroy a cache allocated using pcache1Create().
// */
func _pcache1Destroy(tls *libc.TLS, p uintptr) {
var pCache, pGroup uintptr
_, _ = pCache, pGroup
pCache = p
pGroup = (*TPCache1)(unsafe.Pointer(pCache)).FpGroup
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
if (*TPCache1)(unsafe.Pointer(pCache)).FnPage != 0 {
_pcache1TruncateUnsafe(tls, pCache, uint32(0))
}
*(*uint32)(unsafe.Pointer(pGroup + 8)) -= (*TPCache1)(unsafe.Pointer(pCache)).FnMax
*(*uint32)(unsafe.Pointer(pGroup + 12)) -= (*TPCache1)(unsafe.Pointer(pCache)).FnMin
(*TPGroup)(unsafe.Pointer(pGroup)).FmxPinned = (*TPGroup)(unsafe.Pointer(pGroup)).FnMaxPage + uint32(10) - (*TPGroup)(unsafe.Pointer(pGroup)).FnMinPage
_pcache1EnforceMaxPage(tls, pCache)
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer(pGroup)).Fmutex)
Xsqlite3_free(tls, (*TPCache1)(unsafe.Pointer(pCache)).FpBulk)
Xsqlite3_free(tls, (*TPCache1)(unsafe.Pointer(pCache)).FapHash)
Xsqlite3_free(tls, pCache)
}
// C documentation
//
// /*
// ** This function is called during initialization (sqlite3_initialize()) to
// ** install the default pluggable cache module, assuming the user has not
// ** already provided an alternative.
// */
func _sqlite3PCacheSetDefault(tls *libc.TLS) {
bp := tls.Alloc(16)
defer tls.Free(16)
Xsqlite3_config(tls, int32(SQLITE_CONFIG_PCACHE2), libc.VaList(bp+8, uintptr(unsafe.Pointer(&_defaultMethods1))))
}
var _defaultMethods1 = Tsqlite3_pcache_methods2{
FiVersion: int32(1),
}
func init() {
p := unsafe.Pointer(&_defaultMethods1)
*(*uintptr)(unsafe.Add(p, 16)) = __ccgo_fp(_pcache1Init)
*(*uintptr)(unsafe.Add(p, 24)) = __ccgo_fp(_pcache1Shutdown)
*(*uintptr)(unsafe.Add(p, 32)) = __ccgo_fp(_pcache1Create)
*(*uintptr)(unsafe.Add(p, 40)) = __ccgo_fp(_pcache1Cachesize)
*(*uintptr)(unsafe.Add(p, 48)) = __ccgo_fp(_pcache1Pagecount)
*(*uintptr)(unsafe.Add(p, 56)) = __ccgo_fp(_pcache1Fetch)
*(*uintptr)(unsafe.Add(p, 64)) = __ccgo_fp(_pcache1Unpin)
*(*uintptr)(unsafe.Add(p, 72)) = __ccgo_fp(_pcache1Rekey)
*(*uintptr)(unsafe.Add(p, 80)) = __ccgo_fp(_pcache1Truncate)
*(*uintptr)(unsafe.Add(p, 88)) = __ccgo_fp(_pcache1Destroy)
*(*uintptr)(unsafe.Add(p, 96)) = __ccgo_fp(_pcache1Shrink)
}
// C documentation
//
// /*
// ** Return the size of the header on each page of this PCACHE implementation.
// */
func _sqlite3HeaderSizePcache1(tls *libc.TLS) (r int32) {
return int32((libc.Uint64FromInt64(56) + libc.Uint64FromInt32(7)) & uint64(^libc.Int32FromInt32(7)))
}
// C documentation
//
// /*
// ** Return the global mutex used by this PCACHE implementation. The
// ** sqlite3_status() routine needs access to this mutex.
// */
func _sqlite3Pcache1Mutex(tls *libc.TLS) (r uintptr) {
return _pcache1_g.Fmutex
}
// C documentation
//
// /*
// ** This function is called to free superfluous dynamically allocated memory
// ** held by the pager system. Memory in use by any SQLite pager allocated
// ** by the current thread may be sqlite3_free()ed.
// **
// ** nReq is the number of bytes of memory required. Once this much has
// ** been released, the function returns. The return value is the total number
// ** of bytes of memory released.
// */
func _sqlite3PcacheReleaseMemory(tls *libc.TLS, nReq int32) (r int32) {
var nFree int32
var p, v1 uintptr
var v2 bool
_, _, _, _ = nFree, p, v1, v2
nFree = 0
if _sqlite3Config.FpPage == uintptr(0) {
Xsqlite3_mutex_enter(tls, (*TPGroup)(unsafe.Pointer(uintptr(unsafe.Pointer(&_pcache1_g)))).Fmutex)
for {
if v2 = nReq < 0 || nFree < nReq; v2 {
v1 = _pcache1_g.Fgrp.Flru.FpLruPrev
p = v1
}
if !(v2 && v1 != uintptr(0) && int32((*TPgHdr1)(unsafe.Pointer(p)).FisAnchor) == 0) {
break
}
nFree += _pcache1MemSize(tls, (*TPgHdr1)(unsafe.Pointer(p)).Fpage.FpBuf)
_pcache1PinPage(tls, p)
_pcache1RemoveFromHash(tls, p, int32(1))
}
Xsqlite3_mutex_leave(tls, (*TPGroup)(unsafe.Pointer(uintptr(unsafe.Pointer(&_pcache1_g)))).Fmutex)
}
return nFree
}
/************** End of pcache1.c *********************************************/
/************** Begin file rowset.c ******************************************/
/*
** 2008 December 3
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This module implements an object we call a "RowSet".
**
** The RowSet object is a collection of rowids. Rowids
** are inserted into the RowSet in an arbitrary order. Inserts
** can be intermixed with tests to see if a given rowid has been
** previously inserted into the RowSet.
**
** After all inserts are finished, it is possible to extract the
** elements of the RowSet in sorted order. Once this extraction
** process has started, no new elements may be inserted.
**
** Hence, the primitive operations for a RowSet are:
**
** CREATE
** INSERT
** TEST
** SMALLEST
** DESTROY
**
** The CREATE and DESTROY primitives are the constructor and destructor,
** obviously. The INSERT primitive adds a new element to the RowSet.
** TEST checks to see if an element is already in the RowSet. SMALLEST
** extracts the least value from the RowSet.
**
** The INSERT primitive might allocate additional memory. Memory is
** allocated in chunks so most INSERTs do no allocation. There is an
** upper bound on the size of allocated memory. No memory is freed
** until DESTROY.
**
** The TEST primitive includes a "batch" number. The TEST primitive
** will only see elements that were inserted before the last change
** in the batch number. In other words, if an INSERT occurs between
** two TESTs where the TESTs have the same batch number, then the
** value added by the INSERT will not be visible to the second TEST.
** The initial batch number is zero, so if the very first TEST contains
** a non-zero batch number, it will see all prior INSERTs.
**
// ** No INSERTs may occur after a SMALLEST. An assertion will fail if
** that is attempted.
**
** The cost of an INSERT is roughly constant. (Sometimes new memory
** has to be allocated on an INSERT.) The cost of a TEST with a new
** batch number is O(NlogN) where N is the number of elements in the RowSet.
** The cost of a TEST using the same batch number is O(logN). The cost
** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST
** primitives are constant time. The cost of DESTROY is O(N).
**
** TEST and SMALLEST may not be used by the same RowSet. This used to
** be possible, but the feature was not used, so it was removed in order
** to simplify the code.
*/
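// Illustrative sketch only: a toy model of the RowSet primitives described
// above (INSERT, TEST, SMALLEST), ignoring chunked allocation, batch numbers
// and the forest representation used by the real implementation that follows.
// The names exampleRowSet, exampleRowSetInsert, exampleRowSetTest and
// exampleRowSetSmallest are invented for illustration.
type exampleRowSet struct {
	rowids []int64
}

// INSERT: append a rowid in arbitrary order.
func exampleRowSetInsert(rs *exampleRowSet, rowid int64) {
	rs.rowids = append(rs.rowids, rowid)
}

// TEST: report whether a rowid has been inserted (the real TEST only sees
// entries inserted before the last batch-number change).
func exampleRowSetTest(rs *exampleRowSet, rowid int64) bool {
	for _, v := range rs.rowids {
		if v == rowid {
			return true
		}
	}
	return false
}

// SMALLEST: remove and return the least rowid, or false when the set is
// empty. The real code sorts once on the first call and then walks a list.
func exampleRowSetSmallest(rs *exampleRowSet) (int64, bool) {
	if len(rs.rowids) == 0 {
		return 0, false
	}
	min := 0
	for i, v := range rs.rowids {
		if v < rs.rowids[min] {
			min = i
		}
	}
	smallest := rs.rowids[min]
	rs.rowids = append(rs.rowids[:min], rs.rowids[min+1:]...)
	return smallest, true
}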
/* #include "sqliteInt.h" */
/*
** Target size for allocation chunks.
*/
/*
** The number of rowset entries per allocation chunk.
*/
/*
** Each entry in a RowSet is an instance of the following object.
**
** This same object is reused to store a linked list of trees of RowSetEntry
** objects. In that alternative use, pRight points to the next entry
** in the list, pLeft points to the tree, and v is unused. The
** RowSet.pForest value points to the head of this forest list.
*/
type TRowSetEntry = struct {
Fv Ti64
FpRight uintptr
FpLeft uintptr
}
type RowSetEntry = TRowSetEntry
/*
** RowSetEntry objects are allocated in large chunks (instances of the
** following structure) to reduce memory allocation overhead. The
** chunks are kept on a linked list so that they can be deallocated
** when the RowSet is destroyed.
*/
type TRowSetChunk = struct {
FpNextChunk uintptr
FaEntry [42]TRowSetEntry
}
type RowSetChunk = TRowSetChunk
/*
** A RowSet is an instance of the following structure.
**
** A typedef of this structure is found in sqliteInt.h.
*/
type TRowSet1 = struct {
FpChunk uintptr
Fdb uintptr
FpEntry uintptr
FpLast uintptr
FpFresh uintptr
FpForest uintptr
FnFresh Tu16
FrsFlags Tu16
FiBatch int32
}
type RowSet1 = TRowSet1
/*
** Allowed values for RowSet.rsFlags
*/
// C documentation
//
// /*
// ** Allocate a RowSet object. Return NULL if a memory allocation
// ** error occurs.
// */
func _sqlite3RowSetInit(tls *libc.TLS, db uintptr) (r uintptr) {
var N int32
var p uintptr
_, _ = N, p
p = _sqlite3DbMallocRawNN(tls, db, uint64(56))
if p != 0 {
N = _sqlite3DbMallocSize(tls, db, p)
(*TRowSet)(unsafe.Pointer(p)).FpChunk = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).Fdb = db
(*TRowSet)(unsafe.Pointer(p)).FpEntry = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).FpLast = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).FpForest = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).FpFresh = uintptr((libc.Uint64FromInt64(56)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7))) + p
(*TRowSet)(unsafe.Pointer(p)).FnFresh = uint16((uint64(N) - (libc.Uint64FromInt64(56)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7))) / libc.Uint64FromInt64(24))
(*TRowSet)(unsafe.Pointer(p)).FrsFlags = uint16(ROWSET_SORTED)
(*TRowSet)(unsafe.Pointer(p)).FiBatch = 0
}
return p
}
// C documentation
//
// /*
// ** Deallocate all chunks from a RowSet. This frees all memory that
// ** the RowSet has allocated over its lifetime. This routine is
// ** the destructor for the RowSet.
// */
func _sqlite3RowSetClear(tls *libc.TLS, pArg uintptr) {
var p, pChunk, pNextChunk uintptr
_, _, _ = p, pChunk, pNextChunk
p = pArg
pChunk = (*TRowSet)(unsafe.Pointer(p)).FpChunk
for {
if !(pChunk != 0) {
break
}
pNextChunk = (*TRowSetChunk)(unsafe.Pointer(pChunk)).FpNextChunk
_sqlite3DbFree(tls, (*TRowSet)(unsafe.Pointer(p)).Fdb, pChunk)
goto _1
_1:
;
pChunk = pNextChunk
}
(*TRowSet)(unsafe.Pointer(p)).FpChunk = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).FnFresh = uint16(0)
(*TRowSet)(unsafe.Pointer(p)).FpEntry = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).FpLast = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).FpForest = uintptr(0)
(*TRowSet)(unsafe.Pointer(p)).FrsFlags = uint16(ROWSET_SORTED)
}
// C documentation
//
// /*
// ** Deallocate all chunks from a RowSet. This frees all memory that
// ** the RowSet has allocated over its lifetime. This routine is
// ** the destructor for the RowSet.
// */
func _sqlite3RowSetDelete(tls *libc.TLS, pArg uintptr) {
_sqlite3RowSetClear(tls, pArg)
_sqlite3DbFree(tls, (*TRowSet)(unsafe.Pointer(pArg)).Fdb, pArg)
}
// C documentation
//
// /*
// ** Allocate a new RowSetEntry object that is associated with the
// ** given RowSet. Return a pointer to the new and completely uninitialized
// ** object.
// **
// ** In an OOM situation, the RowSet.db->mallocFailed flag is set and this
// ** routine returns NULL.
// */
func _rowSetEntryAlloc(tls *libc.TLS, p uintptr) (r uintptr) {
var pNew, v1, v2 uintptr
_, _, _ = pNew, v1, v2
if int32((*TRowSet)(unsafe.Pointer(p)).FnFresh) == 0 {
pNew = _sqlite3DbMallocRawNN(tls, (*TRowSet)(unsafe.Pointer(p)).Fdb, uint64(1016))
if pNew == uintptr(0) {
return uintptr(0)
}
(*TRowSetChunk)(unsafe.Pointer(pNew)).FpNextChunk = (*TRowSet)(unsafe.Pointer(p)).FpChunk
(*TRowSet)(unsafe.Pointer(p)).FpChunk = pNew
(*TRowSet)(unsafe.Pointer(p)).FpFresh = pNew + 8
(*TRowSet)(unsafe.Pointer(p)).FnFresh = uint16(uint64(libc.Int32FromInt32(ROWSET_ALLOCATION_SIZE)-libc.Int32FromInt32(8)) / libc.Uint64FromInt64(24))
}
(*TRowSet)(unsafe.Pointer(p)).FnFresh--
v2 = p + 32
v1 = *(*uintptr)(unsafe.Pointer(v2))
*(*uintptr)(unsafe.Pointer(v2)) += 24
return v1
}
// C documentation
//
// /*
// ** Insert a new value into a RowSet.
// **
// ** The mallocFailed flag of the database connection is set if a
// ** memory allocation fails.
// */
func _sqlite3RowSetInsert(tls *libc.TLS, p uintptr, rowid Ti64) {
var pEntry, pLast, p1 uintptr
_, _, _ = pEntry, pLast, p1 /* The last prior entry */
/* This routine is never called after sqlite3RowSetNext() */
pEntry = _rowSetEntryAlloc(tls, p)
if pEntry == uintptr(0) {
return
}
(*TRowSetEntry)(unsafe.Pointer(pEntry)).Fv = rowid
(*TRowSetEntry)(unsafe.Pointer(pEntry)).FpRight = uintptr(0)
pLast = (*TRowSet)(unsafe.Pointer(p)).FpLast
if pLast != 0 {
if rowid <= (*TRowSetEntry)(unsafe.Pointer(pLast)).Fv { /*OPTIMIZATION-IF-FALSE*/
/* Avoid unnecessary sorts by preserving the ROWSET_SORTED flags
** where possible */
p1 = p + 50
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(ROWSET_SORTED))
}
(*TRowSetEntry)(unsafe.Pointer(pLast)).FpRight = pEntry
} else {
(*TRowSet)(unsafe.Pointer(p)).FpEntry = pEntry
}
(*TRowSet)(unsafe.Pointer(p)).FpLast = pEntry
}
// C documentation
//
// /*
// ** Merge two lists of RowSetEntry objects. Remove duplicates.
// **
// ** The input lists are connected via pRight pointers and are
// ** assumed to each already be in sorted order.
// */
func _rowSetEntryMerge(tls *libc.TLS, pA uintptr, pB uintptr) (r uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var pTail, v2, v3 uintptr
var _ /* head at bp+0 */ TRowSetEntry
_, _, _ = pTail, v2, v3
pTail = bp
for {
if (*TRowSetEntry)(unsafe.Pointer(pA)).Fv <= (*TRowSetEntry)(unsafe.Pointer(pB)).Fv {
if (*TRowSetEntry)(unsafe.Pointer(pA)).Fv < (*TRowSetEntry)(unsafe.Pointer(pB)).Fv {
v2 = pA
(*TRowSetEntry)(unsafe.Pointer(pTail)).FpRight = v2
pTail = v2
}
pA = (*TRowSetEntry)(unsafe.Pointer(pA)).FpRight
if pA == uintptr(0) {
(*TRowSetEntry)(unsafe.Pointer(pTail)).FpRight = pB
break
}
} else {
v3 = pB
(*TRowSetEntry)(unsafe.Pointer(pTail)).FpRight = v3
pTail = v3
pB = (*TRowSetEntry)(unsafe.Pointer(pB)).FpRight
if pB == uintptr(0) {
(*TRowSetEntry)(unsafe.Pointer(pTail)).FpRight = pA
break
}
}
goto _1
_1:
}
return (*(*TRowSetEntry)(unsafe.Pointer(bp))).FpRight
}
// C documentation
//
// /*
// ** Sort all elements on the list of RowSetEntry objects into order of
// ** increasing v.
// */
func _rowSetEntrySort(tls *libc.TLS, pIn uintptr) (r uintptr) {
bp := tls.Alloc(320)
defer tls.Free(320)
var i uint32
var pNext, v3 uintptr
var _ /* aBucket at bp+0 */ [40]uintptr
_, _, _ = i, pNext, v3
libc.Xmemset(tls, bp, 0, uint64(320))
for pIn != 0 {
pNext = (*TRowSetEntry)(unsafe.Pointer(pIn)).FpRight
(*TRowSetEntry)(unsafe.Pointer(pIn)).FpRight = uintptr(0)
i = uint32(0)
for {
if !((*(*[40]uintptr)(unsafe.Pointer(bp)))[i] != 0) {
break
}
pIn = _rowSetEntryMerge(tls, (*(*[40]uintptr)(unsafe.Pointer(bp)))[i], pIn)
(*(*[40]uintptr)(unsafe.Pointer(bp)))[i] = uintptr(0)
goto _1
_1:
;
i++
}
(*(*[40]uintptr)(unsafe.Pointer(bp)))[i] = pIn
pIn = pNext
}
pIn = (*(*[40]uintptr)(unsafe.Pointer(bp)))[0]
i = uint32(1)
for {
if !(uint64(i) < libc.Uint64FromInt64(320)/libc.Uint64FromInt64(8)) {
break
}
if (*(*[40]uintptr)(unsafe.Pointer(bp)))[i] == uintptr(0) {
goto _2
}
if pIn != 0 {
v3 = _rowSetEntryMerge(tls, pIn, (*(*[40]uintptr)(unsafe.Pointer(bp)))[i])
} else {
v3 = (*(*[40]uintptr)(unsafe.Pointer(bp)))[i]
}
pIn = v3
goto _2
_2:
;
i++
}
return pIn
}
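// Illustrative sketch only: _rowSetEntrySort above is a bottom-up merge sort
// that keeps at most one pending sorted run per bucket, with run sizes
// doubling like the bits of a binary counter, and merges the surviving runs
// at the end. The hypothetical exampleListSort applies the same pattern to
// exampleMergeNode lists, reusing exampleMergeSorted from the sketch above.
func exampleListSort(in *exampleMergeNode) *exampleMergeNode {
	var bucket [40]*exampleMergeNode
	for in != nil {
		next := in.next
		in.next = nil // detach a one-element run
		i := 0
		for ; bucket[i] != nil; i++ { // "carry": merge equal-sized runs upward
			in = exampleMergeSorted(bucket[i], in)
			bucket[i] = nil
		}
		bucket[i] = in
		in = next
	}
	var out *exampleMergeNode
	for i := range bucket { // final pass: merge whatever runs survived
		if bucket[i] == nil {
			continue
		}
		if out == nil {
			out = bucket[i]
		} else {
			out = exampleMergeSorted(out, bucket[i])
		}
	}
	return out
}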
// C documentation
//
// /*
// ** The input, pIn, is a binary tree (or subtree) of RowSetEntry objects.
// ** Convert this tree into a linked list connected by the pRight pointers
// ** and return pointers to the first and last elements of the new list.
// */
func _rowSetTreeToList(tls *libc.TLS, pIn uintptr, ppFirst uintptr, ppLast uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* p at bp+0 */ uintptr
if (*TRowSetEntry)(unsafe.Pointer(pIn)).FpLeft != 0 {
_rowSetTreeToList(tls, (*TRowSetEntry)(unsafe.Pointer(pIn)).FpLeft, ppFirst, bp)
(*TRowSetEntry)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpRight = pIn
} else {
*(*uintptr)(unsafe.Pointer(ppFirst)) = pIn
}
if (*TRowSetEntry)(unsafe.Pointer(pIn)).FpRight != 0 {
_rowSetTreeToList(tls, (*TRowSetEntry)(unsafe.Pointer(pIn)).FpRight, pIn+8, ppLast)
} else {
*(*uintptr)(unsafe.Pointer(ppLast)) = pIn
}
}
// C documentation
//
// /*
// ** Convert a sorted list of elements (connected by pRight) into a binary
// ** tree with depth of iDepth. A depth of 1 means the tree contains a single
// ** node taken from the head of *ppList. A depth of 2 means a tree with
// ** three nodes. And so forth.
// **
// ** Use as many entries from the input list as required and update the
// ** *ppList to point to the unused elements of the list. If the input
// ** list contains too few elements, then construct an incomplete tree
// ** and leave *ppList set to NULL.
// **
// ** Return a pointer to the root of the constructed binary tree.
// */
func _rowSetNDeepTree(tls *libc.TLS, ppList uintptr, iDepth int32) (r uintptr) {
var p, pLeft, v1 uintptr
_, _, _ = p, pLeft, v1 /* Left subtree */
if *(*uintptr)(unsafe.Pointer(ppList)) == uintptr(0) { /*OPTIMIZATION-IF-TRUE*/
/* Prevent unnecessary deep recursion when we run out of entries */
return uintptr(0)
}
if iDepth > int32(1) { /*OPTIMIZATION-IF-TRUE*/
/* This branch causes a *balanced* tree to be generated. A valid tree
** is still generated without this branch, but the tree is wildly
** unbalanced and inefficient. */
pLeft = _rowSetNDeepTree(tls, ppList, iDepth-int32(1))
p = *(*uintptr)(unsafe.Pointer(ppList))
if p == uintptr(0) { /*OPTIMIZATION-IF-FALSE*/
/* It is safe to always return here, but the resulting tree
** would be unbalanced */
return pLeft
}
(*TRowSetEntry)(unsafe.Pointer(p)).FpLeft = pLeft
*(*uintptr)(unsafe.Pointer(ppList)) = (*TRowSetEntry)(unsafe.Pointer(p)).FpRight
(*TRowSetEntry)(unsafe.Pointer(p)).FpRight = _rowSetNDeepTree(tls, ppList, iDepth-int32(1))
} else {
p = *(*uintptr)(unsafe.Pointer(ppList))
*(*uintptr)(unsafe.Pointer(ppList)) = (*TRowSetEntry)(unsafe.Pointer(p)).FpRight
v1 = libc.UintptrFromInt32(0)
(*TRowSetEntry)(unsafe.Pointer(p)).FpRight = v1
(*TRowSetEntry)(unsafe.Pointer(p)).FpLeft = v1
}
return p
}
// C documentation
//
// /*
// ** Convert a sorted list of elements into a binary tree. Make the tree
// ** as deep as it needs to be in order to contain the entire list.
// */
func _rowSetListToTree(tls *libc.TLS, _pList uintptr) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
*(*uintptr)(unsafe.Pointer(bp)) = _pList
var iDepth int32
var p, pLeft, v1 uintptr
_, _, _, _ = iDepth, p, pLeft, v1 /* Left subtree */
p = *(*uintptr)(unsafe.Pointer(bp))
*(*uintptr)(unsafe.Pointer(bp)) = (*TRowSetEntry)(unsafe.Pointer(p)).FpRight
v1 = libc.UintptrFromInt32(0)
(*TRowSetEntry)(unsafe.Pointer(p)).FpRight = v1
(*TRowSetEntry)(unsafe.Pointer(p)).FpLeft = v1
iDepth = int32(1)
for {
if !(*(*uintptr)(unsafe.Pointer(bp)) != 0) {
break
}
pLeft = p
p = *(*uintptr)(unsafe.Pointer(bp))
*(*uintptr)(unsafe.Pointer(bp)) = (*TRowSetEntry)(unsafe.Pointer(p)).FpRight
(*TRowSetEntry)(unsafe.Pointer(p)).FpLeft = pLeft
(*TRowSetEntry)(unsafe.Pointer(p)).FpRight = _rowSetNDeepTree(tls, bp, iDepth)
goto _2
_2:
;
iDepth++
}
return p
}
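// Illustrative sketch only: _rowSetNDeepTree and _rowSetListToTree above turn
// a sorted, pRight-linked list into a roughly balanced binary search tree by
// consuming list entries in order: build a left subtree of depth-1, take the
// next list element as the root, then build the right subtree. The
// hypothetical exampleTreeNode and exampleNDeepTree mirror that recursion on
// plain Go pointers.
type exampleTreeNode struct {
	v           int64
	left, right *exampleTreeNode
	next        *exampleTreeNode // list linkage, used before the tree is built
}

func exampleNDeepTree(list **exampleTreeNode, depth int) *exampleTreeNode {
	if *list == nil {
		return nil // out of entries; return an incomplete subtree
	}
	if depth <= 1 {
		p := *list
		*list = p.next
		p.left, p.right = nil, nil
		return p
	}
	left := exampleNDeepTree(list, depth-1)
	p := *list
	if p == nil {
		return left // list exhausted while building; stay left-heavy
	}
	*list = p.next
	p.left = left
	p.right = exampleNDeepTree(list, depth-1)
	return p
}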
// C documentation
//
// /*
// ** Extract the smallest element from the RowSet.
// ** Write the element into *pRowid. Return 1 on success. Return
// ** 0 if the RowSet is already empty.
// **
// ** After this routine has been called, the sqlite3RowSetInsert()
// ** routine may not be called again.
// **
// ** This routine may not be called after sqlite3RowSetTest() has
// ** been used. Older versions of RowSet allowed that, but as the
// ** capability was not used by the code generator, it was removed
// ** for code economy.
// */
func _sqlite3RowSetNext(tls *libc.TLS, p uintptr, pRowid uintptr) (r int32) {
var p1 uintptr
_ = p1
/* Cannot be used with sqlite3RowSetTest() */
/* Merge the forest into a single sorted list on first call */
if int32((*TRowSet)(unsafe.Pointer(p)).FrsFlags)&int32(ROWSET_NEXT) == 0 { /*OPTIMIZATION-IF-FALSE*/
if int32((*TRowSet)(unsafe.Pointer(p)).FrsFlags)&int32(ROWSET_SORTED) == 0 { /*OPTIMIZATION-IF-FALSE*/
(*TRowSet)(unsafe.Pointer(p)).FpEntry = _rowSetEntrySort(tls, (*TRowSet)(unsafe.Pointer(p)).FpEntry)
}
p1 = p + 50
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | (libc.Int32FromInt32(ROWSET_SORTED) | libc.Int32FromInt32(ROWSET_NEXT)))
}
/* Return the next entry on the list */
if (*TRowSet)(unsafe.Pointer(p)).FpEntry != 0 {
*(*Ti64)(unsafe.Pointer(pRowid)) = (*TRowSetEntry)(unsafe.Pointer((*TRowSet)(unsafe.Pointer(p)).FpEntry)).Fv
(*TRowSet)(unsafe.Pointer(p)).FpEntry = (*TRowSetEntry)(unsafe.Pointer((*TRowSet)(unsafe.Pointer(p)).FpEntry)).FpRight
if (*TRowSet)(unsafe.Pointer(p)).FpEntry == uintptr(0) { /*OPTIMIZATION-IF-TRUE*/
/* Free memory immediately, rather than waiting on sqlite3_finalize() */
_sqlite3RowSetClear(tls, p)
}
return int32(1)
} else {
return 0
}
return r
}
// C documentation
//
// /*
// ** Check to see if element iRowid was inserted into the rowset as
// ** part of any insert batch prior to iBatch. Return 1 or 0.
// **
// ** If this is the first test of a new batch and if there exist entries
// ** on pRowSet->pEntry, then sort those entries into the forest at
// ** pRowSet->pForest so that they can be tested.
// */
func _sqlite3RowSetTest(tls *libc.TLS, pRowSet uintptr, iBatch int32, iRowid Tsqlite3_int64) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var p, pTree, ppPrevTree, v2, p3 uintptr
var _ /* pAux at bp+0 */ uintptr
var _ /* pTail at bp+8 */ uintptr
_, _, _, _, _ = p, pTree, ppPrevTree, v2, p3
/* This routine is never called after sqlite3RowSetNext() */
/* Sort entries into the forest on the first test of a new batch.
** To save unnecessary work, only do this when the batch number changes.
*/
if iBatch != (*TRowSet)(unsafe.Pointer(pRowSet)).FiBatch { /*OPTIMIZATION-IF-FALSE*/
p = (*TRowSet)(unsafe.Pointer(pRowSet)).FpEntry
if p != 0 {
ppPrevTree = pRowSet + 40
if int32((*TRowSet)(unsafe.Pointer(pRowSet)).FrsFlags)&int32(ROWSET_SORTED) == 0 { /*OPTIMIZATION-IF-FALSE*/
/* Only sort the current set of entries if they need it */
p = _rowSetEntrySort(tls, p)
}
pTree = (*TRowSet)(unsafe.Pointer(pRowSet)).FpForest
for {
if !(pTree != 0) {
break
}
ppPrevTree = pTree + 8
if (*TRowSetEntry)(unsafe.Pointer(pTree)).FpLeft == uintptr(0) {
(*TRowSetEntry)(unsafe.Pointer(pTree)).FpLeft = _rowSetListToTree(tls, p)
break
} else {
_rowSetTreeToList(tls, (*TRowSetEntry)(unsafe.Pointer(pTree)).FpLeft, bp, bp+8)
(*TRowSetEntry)(unsafe.Pointer(pTree)).FpLeft = uintptr(0)
p = _rowSetEntryMerge(tls, *(*uintptr)(unsafe.Pointer(bp)), p)
}
goto _1
_1:
;
pTree = (*TRowSetEntry)(unsafe.Pointer(pTree)).FpRight
}
if pTree == uintptr(0) {
v2 = _rowSetEntryAlloc(tls, pRowSet)
pTree = v2
*(*uintptr)(unsafe.Pointer(ppPrevTree)) = v2
if pTree != 0 {
(*TRowSetEntry)(unsafe.Pointer(pTree)).Fv = 0
(*TRowSetEntry)(unsafe.Pointer(pTree)).FpRight = uintptr(0)
(*TRowSetEntry)(unsafe.Pointer(pTree)).FpLeft = _rowSetListToTree(tls, p)
}
}
(*TRowSet)(unsafe.Pointer(pRowSet)).FpEntry = uintptr(0)
(*TRowSet)(unsafe.Pointer(pRowSet)).FpLast = uintptr(0)
p3 = pRowSet + 50
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) | libc.Int32FromInt32(ROWSET_SORTED))
}
(*TRowSet)(unsafe.Pointer(pRowSet)).FiBatch = iBatch
}
/* Test to see if the iRowid value appears anywhere in the forest.
** Return 1 if it does and 0 if not.
*/
pTree = (*TRowSet)(unsafe.Pointer(pRowSet)).FpForest
for {
if !(pTree != 0) {
break
}
p = (*TRowSetEntry)(unsafe.Pointer(pTree)).FpLeft
for p != 0 {
if (*TRowSetEntry)(unsafe.Pointer(p)).Fv < iRowid {
p = (*TRowSetEntry)(unsafe.Pointer(p)).FpRight
} else {
if (*TRowSetEntry)(unsafe.Pointer(p)).Fv > iRowid {
p = (*TRowSetEntry)(unsafe.Pointer(p)).FpLeft
} else {
return int32(1)
}
}
}
goto _4
_4:
;
pTree = (*TRowSetEntry)(unsafe.Pointer(pTree)).FpRight
}
return 0
}
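// Illustrative sketch only: the lookup loop at the end of _sqlite3RowSetTest
// above visits each tree in the forest and performs an ordinary binary
// search tree descent on it. The hypothetical exampleTreeContains shows that
// descent for a single tree built from exampleTreeNode (see the sketch above).
func exampleTreeContains(root *exampleTreeNode, rowid int64) bool {
	p := root
	for p != nil {
		if p.v < rowid {
			p = p.right // all values in the left subtree are smaller still
		} else if p.v > rowid {
			p = p.left
		} else {
			return true
		}
	}
	return false
}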
/************** End of rowset.c **********************************************/
/************** Begin file pager.c *******************************************/
/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This is the implementation of the page cache subsystem or "pager".
**
** The pager is used to access a database disk file. It implements
** atomic commit and rollback through the use of a journal file that
** is separate from the database file. The pager also implements file
** locking to prevent two processes from writing the same database
** file simultaneously, or one process from reading the database while
** another is writing.
*/
/* #include "sqliteInt.h" */
/************** Include wal.h in the middle of pager.c ***********************/
/************** Begin file wal.h *********************************************/
/*
** 2010 February 1
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This header file defines the interface to the write-ahead logging
** system. Refer to the comments below and the header comment attached to
** the implementation of each function in log.c for further details.
*/
/* #include "sqliteInt.h" */
/* Macros for extracting appropriate sync flags for either transaction
** commits (WAL_SYNC_FLAGS(X)) or for checkpoint ops (CKPT_SYNC_FLAGS(X)):
*/
// C documentation
//
// /* Connection to a write-ahead log (WAL) file.
// ** There is one object of this type for each pager.
// */
type TWal = struct {
FpVfs uintptr
FpDbFd uintptr
FpWalFd uintptr
FiCallback Tu32
FmxWalSize Ti64
FnWiData int32
FszFirstBlock int32
FapWiData uintptr
FszPage Tu32
FreadLock Ti16
FsyncFlags Tu8
FexclusiveMode Tu8
FwriteLock Tu8
FckptLock Tu8
FreadOnly Tu8
FtruncateOnCommit Tu8
FsyncHeader Tu8
FpadToSectorBoundary Tu8
FbShmUnreliable Tu8
Fhdr TWalIndexHdr
FminFrame Tu32
FiReCksum Tu32
FzWalName uintptr
FnCkpt Tu32
FpSnapshot uintptr
}
type Wal = TWal
/************** End of wal.h *************************************************/
/************** Continuing where we left off in pager.c **********************/
/******************* NOTES ON THE DESIGN OF THE PAGER ************************
**
** This comment block describes invariants that hold when using a rollback
** journal. These invariants do not apply for journal_mode=WAL,
** journal_mode=MEMORY, or journal_mode=OFF.
**
** Within this comment block, a page is deemed to have been synced
** automatically as soon as it is written when PRAGMA synchronous=OFF.
** Otherwise, the page is not synced until the xSync method of the VFS
** is called successfully on the file containing the page.
**
** Definition: A page of the database file is said to be "overwriteable" if
** one or more of the following are true about the page:
**
** (a) The original content of the page as it was at the beginning of
** the transaction has been written into the rollback journal and
** synced.
**
** (b) The page was a freelist leaf page at the start of the transaction.
**
** (c) The page number is greater than the largest page that existed in
** the database file at the start of the transaction.
**
** (1) A page of the database file is never overwritten unless one of the
** following are true:
**
** (a) The page and all other pages on the same sector are overwriteable.
**
** (b) The atomic page write optimization is enabled, and the entire
** transaction other than the update of the transaction sequence
** number consists of a single page change.
**
** (2) The content of a page written into the rollback journal exactly matches
** both the content in the database when the rollback journal was written
** and the content in the database at the beginning of the current
** transaction.
**
** (3) Writes to the database file are an integer multiple of the page size
** in length and are aligned on a page boundary.
**
** (4) Reads from the database file are either aligned on a page boundary and
** an integer multiple of the page size in length or are taken from the
** first 100 bytes of the database file.
**
** (5) All writes to the database file are synced prior to the rollback journal
** being deleted, truncated, or zeroed.
**
** (6) If a super-journal file is used, then all writes to the database file
** are synced prior to the super-journal being deleted.
**
** Definition: Two databases (or the same database at two points in time)
** are said to be "logically equivalent" if they give the same answer to
** all queries. Note in particular the content of freelist leaf
** pages can be changed arbitrarily without affecting the logical equivalence
** of the database.
**
** (7) At any time, if any subset, including the empty set and the total set,
** of the unsynced changes to a rollback journal are removed and the
** journal is rolled back, the resulting database file will be logically
** equivalent to the database file at the beginning of the transaction.
**
** (8) When a transaction is rolled back, the xTruncate method of the VFS
** is called to restore the database file to the same size it was at
** the beginning of the transaction. (In some VFSes, the xTruncate
** method is a no-op, but that does not change the fact that SQLite will
** invoke it.)
**
** (9) Whenever the database file is modified, at least one bit in the range
** of bytes from 24 through 39 inclusive will be changed prior to releasing
** the EXCLUSIVE lock, thus signaling other connections on the same
** database to flush their caches.
**
** (10) The pattern of bits in bytes 24 through 39 shall not repeat in less
** than one billion transactions.
**
** (11) A database file is well-formed at the beginning and at the conclusion
** of every transaction.
**
** (12) An EXCLUSIVE lock is held on the database file when writing to
** the database file.
**
** (13) A SHARED lock is held on the database file while reading any
** content out of the database file.
**
******************************************************************************/
/*
** Macros for troubleshooting. Normally turned off
*/
/*
** The following two macros are used within the PAGERTRACE() macros above
** to print out file-descriptors.
**
** PAGERID() takes a pointer to a Pager struct as its argument. The
** associated file-descriptor is returned. FILEHANDLEID() takes an sqlite3_file
** struct as its argument.
*/
/*
** The Pager.eState variable stores the current 'state' of a pager. A
** pager may be in any one of the seven states shown in the following
** state diagram.
**
** OPEN <------+------+
** | | |
** V | |
** +---------> READER-------+ |
** | | |
** | V |
** |<-------WRITER_LOCKED------> ERROR
** | | ^
** | V |
** |<------WRITER_CACHEMOD-------->|
** | | |
** | V |
** |<-------WRITER_DBMOD---------->|
** | | |
** | V |
** +<------WRITER_FINISHED-------->+
**
**
** List of state transitions and the C [function] that performs each:
**
** OPEN -> READER [sqlite3PagerSharedLock]
** READER -> OPEN [pager_unlock]
**
** READER -> WRITER_LOCKED [sqlite3PagerBegin]
** WRITER_LOCKED -> WRITER_CACHEMOD [pager_open_journal]
** WRITER_CACHEMOD -> WRITER_DBMOD [syncJournal]
** WRITER_DBMOD -> WRITER_FINISHED [sqlite3PagerCommitPhaseOne]
** WRITER_*** -> READER [pager_end_transaction]
**
** WRITER_*** -> ERROR [pager_error]
** ERROR -> OPEN [pager_unlock]
**
**
** OPEN:
**
** The pager starts up in this state. Nothing is guaranteed in this
** state - the file may or may not be locked and the database size is
** unknown. The database may not be read or written.
**
** * No read or write transaction is active.
** * Any lock, or no lock at all, may be held on the database file.
** * The dbSize, dbOrigSize and dbFileSize variables may not be trusted.
**
** READER:
**
** In this state all the requirements for reading the database in
** rollback (non-WAL) mode are met. Unless the pager is (or recently
** was) in exclusive-locking mode, a user-level read transaction is
** open. The database size is known in this state.
**
** A connection running with locking_mode=normal enters this state when
** it opens a read-transaction on the database and returns to state
** OPEN after the read-transaction is completed. However a connection
** running in locking_mode=exclusive (including temp databases) remains in
** this state even after the read-transaction is closed. The only way
** a locking_mode=exclusive connection can transition from READER to OPEN
** is via the ERROR state (see below).
**
** * A read transaction may be active (but a write-transaction cannot).
** * A SHARED or greater lock is held on the database file.
** * The dbSize variable may be trusted (even if a user-level read
** transaction is not active). The dbOrigSize and dbFileSize variables
** may not be trusted at this point.
** * If the database is a WAL database, then the WAL connection is open.
** * Even if a read-transaction is not open, it is guaranteed that
** there is no hot-journal in the file-system.
**
** WRITER_LOCKED:
**
** The pager moves to this state from READER when a write-transaction
** is first opened on the database. In WRITER_LOCKED state, all locks
** required to start a write-transaction are held, but no actual
** modifications to the cache or database have taken place.
**
** In rollback mode, a RESERVED or (if the transaction was opened with
** BEGIN EXCLUSIVE) EXCLUSIVE lock is obtained on the database file when
** moving to this state, but the journal file is not written to or opened
** to in this state. If the transaction is committed or rolled back while
** in WRITER_LOCKED state, all that is required is to unlock the database
** file.
**
** IN WAL mode, WalBeginWriteTransaction() is called to lock the log file.
** If the connection is running with locking_mode=exclusive, an attempt
** is made to obtain an EXCLUSIVE lock on the database file.
**
** * A write transaction is active.
** * If the connection is open in rollback-mode, a RESERVED or greater
** lock is held on the database file.
** * If the connection is open in WAL-mode, a WAL write transaction
** is open (i.e. sqlite3WalBeginWriteTransaction() has been successfully
** called).
** * The dbSize, dbOrigSize and dbFileSize variables are all valid.
** * The contents of the pager cache have not been modified.
** * The journal file may or may not be open.
** * Nothing (not even the first header) has been written to the journal.
**
** WRITER_CACHEMOD:
**
** A pager moves from WRITER_LOCKED state to this state when a page is
** first modified by the upper layer. In rollback mode the journal file
** is opened (if it is not already open) and a header written to the
** start of it. The database file on disk has not been modified.
**
** * A write transaction is active.
** * A RESERVED or greater lock is held on the database file.
** * The journal file is open and the first header has been written
** to it, but the header has not been synced to disk.
** * The contents of the page cache have been modified.
**
** WRITER_DBMOD:
**
** The pager transitions from WRITER_CACHEMOD into WRITER_DBMOD state
** when it modifies the contents of the database file. WAL connections
** never enter this state (since they do not modify the database file,
** just the log file).
**
** * A write transaction is active.
** * An EXCLUSIVE or greater lock is held on the database file.
** * The journal file is open and the first header has been written
** and synced to disk.
** * The contents of the page cache have been modified (and possibly
** written to disk).
**
** WRITER_FINISHED:
**
** It is not possible for a WAL connection to enter this state.
**
** A rollback-mode pager changes to WRITER_FINISHED state from WRITER_DBMOD
** state after the entire transaction has been successfully written into the
** database file. In this state the transaction may be committed simply
** by finalizing the journal file. Once in WRITER_FINISHED state, it is
** not possible to modify the database further. At this point, the upper
** layer must either commit or rollback the transaction.
**
** * A write transaction is active.
** * An EXCLUSIVE or greater lock is held on the database file.
** * All writing and syncing of journal and database data has finished.
** If no error occurred, all that remains is to finalize the journal to
** commit the transaction. If an error did occur, the caller will need
** to rollback the transaction.
**
** ERROR:
**
** The ERROR state is entered when an IO or disk-full error (including
** SQLITE_IOERR_NOMEM) occurs at a point in the code that makes it
** difficult to be sure that the in-memory pager state (cache contents,
** db size etc.) are consistent with the contents of the file-system.
**
** Temporary pager files may enter the ERROR state, but in-memory pagers
** cannot.
**
** For example, if an IO error occurs while performing a rollback,
** the contents of the page-cache may be left in an inconsistent state.
** At this point it would be dangerous to change back to READER state
** (as usually happens after a rollback). Any subsequent readers might
** report database corruption (due to the inconsistent cache), and if
** they upgrade to writers, they may inadvertently corrupt the database
** file. To avoid this hazard, the pager switches into the ERROR state
** instead of READER following such an error.
**
** Once it has entered the ERROR state, any attempt to use the pager
** to read or write data returns an error. Eventually, once all
** outstanding transactions have been abandoned, the pager is able to
** transition back to OPEN state, discarding the contents of the
** page-cache and any other in-memory state at the same time. Everything
** is reloaded from disk (and, if necessary, hot-journal rollback performed)
** when a read-transaction is next opened on the pager (transitioning
** the pager into READER state). At that point the system has recovered
** from the error.
**
** Specifically, the pager jumps into the ERROR state if:
**
** 1. An error occurs while attempting a rollback. This happens in
** function sqlite3PagerRollback().
**
** 2. An error occurs while attempting to finalize a journal file
** following a commit in function sqlite3PagerCommitPhaseTwo().
**
** 3. An error occurs while attempting to write to the journal or
** database file in function pagerStress() in order to free up
** memory.
**
** In other cases, the error is returned to the b-tree layer. The b-tree
** layer then attempts a rollback operation. If the error condition
** persists, the pager enters the ERROR state via condition (1) above.
**
** Condition (3) is necessary because it can be triggered by a read-only
** statement executed within a transaction. In this case, if the error
** code were simply returned to the user, the b-tree layer would not
** automatically attempt a rollback, as it assumes that an error in a
** read-only statement cannot leave the pager in an internally inconsistent
** state.
**
** * The Pager.errCode variable is set to something other than SQLITE_OK.
** * There are one or more outstanding references to pages (after the
** last reference is dropped the pager should move back to OPEN state).
** * The pager is not an in-memory pager.
**
**
** Notes:
**
** * A pager is never in WRITER_DBMOD or WRITER_FINISHED state if the
** connection is open in WAL mode. A WAL connection is always in one
** of the first four states.
**
** * Normally, a connection open in exclusive mode is never in PAGER_OPEN
** state. There are two exceptions: immediately after exclusive-mode has
** been turned on (and before any read or write transactions are
** executed), and when the pager is leaving the "error state".
**
** * See also: assert_pager_state().
*/
/*
** The Pager.eLock variable is almost always set to one of the
** following locking-states, according to the lock currently held on
** the database file: NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK.
** This variable is kept up to date as locks are taken and released by
** the pagerLockDb() and pagerUnlockDb() wrappers.
**
** If the VFS xLock() or xUnlock() returns an error other than SQLITE_BUSY
** (i.e. one of the SQLITE_IOERR subtypes), it is not clear whether or not
** the operation was successful. In these circumstances pagerLockDb() and
** pagerUnlockDb() take a conservative approach - eLock is always updated
** when unlocking the file, and only updated when locking the file if the
** VFS call is successful. This way, the Pager.eLock variable may be set
** to a less exclusive (lower) value than the lock that is actually held
** at the system level, but it is never set to a more exclusive value.
**
** This is usually safe. If an xUnlock fails or appears to fail, there may
** be a few redundant xLock() calls or a lock may be held for longer than
** required, but nothing really goes wrong.
**
** The exception is when the database file is unlocked as the pager moves
** from ERROR to OPEN state. At this point there may be a hot-journal file
** in the file-system that needs to be rolled back (as part of an OPEN->SHARED
** transition, by the same pager or any other). If the call to xUnlock()
** fails at this point and the pager is left holding an EXCLUSIVE lock, this
** can confuse the xCheckReservedLock() call made later as part
** of hot-journal detection.
**
** xCheckReservedLock() is defined as returning true "if there is a RESERVED
** lock held by this process or any others". So xCheckReservedLock may
** return true because the caller itself is holding an EXCLUSIVE lock (but
** doesn't know it because of a previous error in xUnlock). If this happens
** a hot-journal may be mistaken for a journal being created by an active
** transaction in another process, causing SQLite to read from the database
** without rolling it back.
**
** To work around this, if a call to xUnlock() fails when unlocking the
** database in the ERROR state, Pager.eLock is set to UNKNOWN_LOCK. It
** is only changed back to a real locking state after a successful call
** to xLock(EXCLUSIVE). Also, the code to do the OPEN->SHARED state transition
** omits the check for a hot-journal if Pager.eLock is set to UNKNOWN_LOCK.
** Instead, it assumes a hot-journal exists and obtains an EXCLUSIVE
** lock on the database file before attempting to roll it back. See function
** PagerSharedLock() for more detail.
**
** Pager.eLock may only be set to UNKNOWN_LOCK when the pager is in
** PAGER_OPEN state.
*/
/*
** The maximum allowed sector size. 64KiB. If the xSectorsize() method
** returns a value larger than this, then MAX_SECTOR_SIZE is used instead.
** This could conceivably cause corruption following a power failure on
** such a system. This is currently an undocumented limit.
*/
// C documentation
//
// /*
// ** An instance of the following structure is allocated for each active
// ** savepoint and statement transaction in the system. All such structures
// ** are stored in the Pager.aSavepoint[] array, which is allocated and
// ** resized using sqlite3Realloc().
// **
// ** When a savepoint is created, the PagerSavepoint.iHdrOffset field is
// ** set to 0. If a journal-header is written into the main journal while
// ** the savepoint is active, then iHdrOffset is set to the byte offset
// ** immediately following the last journal record written into the main
// ** journal before the journal-header. This is required during savepoint
// ** rollback (see pagerPlaybackSavepoint()).
// */
type TPagerSavepoint = struct {
FiOffset Ti64
FiHdrOffset Ti64
FpInSavepoint uintptr
FnOrig TPgno
FiSubRec TPgno
FbTruncateOnRelease int32
FaWalData [4]Tu32
}
type PagerSavepoint = TPagerSavepoint
type TPagerSavepoint1 = struct {
FiOffset Ti64
FiHdrOffset Ti64
FpInSavepoint uintptr
FnOrig TPgno
FiSubRec TPgno
FbTruncateOnRelease int32
FaWalData [4]Tu32
}
type PagerSavepoint1 = TPagerSavepoint1
/*
** Bits of the Pager.doNotSpill flag. See further description below.
*/
/*
** An open page cache is an instance of struct Pager. A description of
** some of the more important member variables follows:
**
** eState
**
** The current 'state' of the pager object. See the comment and state
** diagram above for a description of the pager state.
**
** eLock
**
** For a real on-disk database, the current lock held on the database file -
** NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK.
**
** For a temporary or in-memory database (neither of which require any
** locks), this variable is always set to EXCLUSIVE_LOCK. Since such
** databases always have Pager.exclusiveMode==1, this tricks the pager
** logic into thinking that it already has all the locks it will ever
** need (and no reason to release them).
**
** In some (obscure) circumstances, this variable may also be set to
** UNKNOWN_LOCK. See the comment above the #define of UNKNOWN_LOCK for
** details.
**
** changeCountDone
**
** This boolean variable is used to make sure that the change-counter
** (the 4-byte header field at byte offset 24 of the database file) is
** not updated more often than necessary.
**
** It is set to true when the change-counter field is updated, which
** can only happen if an exclusive lock is held on the database file.
** It is cleared (set to false) whenever an exclusive lock is
** relinquished on the database file. Each time a transaction is committed,
** the changeCountDone flag is inspected. If it is true, the work of
** updating the change-counter is omitted for the current transaction.
**
** This mechanism means that when running in exclusive mode, a connection
** need only update the change-counter once, for the first transaction
** committed.
**
** setSuper
**
** When PagerCommitPhaseOne() is called to commit a transaction, it may
** (or may not) specify a super-journal name to be written into the
** journal file before it is synced to disk.
**
** Whether or not a journal file contains a super-journal pointer affects
** the way in which the journal file is finalized after the transaction is
** committed or rolled back when running in "journal_mode=PERSIST" mode.
** If a journal file does not contain a super-journal pointer, it is
** finalized by overwriting the first journal header with zeroes. If
** it does contain a super-journal pointer the journal file is finalized
** by truncating it to zero bytes, just as if the connection were
** running in "journal_mode=truncate" mode.
**
** Journal files that contain super-journal pointers cannot be finalized
** simply by overwriting the first journal-header with zeroes, as the
** super-journal pointer could interfere with hot-journal rollback of any
** subsequently interrupted transaction that reuses the journal file.
**
** The flag is cleared as soon as the journal file is finalized (either
** by PagerCommitPhaseTwo or PagerRollback). If an IO error prevents the
** journal file from being successfully finalized, the setSuper flag
** is cleared anyway (and the pager will move to ERROR state).
**
** doNotSpill
**
** This variable controls the behavior of cache-spills (calls made by
** the pcache module to the pagerStress() routine to write cached data
** to the file-system in order to free up memory).
**
** When bits SPILLFLAG_OFF or SPILLFLAG_ROLLBACK of doNotSpill are set,
** writing to the database from pagerStress() is disabled altogether.
** The SPILLFLAG_ROLLBACK bit handles a very obscure case that
** comes up during savepoint rollback, where the pcache module must
** allocate a new page to prevent the journal file from being written
** while it is being traversed by code in pager_playback(). The SPILLFLAG_OFF
** case is a user preference.
**
** If the SPILLFLAG_NOSYNC bit is set, writing to the database from
** pagerStress() is permitted, but syncing the journal file is not.
** This flag is set by sqlite3PagerWrite() when the file-system sector-size
** is larger than the database page-size in order to prevent a journal sync
** from happening in between the journalling of two pages on the same sector.
**
** subjInMemory
**
** This is a boolean variable. If true, then any required sub-journal
** is opened as an in-memory journal file. If false, then in-memory
** sub-journals are only used for in-memory pager files.
**
** This variable is updated by the upper layer each time a new
** write-transaction is opened.
**
** dbSize, dbOrigSize, dbFileSize
**
** Variable dbSize is set to the number of pages in the database file.
** It is valid in PAGER_READER and higher states (all states except for
** OPEN and ERROR).
**
** dbSize is set based on the size of the database file, which may be
** larger than the size of the database (the value stored at offset
** 28 of the database header by the btree). If the size of the file
** is not an integer multiple of the page-size, the value stored in
** dbSize is rounded down (i.e. a 5KB file with 2K page-size has dbSize==2).
** Except, any file that is greater than 0 bytes in size is considered
** to have at least one page. (i.e. a 1KB file with 2K page-size leads
** to dbSize==1).
**
** During a write-transaction, if pages with page-numbers greater than
** dbSize are modified in the cache, dbSize is updated accordingly.
** Similarly, if the database is truncated using PagerTruncateImage(),
** dbSize is updated.
**
** Variables dbOrigSize and dbFileSize are valid in states
** PAGER_WRITER_LOCKED and higher. dbOrigSize is a copy of the dbSize
** variable at the start of the transaction. It is used during rollback,
** and to determine whether or not pages need to be journalled before
** being modified.
**
** Throughout a write-transaction, dbFileSize contains the size of
** the file on disk in pages. It is set to a copy of dbSize when the
** write-transaction is first opened, and updated when VFS calls are made
** to write or truncate the database file on disk.
**
** The only reason the dbFileSize variable is required is to suppress
** unnecessary calls to xTruncate() after committing a transaction. If,
** when a transaction is committed, the dbFileSize variable indicates
** that the database file is larger than the database image (Pager.dbSize),
** pager_truncate() is called. The pager_truncate() call uses xFilesize()
** to measure the database file on disk, and then truncates it if required.
** dbFileSize is not used when rolling back a transaction. In this case
** pager_truncate() is called unconditionally (which means there may be
** a call to xFilesize() that is not strictly required). In either case,
** pager_truncate() may cause the file to become smaller or larger.
**
** dbHintSize
**
** The dbHintSize variable is used to limit the number of calls made to
** the VFS xFileControl(FCNTL_SIZE_HINT) method.
**
** dbHintSize is set to a copy of the dbSize variable when a
** write-transaction is opened (at the same time as dbFileSize and
** dbOrigSize). If the xFileControl(FCNTL_SIZE_HINT) method is called,
** dbHintSize is increased to the number of pages that correspond to the
** size-hint passed to the method call. See pager_write_pagelist() for
** details.
**
** errCode
**
** The Pager.errCode variable is only ever used in PAGER_ERROR state. It
** is set to zero in all other states. In PAGER_ERROR state, Pager.errCode
** is always set to SQLITE_FULL, SQLITE_IOERR or one of the SQLITE_IOERR_XXX
** sub-codes.
**
** syncFlags, walSyncFlags
**
** syncFlags is either SQLITE_SYNC_NORMAL (0x02) or SQLITE_SYNC_FULL (0x03).
** syncFlags is used for rollback mode. walSyncFlags is used for WAL mode
** and contains the flags used to sync the checkpoint operations in the
** lower two bits, and sync flags used for transaction commits in the WAL
** file in bits 0x04 and 0x08. In other words, to get the correct sync flags
** for checkpoint operations, use (walSyncFlags&0x03) and to get the correct
** sync flags for transaction commit, use ((walSyncFlags>>2)&0x03). Note
** that with synchronous=NORMAL in WAL mode, transaction commit is not synced
** meaning that the 0x04 and 0x08 bits are both zero.
*/
type TPager1 = struct {
FpVfs uintptr
FexclusiveMode Tu8
FjournalMode Tu8
FuseJournal Tu8
FnoSync Tu8
FfullSync Tu8
FextraSync Tu8
FsyncFlags Tu8
FwalSyncFlags Tu8
FtempFile Tu8
FnoLock Tu8
FreadOnly Tu8
FmemDb Tu8
FmemVfs Tu8
FeState Tu8
FeLock Tu8
FchangeCountDone Tu8
FsetSuper Tu8
FdoNotSpill Tu8
FsubjInMemory Tu8
FbUseFetch Tu8
FhasHeldSharedLock Tu8
FdbSize TPgno
FdbOrigSize TPgno
FdbFileSize TPgno
FdbHintSize TPgno
FerrCode int32
FnRec int32
FcksumInit Tu32
FnSubRec Tu32
FpInJournal uintptr
Ffd uintptr
Fjfd uintptr
Fsjfd uintptr
FjournalOff Ti64
FjournalHdr Ti64
FpBackup uintptr
FaSavepoint uintptr
FnSavepoint int32
FiDataVersion Tu32
FdbFileVers [16]int8
FnMmapOut int32
FszMmap Tsqlite3_int64
FpMmapFreelist uintptr
FnExtra Tu16
FnReserve Ti16
FvfsFlags Tu32
FsectorSize Tu32
FmxPgno TPgno
FlckPgno TPgno
FpageSize Ti64
FjournalSizeLimit Ti64
FzFilename uintptr
FzJournal uintptr
FxBusyHandler uintptr
FpBusyHandlerArg uintptr
FaStat [4]Tu32
FxReiniter uintptr
FxGet uintptr
FpTmpSpace uintptr
FpPCache uintptr
FpWal uintptr
FzWal uintptr
}
type Pager1 = TPager1
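// The following is an illustrative sketch (not part of the generated
// translation) of the walSyncFlags bit-packing described in the comment
// above TPager1: the lower two bits carry the sync flags for checkpoint
// operations and bits 0x04/0x08 carry the sync flags for transaction
// commits in the WAL file. The function name is an assumption introduced
// here only for illustration.
func exampleDecodeWalSyncFlags(walSyncFlags uint8) (ckptFlags, commitFlags uint8) {
	ckptFlags = walSyncFlags & 0x03          // sync flags for checkpoint operations
	commitFlags = (walSyncFlags >> 2) & 0x03 // sync flags for transaction commits in the WAL file
	return ckptFlags, commitFlags
}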
/*
** Indexes for use with Pager.aStat[]. The Pager.aStat[] array contains
** the values accessed by passing SQLITE_DBSTATUS_CACHE_HIT, CACHE_MISS
** or CACHE_WRITE to sqlite3_db_status().
*/
/*
** The following global variables hold counters used for
** testing purposes only. These variables do not exist in
** a non-testing build. These variables are not thread-safe.
*/
// C documentation
//
// /*
// ** Journal files begin with the following magic string. The data
// ** was obtained from /dev/random. It is used only as a sanity check.
// **
// ** Since version 2.8.0, the journal format contains additional sanity
// ** checking information. If the power fails while the journal is being
// ** written, semi-random garbage data might appear in the journal
// ** file after power is restored. If an attempt is then made
// ** to roll the journal back, the database could be corrupted. The additional
// ** sanity checking data is an attempt to discover the garbage in the
// ** journal and ignore it.
// **
// ** The sanity checking information for the new journal format consists
// ** of a 32-bit checksum on each page of data. The checksum covers both
// ** the page number and the pPager->pageSize bytes of data for the page.
// ** This cksum is initialized to a 32-bit random value that appears in the
// ** journal file right after the header. The random initializer is important,
// ** because garbage data that appears at the end of a journal is likely
// ** data that was once in other files that have now been deleted. If the
// ** garbage data came from an obsolete journal file, the checksums might
// ** be correct. But by initializing the checksum to a random value which
// ** is different for every journal, we minimize that risk.
// */
var _aJournalMagic = [8]uint8{
0: uint8(0xd9),
1: uint8(0xd5),
2: uint8(0x05),
3: uint8(0xf9),
4: uint8(0x20),
5: uint8(0xa1),
6: uint8(0x63),
7: uint8(0xd7),
}
/*
** The size of each page record in the journal is given by
** the following macro.
*/
/*
** The journal header size for this pager. This is usually the same
** size as a single disk sector. See also setSectorSize().
*/
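// The macros referred to in the two comments above are elided by the
// translator. As a hedged sketch of what they compute (based on the
// header and playback code later in this file): a main-journal page
// record is the 4-byte page number, pageSize bytes of data and a 4-byte
// checksum, and the journal header is padded out to one disk sector.
// The function names are illustrative assumptions only.
func exampleJournalPageRecordSize(pageSize int64) int64 {
	return pageSize + 8 // 4-byte page number + page data + 4-byte checksum
}

func exampleJournalHeaderSize(sectorSize uint32) uint32 {
	return sectorSize // the header occupies a full sector
}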
/*
** The macro MEMDB is true if we are dealing with an in-memory database.
** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set,
** the value of MEMDB will be a constant and the compiler will optimize
** out code that would never execute.
*/
/*
** The macro USEFETCH is true if we are allowed to use the xFetch and xUnfetch
** interfaces to access the database using memory-mapped I/O.
*/
/*
** The argument to this macro is a file descriptor (type sqlite3_file*).
** Return 0 if it is not open, or non-zero (but not 1) if it is.
**
** This is so that expressions can be written as:
**
** if( isOpen(pPager->jfd) ){ ...
**
** instead of
**
** if( pPager->jfd->pMethods ){ ...
*/
// C documentation
//
// /*
// ** Return true if page pgno can be read directly from the database file
// ** by the b-tree layer. This is the case if:
// **
// ** * the database file is open,
// ** * there are no dirty pages in the cache, and
// ** * the desired page is not currently in the wal file.
// */
func _sqlite3PagerDirectReadOk(tls *libc.TLS, pPager uintptr, pgno TPgno) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* iRead at bp+0 */ Tu32
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods == uintptr(0) {
return 0
}
if _sqlite3PCacheIsDirty(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache) != 0 {
return 0
}
if (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0)
_sqlite3WalFindFrame(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, pgno, bp)
return libc.BoolInt32(*(*Tu32)(unsafe.Pointer(bp)) == uint32(0))
}
return int32(1)
}
// C documentation
//
// /*
// ** Set the Pager.xGet method for the appropriate routine used to fetch
// ** content from the pager.
// */
func _setGetterMethod(tls *libc.TLS, pPager uintptr) {
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
(*TPager)(unsafe.Pointer(pPager)).FxGet = __ccgo_fp(_getPageError)
} else {
if (*TPager)(unsafe.Pointer(pPager)).FbUseFetch != 0 {
(*TPager)(unsafe.Pointer(pPager)).FxGet = __ccgo_fp(_getPageMMap)
} else {
(*TPager)(unsafe.Pointer(pPager)).FxGet = __ccgo_fp(_getPageNormal)
}
}
}
// C documentation
//
// /*
// ** Return true if it is necessary to write page *pPg into the sub-journal.
// ** A page needs to be written into the sub-journal if there exists one
// ** or more open savepoints for which:
// **
// ** * The page-number is less than or equal to PagerSavepoint.nOrig, and
// ** * The bit corresponding to the page-number is not set in
// ** PagerSavepoint.pInSavepoint.
// */
func _subjRequiresPage(tls *libc.TLS, pPg uintptr) (r int32) {
var i int32
var p, pPager uintptr
var pgno TPgno
_, _, _, _ = i, p, pPager, pgno
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager
pgno = (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno
i = 0
for {
if !(i < (*TPager)(unsafe.Pointer(pPager)).FnSavepoint) {
break
}
p = (*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(i)*56
if (*TPagerSavepoint)(unsafe.Pointer(p)).FnOrig >= pgno && 0 == _sqlite3BitvecTestNotNull(tls, (*TPagerSavepoint)(unsafe.Pointer(p)).FpInSavepoint, pgno) {
i = i + int32(1)
for {
if !(i < (*TPager)(unsafe.Pointer(pPager)).FnSavepoint) {
break
}
(*(*TPagerSavepoint)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(i)*56))).FbTruncateOnRelease = 0
goto _2
_2:
;
i++
}
return int32(1)
}
goto _1
_1:
;
i++
}
return 0
}
// C documentation
//
// /*
// ** Read a 32-bit integer from the given file descriptor. Store the integer
// ** that is read in *pRes. Return SQLITE_OK if everything worked, or an
// ** error code if something goes wrong.
// **
// ** All values are stored on disk as big-endian.
// */
func _read32bits(tls *libc.TLS, fd uintptr, offset Ti64, pRes uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* ac at bp+0 */ [4]uint8
_ = rc
rc = _sqlite3OsRead(tls, fd, bp, int32(4), offset)
if rc == SQLITE_OK {
*(*Tu32)(unsafe.Pointer(pRes)) = _sqlite3Get4byte(tls, bp)
}
return rc
}
/*
** Write a 32-bit integer into a string buffer in big-endian byte order.
*/
// C documentation
//
// /*
// ** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK
// ** on success or an error code if something goes wrong.
// */
func _write32bits(tls *libc.TLS, fd uintptr, offset Ti64, val Tu32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* ac at bp+0 */ [4]int8
_sqlite3Put4byte(tls, bp, val)
return _sqlite3OsWrite(tls, fd, bp, int32(4), offset)
}
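// A minimal sketch (not part of the generated translation) of the
// big-endian byte order that _read32bits and _write32bits above rely on;
// it mirrors what the _sqlite3Get4byte/_sqlite3Put4byte helpers are used
// for here, written with builtin types only so no extra imports are
// needed. The names are illustrative assumptions.
func examplePut4byteBE(dst []byte, v uint32) {
	dst[0] = byte(v >> 24)
	dst[1] = byte(v >> 16)
	dst[2] = byte(v >> 8)
	dst[3] = byte(v)
}

func exampleGet4byteBE(src []byte) uint32 {
	return uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
}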
// C documentation
//
// /*
// ** Unlock the database file to level eLock, which must be either NO_LOCK
// ** or SHARED_LOCK. Regardless of whether or not the call to xUnlock()
// ** succeeds, set the Pager.eLock variable to match the (attempted) new lock.
// **
// ** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
// ** called, do not modify it. See the comment above the #define of
// ** UNKNOWN_LOCK for an explanation of this.
// */
func _pagerUnlockDb(tls *libc.TLS, pPager uintptr, eLock int32) (r int32) {
var rc, v1 int32
_, _ = rc, v1
rc = SQLITE_OK
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != uintptr(0) {
if (*TPager)(unsafe.Pointer(pPager)).FnoLock != 0 {
v1 = SQLITE_OK
} else {
v1 = _sqlite3OsUnlock(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, eLock)
}
rc = v1
if int32((*TPager)(unsafe.Pointer(pPager)).FeLock) != libc.Int32FromInt32(EXCLUSIVE_LOCK)+libc.Int32FromInt32(1) {
(*TPager)(unsafe.Pointer(pPager)).FeLock = uint8(eLock)
}
}
(*TPager)(unsafe.Pointer(pPager)).FchangeCountDone = (*TPager)(unsafe.Pointer(pPager)).FtempFile /* ticket fb3b3024ea238d5c */
return rc
}
// C documentation
//
// /*
// ** Lock the database file to level eLock, which must be either SHARED_LOCK,
// ** RESERVED_LOCK or EXCLUSIVE_LOCK. If the caller is successful, set the
// ** Pager.eLock variable to the new locking state.
// **
// ** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
// ** called, do not modify it unless the new locking state is EXCLUSIVE_LOCK.
// ** See the comment above the #define of UNKNOWN_LOCK for an explanation
// ** of this.
// */
func _pagerLockDb(tls *libc.TLS, pPager uintptr, eLock int32) (r int32) {
var rc, v1 int32
_, _ = rc, v1
rc = SQLITE_OK
if int32((*TPager)(unsafe.Pointer(pPager)).FeLock) < eLock || int32((*TPager)(unsafe.Pointer(pPager)).FeLock) == libc.Int32FromInt32(EXCLUSIVE_LOCK)+libc.Int32FromInt32(1) {
if (*TPager)(unsafe.Pointer(pPager)).FnoLock != 0 {
v1 = SQLITE_OK
} else {
v1 = _sqlite3OsLock(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, eLock)
}
rc = v1
if rc == SQLITE_OK && (int32((*TPager)(unsafe.Pointer(pPager)).FeLock) != libc.Int32FromInt32(EXCLUSIVE_LOCK)+libc.Int32FromInt32(1) || eLock == int32(EXCLUSIVE_LOCK)) {
(*TPager)(unsafe.Pointer(pPager)).FeLock = uint8(eLock)
}
}
return rc
}
// C documentation
//
// /*
// ** This function determines whether or not the atomic-write or
// ** atomic-batch-write optimizations can be used with this pager. The
// ** atomic-write optimization can be used if:
// **
// ** (a) the value returned by OsDeviceCharacteristics() indicates that
// ** a database page may be written atomically, and
// ** (b) the value returned by OsSectorSize() is less than or equal
// ** to the page size.
// **
// ** If it can be used, then the value returned is the size of the journal
// ** file when it contains rollback data for exactly one page.
// **
// ** The atomic-batch-write optimization can be used if OsDeviceCharacteristics()
// ** returns a value with the SQLITE_IOCAP_BATCH_ATOMIC bit set. -1 is
// ** returned in this case.
// **
// ** If neither optimization can be used, 0 is returned.
// */
func _jrnlBufferSize(tls *libc.TLS, pPager uintptr) (r int32) {
_ = pPager
return 0
}
/*
** If SQLITE_CHECK_PAGES is defined then we do some sanity checking
** on the cache using a hash function. This is used for testing
** and debugging only.
*/
// C documentation
//
// /*
// ** When this is called the journal file for pager pPager must be open.
// ** This function attempts to read a super-journal file name from the
// ** end of the file and, if successful, copies it into memory supplied
// ** by the caller. See comments above writeSuperJournal() for the format
// ** used to store a super-journal file name at the end of a journal file.
// **
// ** zSuper must point to a buffer of at least nSuper bytes allocated by
// ** the caller. This should be sqlite3_vfs.mxPathname+1 (to ensure there is
// ** enough space to write the super-journal name). If the super-journal
// ** name in the journal is longer than nSuper bytes (including a
// ** nul-terminator), then this is handled as if no super-journal name
// ** were present in the journal.
// **
// ** If a super-journal file name is present at the end of the journal
// ** file, then it is copied into the buffer pointed to by zSuper. A
// ** nul-terminator byte is appended to the buffer following the
// ** super-journal file name.
// **
// ** If it is determined that no super-journal file name is present
// ** zSuper[0] is set to 0 and SQLITE_OK returned.
// **
// ** If an error occurs while reading from the journal file, an SQLite
// ** error code is returned.
// */
func _readSuperJournal(tls *libc.TLS, pJrnl uintptr, zSuper uintptr, nSuper Tu32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var rc, v1, v2, v4, v6, v8 int32
var u Tu32
var v3, v5, v7, v9 bool
var _ /* aMagic at bp+24 */ [8]uint8
var _ /* cksum at bp+16 */ Tu32
var _ /* len at bp+0 */ Tu32
var _ /* szJ at bp+8 */ Ti64
_, _, _, _, _, _, _, _, _, _, _ = rc, u, v1, v2, v3, v4, v5, v6, v7, v8, v9 /* A buffer to hold the magic header */
*(*int8)(unsafe.Pointer(zSuper)) = int8('\000')
v1 = _sqlite3OsFileSize(tls, pJrnl, bp+8)
rc = v1
if v3 = SQLITE_OK != v1 || *(*Ti64)(unsafe.Pointer(bp + 8)) < int64(16); !v3 {
v2 = _read32bits(tls, pJrnl, *(*Ti64)(unsafe.Pointer(bp + 8))-int64(16), bp)
rc = v2
}
if v5 = v3 || SQLITE_OK != v2 || *(*Tu32)(unsafe.Pointer(bp)) >= nSuper || int64(*(*Tu32)(unsafe.Pointer(bp))) > *(*Ti64)(unsafe.Pointer(bp + 8))-int64(16) || *(*Tu32)(unsafe.Pointer(bp)) == uint32(0); !v5 {
v4 = _read32bits(tls, pJrnl, *(*Ti64)(unsafe.Pointer(bp + 8))-int64(12), bp+16)
rc = v4
}
if v7 = v5 || SQLITE_OK != v4; !v7 {
v6 = _sqlite3OsRead(tls, pJrnl, bp+24, int32(8), *(*Ti64)(unsafe.Pointer(bp + 8))-int64(8))
rc = v6
}
if v9 = v7 || SQLITE_OK != v6 || libc.Xmemcmp(tls, bp+24, uintptr(unsafe.Pointer(&_aJournalMagic)), uint64(8)) != 0; !v9 {
v8 = _sqlite3OsRead(tls, pJrnl, zSuper, int32(*(*Tu32)(unsafe.Pointer(bp))), *(*Ti64)(unsafe.Pointer(bp + 8))-int64(16)-int64(*(*Tu32)(unsafe.Pointer(bp))))
rc = v8
}
if v9 || SQLITE_OK != v8 {
return rc
}
/* See if the checksum matches the super-journal name */
u = uint32(0)
for {
if !(u < *(*Tu32)(unsafe.Pointer(bp))) {
break
}
*(*Tu32)(unsafe.Pointer(bp + 16)) -= uint32(*(*int8)(unsafe.Pointer(zSuper + uintptr(u))))
goto _10
_10:
;
u++
}
if *(*Tu32)(unsafe.Pointer(bp + 16)) != 0 {
/* If the checksum doesn't add up, then one or more of the disk sectors
** containing the super-journal filename is corrupted. This means we should
** definitely roll back, so just return SQLITE_OK and report a (nul)
** super-journal filename.
*/
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0)
}
*(*int8)(unsafe.Pointer(zSuper + uintptr(*(*Tu32)(unsafe.Pointer(bp))))) = int8('\000')
*(*int8)(unsafe.Pointer(zSuper + uintptr(*(*Tu32)(unsafe.Pointer(bp))+uint32(1)))) = int8('\000')
return SQLITE_OK
}
// C documentation
//
// /*
// ** Return the offset of the sector boundary at or immediately
// ** following the value in pPager->journalOff, assuming a sector
// ** size of pPager->sectorSize bytes.
// **
// ** i.e. for a sector size of 512:
// **
// ** Pager.journalOff Return value
// ** ---------------------------------------
// ** 0 0
// ** 512 512
// ** 100 512
// ** 2000 2048
// **
// */
func _journalHdrOffset(tls *libc.TLS, pPager uintptr) (r Ti64) {
var c, offset Ti64
_, _ = c, offset
offset = 0
c = (*TPager)(unsafe.Pointer(pPager)).FjournalOff
if c != 0 {
offset = ((c-int64(1))/int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize) + int64(1)) * int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize)
}
return offset
}
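// An illustrative, self-contained version (not part of the generated
// translation) of the sector-boundary rounding performed by
// _journalHdrOffset above, reproducing the table in its comment for a
// 512-byte sector: 0 -> 0, 512 -> 512, 100 -> 512, 2000 -> 2048. The
// function name is an assumption introduced for illustration only.
func exampleRoundUpToSector(journalOff int64, sectorSize int64) int64 {
	if journalOff == 0 {
		return 0
	}
	return ((journalOff-1)/sectorSize + 1) * sectorSize
}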
// C documentation
//
// /*
// ** The journal file must be open when this function is called.
// **
// ** This function is a no-op if the journal file has not been written to
// ** within the current transaction (i.e. if Pager.journalOff==0).
// **
// ** If doTruncate is non-zero or the Pager.journalSizeLimit variable is
// ** set to 0, then truncate the journal file to zero bytes in size. Otherwise,
// ** zero the 28-byte header at the start of the journal file. In either case,
// ** if the pager is not in no-sync mode, sync the journal file immediately
// ** after writing or truncating it.
// **
// ** If Pager.journalSizeLimit is set to a positive, non-zero value, and
// ** following the truncation or zeroing described above the size of the
// ** journal file in bytes is larger than this value, then truncate the
// ** journal file to Pager.journalSizeLimit bytes. The journal file does
// ** not need to be synced following this operation.
// **
// ** If an IO error occurs, abandon processing and return the IO error code.
// ** Otherwise, return SQLITE_OK.
// */
func _zeroJournalHdr(tls *libc.TLS, pPager uintptr, doTruncate int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iLimit Ti64
var rc int32
var _ /* sz at bp+0 */ Ti64
_, _ = iLimit, rc
rc = SQLITE_OK /* Return code */
if (*TPager)(unsafe.Pointer(pPager)).FjournalOff != 0 {
iLimit = (*TPager)(unsafe.Pointer(pPager)).FjournalSizeLimit /* Local cache of jsl */
if doTruncate != 0 || iLimit == 0 {
rc = _sqlite3OsTruncate(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, 0)
} else {
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, uintptr(unsafe.Pointer(&_zeroHdr)), int32(28), 0)
}
if rc == SQLITE_OK && !((*TPager)(unsafe.Pointer(pPager)).FnoSync != 0) {
rc = _sqlite3OsSync(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, int32(SQLITE_SYNC_DATAONLY)|int32((*TPager)(unsafe.Pointer(pPager)).FsyncFlags))
}
/* At this point the transaction is committed but the write lock
** is still held on the file. If there is a size limit configured for
** the persistent journal and the journal file currently consumes more
** space than that limit allows for, truncate it now. There is no need
** to sync the file following this operation.
*/
if rc == SQLITE_OK && iLimit > 0 {
rc = _sqlite3OsFileSize(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, bp)
if rc == SQLITE_OK && *(*Ti64)(unsafe.Pointer(bp)) > iLimit {
rc = _sqlite3OsTruncate(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iLimit)
}
}
}
return rc
}
var _zeroHdr = [28]int8{}
// C documentation
//
// /*
// ** The journal file must be open when this routine is called. A journal
// ** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the
// ** current location.
// **
// ** The format for the journal header is as follows:
// ** - 8 bytes: Magic identifying journal format.
// ** - 4 bytes: Number of records in journal, or -1 if no-sync mode is on.
// ** - 4 bytes: Random number used for page hash.
// ** - 4 bytes: Initial database page count.
// ** - 4 bytes: Sector size used by the process that wrote this journal.
// ** - 4 bytes: Database page size.
// **
// ** Followed by (JOURNAL_HDR_SZ - 28) bytes of unused space.
// */
func _writeJournalHdr(tls *libc.TLS, pPager uintptr) (r int32) {
var ii, rc int32
var nHeader, nWrite Tu32
var zHeader uintptr
var v2 Ti64
_, _, _, _, _, _ = ii, nHeader, nWrite, rc, zHeader, v2
rc = SQLITE_OK /* Return code */
zHeader = (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace /* Temporary space used to build header */
nHeader = uint32((*TPager)(unsafe.Pointer(pPager)).FpageSize) /* Loop counter */
/* Journal file must be open. */
if nHeader > (*TPager)(unsafe.Pointer(pPager)).FsectorSize {
nHeader = (*TPager)(unsafe.Pointer(pPager)).FsectorSize
}
/* If there are active savepoints and any of them were created
** since the most recent journal header was written, update the
** PagerSavepoint.iHdrOffset fields now.
*/
ii = 0
for {
if !(ii < (*TPager)(unsafe.Pointer(pPager)).FnSavepoint) {
break
}
if (*(*TPagerSavepoint)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(ii)*56))).FiHdrOffset == 0 {
(*(*TPagerSavepoint)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(ii)*56))).FiHdrOffset = (*TPager)(unsafe.Pointer(pPager)).FjournalOff
}
goto _1
_1:
;
ii++
}
v2 = _journalHdrOffset(tls, pPager)
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = v2
(*TPager)(unsafe.Pointer(pPager)).FjournalHdr = v2
/*
** Write the nRec Field - the number of page records that follow this
** journal header. Normally, zero is written to this value at this time.
** After the records are added to the journal (and the journal synced,
** if in full-sync mode), the zero is overwritten with the true number
** of records (see syncJournal()).
**
** A faster alternative is to write 0xFFFFFFFF to the nRec field. When
** reading the journal this value tells SQLite to assume that the
** rest of the journal file contains valid page records. This assumption
** is dangerous, as if a failure occurred whilst writing to the journal
** file it may contain some garbage data. There are two scenarios
** where this risk can be ignored:
**
** * When the pager is in no-sync mode. Corruption can follow a
** power failure in this case anyway.
**
** * When the SQLITE_IOCAP_SAFE_APPEND flag is set. This guarantees
** that garbage data is never appended to the journal file.
*/
if (*TPager)(unsafe.Pointer(pPager)).FnoSync != 0 || int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_MEMORY) || _sqlite3OsDeviceCharacteristics(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd)&int32(SQLITE_IOCAP_SAFE_APPEND) != 0 {
libc.Xmemcpy(tls, zHeader, uintptr(unsafe.Pointer(&_aJournalMagic)), uint64(8))
_sqlite3Put4byte(tls, zHeader+uintptr(8), uint32(0xffffffff))
} else {
libc.Xmemset(tls, zHeader, 0, libc.Uint64FromInt64(8)+libc.Uint64FromInt32(4))
}
/* The random check-hash initializer */
if int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) != int32(PAGER_JOURNALMODE_MEMORY) {
Xsqlite3_randomness(tls, int32(4), pPager+56)
}
_sqlite3Put4byte(tls, zHeader+uintptr(libc.Uint64FromInt64(8)+libc.Uint64FromInt32(4)), (*TPager)(unsafe.Pointer(pPager)).FcksumInit)
/* The initial database size */
_sqlite3Put4byte(tls, zHeader+uintptr(libc.Uint64FromInt64(8)+libc.Uint64FromInt32(8)), (*TPager)(unsafe.Pointer(pPager)).FdbOrigSize)
/* The assumed sector size for this process */
_sqlite3Put4byte(tls, zHeader+uintptr(libc.Uint64FromInt64(8)+libc.Uint64FromInt32(12)), (*TPager)(unsafe.Pointer(pPager)).FsectorSize)
/* The page size */
_sqlite3Put4byte(tls, zHeader+uintptr(libc.Uint64FromInt64(8)+libc.Uint64FromInt32(16)), uint32((*TPager)(unsafe.Pointer(pPager)).FpageSize))
/* Initializing the tail of the buffer is not necessary. Everything
** works fine if the following memset() is omitted. But initializing
** the memory prevents valgrind from complaining, so we are willing to
** take the performance hit.
*/
libc.Xmemset(tls, zHeader+uintptr(libc.Uint64FromInt64(8)+libc.Uint64FromInt32(20)), 0, uint64(nHeader)-(libc.Uint64FromInt64(8)+libc.Uint64FromInt32(20)))
/* In theory, it is only necessary to write the 28 bytes that the
** journal header consumes to the journal file here. Then increment the
** Pager.journalOff variable by JOURNAL_HDR_SZ so that the next
** record is written to the following sector (leaving a gap in the file
** that will be implicitly filled in by the OS).
**
** However it has been discovered that on some systems this pattern can
** be significantly slower than contiguously writing data to the file,
** even if that means explicitly writing data to the block of
** (JOURNAL_HDR_SZ - 28) bytes that will not be used. So that is what
** is done.
**
** The loop is required here in case the sector-size is larger than the
** database page size. Since the zHeader buffer is only Pager.pageSize
** bytes in size, more than one call to sqlite3OsWrite() may be required
** to populate the entire journal header sector.
*/
nWrite = uint32(0)
for {
if !(rc == SQLITE_OK && nWrite < (*TPager)(unsafe.Pointer(pPager)).FsectorSize) {
break
}
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, zHeader, int32(nHeader), (*TPager)(unsafe.Pointer(pPager)).FjournalOff)
*(*Ti64)(unsafe.Pointer(pPager + 96)) += int64(nHeader)
goto _3
_3:
;
nWrite += nHeader
}
return rc
}
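// A hedged sketch (not part of the generated translation) of how the
// 28-byte journal header written by _writeJournalHdr above is laid out,
// expressed as a decoder over a byte slice. The field offsets follow the
// format comment and the _sqlite3Put4byte calls above; the type and
// function names are assumptions introduced only for illustration.
type exampleJournalHdr struct {
	magic      [8]byte // aJournalMagic, or zeroes when nRec is to be patched in later (see syncJournal())
	nRec       uint32  // number of page records that follow, or 0xffffffff
	cksumInit  uint32  // random checksum initializer
	dbOrigSize uint32  // database size in pages at the start of the transaction
	sectorSize uint32  // sector size assumed by the process that wrote the journal
	pageSize   uint32  // database page size
}

func exampleParseJournalHdr(b []byte) exampleJournalHdr {
	var h exampleJournalHdr
	copy(h.magic[:], b[0:8])
	h.nRec = exampleGet4byteBE(b[8:12])
	h.cksumInit = exampleGet4byteBE(b[12:16])
	h.dbOrigSize = exampleGet4byteBE(b[16:20])
	h.sectorSize = exampleGet4byteBE(b[20:24])
	h.pageSize = exampleGet4byteBE(b[24:28])
	return h
}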
// C documentation
//
// /*
// ** The journal file must be open when this is called. A journal header file
// ** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal
// ** file. The current location in the journal file is given by
// ** pPager->journalOff. See comments above function writeJournalHdr() for
// ** a description of the journal header format.
// **
// ** If the header is read successfully, *pNRec is set to the number of
// ** page records following this header and *pDbSize is set to the size of the
// ** database before the transaction began, in pages. Also, pPager->cksumInit
// ** is set to the value read from the journal header. SQLITE_OK is returned
// ** in this case.
// **
// ** If the journal header file appears to be corrupted, SQLITE_DONE is
// ** returned and *pNRec and *pDbSize are undefined. If JOURNAL_HDR_SZ bytes
// ** cannot be read from the journal file an error code is returned.
// */
func _readJournalHdr(tls *libc.TLS, pPager uintptr, isHot int32, journalSize Ti64, pNRec uintptr, pDbSize uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iHdrOff Ti64
var rc, v1, v2, v4, v6, v7 int32
var v3, v5, v8 bool
var _ /* aMagic at bp+0 */ [8]uint8
var _ /* iPageSize at bp+8 */ Tu32
var _ /* iSectorSize at bp+12 */ Tu32
_, _, _, _, _, _, _, _, _, _ = iHdrOff, rc, v1, v2, v3, v4, v5, v6, v7, v8 /* Offset of journal header being read */
/* Journal file must be open. */
/* Advance Pager.journalOff to the start of the next sector. If the
** journal file is too small for there to be a header stored at this
** point, return SQLITE_DONE.
*/
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = _journalHdrOffset(tls, pPager)
if (*TPager)(unsafe.Pointer(pPager)).FjournalOff+int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize) > journalSize {
return int32(SQLITE_DONE)
}
iHdrOff = (*TPager)(unsafe.Pointer(pPager)).FjournalOff
/* Read in the first 8 bytes of the journal header. If they do not match
** the magic string found at the start of each journal header, return
** SQLITE_DONE. If an IO error occurs, return an error code. Otherwise,
** proceed.
*/
if isHot != 0 || iHdrOff != (*TPager)(unsafe.Pointer(pPager)).FjournalHdr {
rc = _sqlite3OsRead(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, bp, int32(8), iHdrOff)
if rc != 0 {
return rc
}
if libc.Xmemcmp(tls, bp, uintptr(unsafe.Pointer(&_aJournalMagic)), uint64(8)) != 0 {
return int32(SQLITE_DONE)
}
}
/* Read the first three 32-bit fields of the journal header: The nRec
** field, the checksum-initializer and the database size at the start
** of the transaction. Return an error code if anything goes wrong.
*/
v1 = _read32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff+int64(8), pNRec)
rc = v1
if v3 = SQLITE_OK != v1; !v3 {
v2 = _read32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff+int64(12), pPager+56)
rc = v2
}
if v5 = v3 || SQLITE_OK != v2; !v5 {
v4 = _read32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff+int64(16), pDbSize)
rc = v4
}
if v5 || SQLITE_OK != v4 {
return rc
}
if (*TPager)(unsafe.Pointer(pPager)).FjournalOff == 0 { /* Sector-size field of journal header */
/* Read the page-size and sector-size journal header fields. */
v6 = _read32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff+int64(20), bp+12)
rc = v6
if v8 = SQLITE_OK != v6; !v8 {
v7 = _read32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff+int64(24), bp+8)
rc = v7
}
if v8 || SQLITE_OK != v7 {
return rc
}
/* Versions of SQLite prior to 3.5.8 set the page-size field of the
** journal header to zero. In this case, assume that the Pager.pageSize
** variable is already set to the correct page size.
*/
if *(*Tu32)(unsafe.Pointer(bp + 8)) == uint32(0) {
*(*Tu32)(unsafe.Pointer(bp + 8)) = uint32((*TPager)(unsafe.Pointer(pPager)).FpageSize)
}
/* Check that the values read from the page-size and sector-size fields
** are within range. To be 'in range', both values need to be a power
** of two greater than or equal to 512 or 32, and not greater than their
** respective compile time maximum limits.
*/
if *(*Tu32)(unsafe.Pointer(bp + 8)) < uint32(512) || *(*Tu32)(unsafe.Pointer(bp + 12)) < uint32(32) || *(*Tu32)(unsafe.Pointer(bp + 8)) > uint32(SQLITE_MAX_PAGE_SIZE) || *(*Tu32)(unsafe.Pointer(bp + 12)) > uint32(MAX_SECTOR_SIZE) || (*(*Tu32)(unsafe.Pointer(bp + 8))-uint32(1))&*(*Tu32)(unsafe.Pointer(bp + 8)) != uint32(0) || (*(*Tu32)(unsafe.Pointer(bp + 12))-uint32(1))&*(*Tu32)(unsafe.Pointer(bp + 12)) != uint32(0) {
/* If either the page-size or sector-size in the journal-header is
** invalid, then the process that wrote the journal-header must have
** crashed before the header was synced. In this case stop reading
** the journal file here.
*/
return int32(SQLITE_DONE)
}
/* Update the page-size to match the value read from the journal.
** Use a testcase() macro to make sure that malloc failure within
** PagerSetPagesize() is tested.
*/
rc = _sqlite3PagerSetPagesize(tls, pPager, bp+8, -int32(1))
/* Update the assumed sector-size to match the value used by
** the process that created this journal. If this journal was
** created by a process other than this one, then this routine
** is being called from within pager_playback(). The local value
** of Pager.sectorSize is restored at the end of that routine.
*/
(*TPager)(unsafe.Pointer(pPager)).FsectorSize = *(*Tu32)(unsafe.Pointer(bp + 12))
}
*(*Ti64)(unsafe.Pointer(pPager + 96)) += int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize)
return rc
}
// C documentation
//
// /*
// ** Write the supplied super-journal name into the journal file for pager
// ** pPager at the current location. The super-journal name must be the last
// ** thing written to a journal file. If the pager is in full-sync mode, the
// ** journal file descriptor is advanced to the next sector boundary before
// ** anything is written. The format is:
// **
// ** + 4 bytes: PAGER_SJ_PGNO.
// ** + N bytes: super-journal filename in utf-8.
// ** + 4 bytes: N (length of super-journal name in bytes, no nul-terminator).
// ** + 4 bytes: super-journal name checksum.
// ** + 8 bytes: aJournalMagic[].
// **
// ** The super-journal page checksum is the sum of the bytes in the super-journal
// ** name, where each byte is interpreted as a signed 8-bit integer.
// **
// ** If zSuper is a NULL pointer (occurs for a single database transaction),
// ** this call is a no-op.
// */
func _writeSuperJournal(tls *libc.TLS, pPager uintptr, zSuper uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var cksum Tu32
var iHdrOff Ti64
var nSuper, rc, v11, v2, v3, v5, v7, v9 int32
var v10, v4, v6, v8 bool
var _ /* jrnlSize at bp+0 */ Ti64
_, _, _, _, _, _, _, _, _, _, _, _, _, _ = cksum, iHdrOff, nSuper, rc, v10, v11, v2, v3, v4, v5, v6, v7, v8, v9 /* Size of journal file on disk */
cksum = uint32(0) /* Checksum of string zSuper */
if !(zSuper != 0) || int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_MEMORY) || !((*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != libc.UintptrFromInt32(0)) {
return SQLITE_OK
}
(*TPager)(unsafe.Pointer(pPager)).FsetSuper = uint8(1)
/* Calculate the length in bytes and the checksum of zSuper */
nSuper = 0
for {
if !(*(*int8)(unsafe.Pointer(zSuper + uintptr(nSuper))) != 0) {
break
}
cksum += uint32(*(*int8)(unsafe.Pointer(zSuper + uintptr(nSuper))))
goto _1
_1:
;
nSuper++
}
/* If in full-sync mode, advance to the next disk sector before writing
** the super-journal name. This is in case the previous page written to
** the journal has already been synced.
*/
if (*TPager)(unsafe.Pointer(pPager)).FfullSync != 0 {
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = _journalHdrOffset(tls, pPager)
}
iHdrOff = (*TPager)(unsafe.Pointer(pPager)).FjournalOff
/* Write the super-journal data to the end of the journal file. If
** an error occurs, return the error code to the caller.
*/
v2 = _write32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff, (*TPager)(unsafe.Pointer(pPager)).FlckPgno)
rc = v2
if v4 = 0 != v2; !v4 {
v3 = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, zSuper, nSuper, iHdrOff+int64(4))
rc = v3
}
if v6 = v4 || 0 != v3; !v6 {
v5 = _write32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff+int64(4)+int64(nSuper), uint32(nSuper))
rc = v5
}
if v8 = v6 || 0 != v5; !v8 {
v7 = _write32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iHdrOff+int64(4)+int64(nSuper)+int64(4), cksum)
rc = v7
}
if v10 = v8 || 0 != v7; !v10 {
v9 = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, uintptr(unsafe.Pointer(&_aJournalMagic)), int32(8), iHdrOff+int64(4)+int64(nSuper)+int64(8))
rc = v9
}
if v10 || 0 != v9 {
return rc
}
*(*Ti64)(unsafe.Pointer(pPager + 96)) += int64(nSuper + libc.Int32FromInt32(20))
/* If the pager is in persistent-journal mode, then the physical
** journal-file may extend past the end of the super-journal name
** and 8 bytes of magic data just written to the file. This is
** dangerous because the code to rollback a hot-journal file
** will not be able to find the super-journal name to determine
** whether or not the journal is hot.
**
** Easiest thing to do in this scenario is to truncate the journal
** file to the required size.
*/
v11 = _sqlite3OsFileSize(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, bp)
rc = v11
if SQLITE_OK == v11 && *(*Ti64)(unsafe.Pointer(bp)) > (*TPager)(unsafe.Pointer(pPager)).FjournalOff {
rc = _sqlite3OsTruncate(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, (*TPager)(unsafe.Pointer(pPager)).FjournalOff)
}
return rc
}
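// A small sketch (not part of the generated translation) of the
// super-journal name checksum described above _writeSuperJournal: the sum
// of the name's bytes, each interpreted as a signed 8-bit integer, with
// 32-bit wraparound. The function name is an illustrative assumption.
func exampleSuperJournalCksum(zSuper []byte) uint32 {
	var cksum uint32
	for _, c := range zSuper {
		cksum += uint32(int8(c)) // sign-extended, matching the int8 addition in the code above
	}
	return cksum
}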
// C documentation
//
// /*
// ** Discard the entire contents of the in-memory page-cache.
// */
func _pager_reset(tls *libc.TLS, pPager uintptr) {
(*TPager)(unsafe.Pointer(pPager)).FiDataVersion++
_sqlite3BackupRestart(tls, (*TPager)(unsafe.Pointer(pPager)).FpBackup)
_sqlite3PcacheClear(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
}
// C documentation
//
// /*
// ** Return the pPager->iDataVersion value
// */
func _sqlite3PagerDataVersion(tls *libc.TLS, pPager uintptr) (r Tu32) {
return (*TPager)(unsafe.Pointer(pPager)).FiDataVersion
}
// C documentation
//
// /*
// ** Free all structures in the Pager.aSavepoint[] array and set both
// ** Pager.aSavepoint and Pager.nSavepoint to zero. Close the sub-journal
// ** if it is open and the pager is not in exclusive mode.
// */
func _releaseAllSavepoints(tls *libc.TLS, pPager uintptr) {
var ii int32
_ = ii /* Iterator for looping through Pager.aSavepoint */
ii = 0
for {
if !(ii < (*TPager)(unsafe.Pointer(pPager)).FnSavepoint) {
break
}
_sqlite3BitvecDestroy(tls, (*(*TPagerSavepoint)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(ii)*56))).FpInSavepoint)
goto _1
_1:
;
ii++
}
if !((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0) || _sqlite3JournalIsInMemory(tls, (*TPager)(unsafe.Pointer(pPager)).Fsjfd) != 0 {
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fsjfd)
}
Xsqlite3_free(tls, (*TPager)(unsafe.Pointer(pPager)).FaSavepoint)
(*TPager)(unsafe.Pointer(pPager)).FaSavepoint = uintptr(0)
(*TPager)(unsafe.Pointer(pPager)).FnSavepoint = 0
(*TPager)(unsafe.Pointer(pPager)).FnSubRec = uint32(0)
}
// C documentation
//
// /*
// ** Set the bit number pgno in the PagerSavepoint.pInSavepoint
// ** bitvecs of all open savepoints. Return SQLITE_OK if successful
// ** or SQLITE_NOMEM if a malloc failure occurs.
// */
func _addToSavepointBitvecs(tls *libc.TLS, pPager uintptr, pgno TPgno) (r int32) {
var ii, rc int32
var p uintptr
_, _, _ = ii, p, rc /* Loop counter */
rc = SQLITE_OK /* Result code */
ii = 0
for {
if !(ii < (*TPager)(unsafe.Pointer(pPager)).FnSavepoint) {
break
}
p = (*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(ii)*56
if pgno <= (*TPagerSavepoint)(unsafe.Pointer(p)).FnOrig {
rc |= _sqlite3BitvecSet(tls, (*TPagerSavepoint)(unsafe.Pointer(p)).FpInSavepoint, pgno)
}
goto _1
_1:
;
ii++
}
return rc
}
// C documentation
//
// /*
// ** This function is a no-op if the pager is in exclusive mode and not
// ** in the ERROR state. Otherwise, it switches the pager to PAGER_OPEN
// ** state.
// **
// ** If the pager is not in exclusive-access mode, the database file is
// ** completely unlocked. If the file is unlocked and the file-system does
// ** not exhibit the UNDELETABLE_WHEN_OPEN property, the journal file is
// ** closed (if it is open).
// **
// ** If the pager is in ERROR state when this function is called, the
// ** contents of the pager cache are discarded before switching back to
// ** the OPEN state. Regardless of whether the pager is in exclusive-mode
// ** or not, any journal file left in the file-system will be treated
// ** as a hot-journal and rolled back the next time a read-transaction
// ** is opened (by this or by any other connection).
// */
func _pager_unlock(tls *libc.TLS, pPager uintptr) {
var iDc, rc, v1, v2 int32
_, _, _, _ = iDc, rc, v1, v2
_sqlite3BitvecDestroy(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal)
(*TPager)(unsafe.Pointer(pPager)).FpInJournal = uintptr(0)
_releaseAllSavepoints(tls, pPager)
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
_sqlite3WalEndReadTransaction(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_OPEN)
} else {
if !((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0) {
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != uintptr(0) {
v1 = _sqlite3OsDeviceCharacteristics(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd)
} else {
v1 = 0
} /* Error code returned by pagerUnlockDb() */
iDc = v1
/* If the operating system supports deletion of open files, then
** close the journal file when dropping the database lock. Otherwise
** another connection with journal_mode=delete might delete the file
** out from under us.
*/
if 0 == iDc&int32(SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN) || int32(1) != int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode)&int32(5) {
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
}
/* If the pager is in the ERROR state and the call to unlock the database
** file fails, set the current lock to UNKNOWN_LOCK. See the comment
** above the #define for UNKNOWN_LOCK for an explanation of why this
** is necessary.
*/
rc = _pagerUnlockDb(tls, pPager, NO_LOCK)
if rc != SQLITE_OK && int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_ERROR) {
(*TPager)(unsafe.Pointer(pPager)).FeLock = uint8(libc.Int32FromInt32(EXCLUSIVE_LOCK) + libc.Int32FromInt32(1))
}
/* The pager state may be changed from PAGER_ERROR to PAGER_OPEN here
** without clearing the error code. This is intentional - the error
** code is cleared and the cache reset in the block below.
*/
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_OPEN)
}
}
/* If Pager.errCode is set, the contents of the pager cache cannot be
** trusted. Now that there are no outstanding references to the pager,
** it can safely move back to PAGER_OPEN state. This happens in both
** normal and exclusive-locking mode.
*/
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
if int32((*TPager)(unsafe.Pointer(pPager)).FtempFile) == 0 {
_pager_reset(tls, pPager)
(*TPager)(unsafe.Pointer(pPager)).FchangeCountDone = uint8(0)
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_OPEN)
} else {
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) {
v2 = PAGER_OPEN
} else {
v2 = int32(PAGER_READER)
}
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(v2)
}
if (*TPager)(unsafe.Pointer(pPager)).FbUseFetch != 0 {
_sqlite3OsUnfetch(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, 0, uintptr(0))
}
(*TPager)(unsafe.Pointer(pPager)).FerrCode = SQLITE_OK
_setGetterMethod(tls, pPager)
}
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
(*TPager)(unsafe.Pointer(pPager)).FjournalHdr = 0
(*TPager)(unsafe.Pointer(pPager)).FsetSuper = uint8(0)
}
// C documentation
//
// /*
// ** This function is called whenever an IOERR or FULL error that requires
// ** the pager to transition into the ERROR state may have occurred.
// ** The first argument is a pointer to the pager structure, the second
// ** the error-code about to be returned by a pager API function. The
// ** value returned is a copy of the second argument to this function.
// **
// ** If the second argument is SQLITE_FULL, SQLITE_IOERR or one of the
// ** IOERR sub-codes, the pager enters the ERROR state and the error code
// ** is stored in Pager.errCode. While the pager remains in the ERROR state,
// ** all major API calls on the Pager will immediately return Pager.errCode.
// **
// ** The ERROR state indicates that the contents of the pager-cache
// ** cannot be trusted. This state can be cleared by completely discarding
// ** the contents of the pager-cache. If a transaction was active when
// ** the persistent error occurred, then the rollback journal may need
// ** to be replayed to restore the contents of the database file (as if
// ** it were a hot-journal).
// */
func _pager_error(tls *libc.TLS, pPager uintptr, rc int32) (r int32) {
var rc2 int32
_ = rc2
rc2 = rc & int32(0xff)
if rc2 == int32(SQLITE_FULL) || rc2 == int32(SQLITE_IOERR) {
(*TPager)(unsafe.Pointer(pPager)).FerrCode = rc
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_ERROR)
_setGetterMethod(tls, pPager)
}
return rc
}
// C documentation
//
// /*
// ** The write transaction open on pPager is being committed (bCommit==1)
// ** or rolled back (bCommit==0).
// **
// ** Return TRUE if and only if all dirty pages should be flushed to disk.
// **
// ** Rules:
// **
// ** * For non-TEMP databases, always sync to disk. This is necessary
// ** for transactions to be durable.
// **
// ** * Sync TEMP database only on a COMMIT (not a ROLLBACK) when the backing
// ** file has been created already (via a spill on pagerStress()) and
// ** when the number of dirty pages in memory exceeds 25% of the total
// ** cache size.
// */
func _pagerFlushOnCommit(tls *libc.TLS, pPager uintptr, bCommit int32) (r int32) {
if int32((*TPager)(unsafe.Pointer(pPager)).FtempFile) == 0 {
return int32(1)
}
if !(bCommit != 0) {
return 0
}
if !((*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != libc.UintptrFromInt32(0)) {
return 0
}
return libc.BoolInt32(_sqlite3PCachePercentDirty(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache) >= int32(25))
}
// C documentation
//
// /*
// ** This routine ends a transaction. A transaction is usually ended by
// ** either a COMMIT or a ROLLBACK operation. This routine may be called
// ** after rollback of a hot-journal, or if an error occurs while opening
// ** the journal file or writing the very first journal-header of a
// ** database transaction.
// **
// ** This routine is never called in PAGER_ERROR state. If it is called
// ** in PAGER_NONE or PAGER_SHARED state and the lock held is less
// ** exclusive than a RESERVED lock, it is a no-op.
// **
// ** Otherwise, any active savepoints are released.
// **
// ** If the journal file is open, then it is "finalized". Once a journal
// ** file has been finalized it is not possible to use it to roll back a
// ** transaction. Nor will it be considered to be a hot-journal by this
// ** or any other database connection. Exactly how a journal is finalized
// ** depends on whether or not the pager is running in exclusive mode and
// ** the current journal-mode (Pager.journalMode value), as follows:
// **
// ** journalMode==MEMORY
// ** Journal file descriptor is simply closed. This destroys an
// ** in-memory journal.
// **
// ** journalMode==TRUNCATE
// ** Journal file is truncated to zero bytes in size.
// **
// ** journalMode==PERSIST
// ** The first 28 bytes of the journal file are zeroed. This invalidates
// ** the first journal header in the file, and hence the entire journal
// ** file. An invalid journal file cannot be rolled back.
// **
// ** journalMode==DELETE
// ** The journal file is closed and deleted using sqlite3OsDelete().
// **
// ** If the pager is running in exclusive mode, this method of finalizing
// ** the journal file is never used. Instead, if the journalMode is
// ** DELETE and the pager is in exclusive mode, the method described under
// ** journalMode==PERSIST is used instead.
// **
// ** After the journal is finalized, the pager moves to PAGER_READER state.
// ** If running in non-exclusive rollback mode, the lock on the file is
// ** downgraded to a SHARED_LOCK.
// **
// ** SQLITE_OK is returned if no error occurs. If an error occurs during
// ** any of the IO operations to finalize the journal file or unlock the
// ** database then the IO error code is returned to the user. If the
// ** operation to finalize the journal file fails, then the code still
// ** tries to unlock the database file if not in exclusive mode. If the
// ** unlock operation fails as well, then the first error code related
// ** to the first error encountered (the journal finalization one) is
// ** returned.
// */
func _pager_end_transaction(tls *libc.TLS, pPager uintptr, hasSuper int32, bCommit int32) (r int32) {
var bDelete, rc, rc2, v1 int32
_, _, _, _ = bDelete, rc, rc2, v1
rc = SQLITE_OK /* Error code from journal finalization operation */
rc2 = SQLITE_OK /* Error code from db file unlock operation */
/* Do nothing if the pager does not have an open write transaction
** or at least a RESERVED lock. This function may be called when there
** is no write-transaction active but a RESERVED or greater lock is
** held under two circumstances:
**
** 1. After a successful hot-journal rollback, it is called with
** eState==PAGER_NONE and eLock==EXCLUSIVE_LOCK.
**
** 2. If a connection with locking_mode=exclusive holding an EXCLUSIVE
** lock switches back to locking_mode=normal and then executes a
** read-transaction, this function is called with eState==PAGER_READER
** and eLock==EXCLUSIVE_LOCK when the read-transaction is closed.
*/
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) < int32(PAGER_WRITER_LOCKED) && int32((*TPager)(unsafe.Pointer(pPager)).FeLock) < int32(RESERVED_LOCK) {
return SQLITE_OK
}
_releaseAllSavepoints(tls, pPager)
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) {
/* Finalize the journal file. */
if _sqlite3JournalIsInMemory(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd) != 0 {
/* assert( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ); */
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
} else {
if int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_TRUNCATE) {
if (*TPager)(unsafe.Pointer(pPager)).FjournalOff == 0 {
rc = SQLITE_OK
} else {
rc = _sqlite3OsTruncate(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, 0)
if rc == SQLITE_OK && (*TPager)(unsafe.Pointer(pPager)).FfullSync != 0 {
/* Make sure the new file size is written into the inode right away.
** Otherwise the journal might resurrect following a power loss and
** cause the last transaction to roll back. See
** https://bugzilla.mozilla.org/show_bug.cgi?id=1072773
*/
rc = _sqlite3OsSync(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, int32((*TPager)(unsafe.Pointer(pPager)).FsyncFlags))
}
}
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
} else {
if int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_PERSIST) || (*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0 && int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) != int32(PAGER_JOURNALMODE_WAL) {
rc = _zeroJournalHdr(tls, pPager, libc.BoolInt32(hasSuper != 0 || (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0))
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
} else {
/* This branch may be executed with Pager.journalMode==MEMORY if
** a hot-journal was just rolled back. In this case the journal
** file should be closed and deleted. If this connection writes to
** the database file, it will do so using an in-memory journal.
*/
bDelete = libc.BoolInt32(!((*TPager)(unsafe.Pointer(pPager)).FtempFile != 0))
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
if bDelete != 0 {
rc = _sqlite3OsDelete(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, (*TPager)(unsafe.Pointer(pPager)).FzJournal, int32((*TPager)(unsafe.Pointer(pPager)).FextraSync))
}
}
}
}
}
_sqlite3BitvecDestroy(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal)
(*TPager)(unsafe.Pointer(pPager)).FpInJournal = uintptr(0)
(*TPager)(unsafe.Pointer(pPager)).FnRec = 0
if rc == SQLITE_OK {
if (*TPager)(unsafe.Pointer(pPager)).FmemDb != 0 || _pagerFlushOnCommit(tls, pPager, bCommit) != 0 {
_sqlite3PcacheCleanAll(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
} else {
_sqlite3PcacheClearWritable(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
}
_sqlite3PcacheTruncate(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache, (*TPager)(unsafe.Pointer(pPager)).FdbSize)
}
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
/* Drop the WAL write-lock, if any. Also, if the connection was in
** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE
** lock held on the database file.
*/
rc2 = _sqlite3WalEndWriteTransaction(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
} else {
if rc == SQLITE_OK && bCommit != 0 && (*TPager)(unsafe.Pointer(pPager)).FdbFileSize > (*TPager)(unsafe.Pointer(pPager)).FdbSize {
/* This branch is taken when committing a transaction in rollback-journal
** mode if the database file on disk is larger than the database image.
** At this point the journal has been finalized and the transaction
** successfully committed, but the EXCLUSIVE lock is still held on the
** file. So it is safe to truncate the database file to its minimum
** required size. */
rc = _pager_truncate(tls, pPager, (*TPager)(unsafe.Pointer(pPager)).FdbSize)
}
}
if rc == SQLITE_OK && bCommit != 0 {
rc = _sqlite3OsFileControl(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32(SQLITE_FCNTL_COMMIT_PHASETWO), uintptr(0))
if rc == int32(SQLITE_NOTFOUND) {
rc = SQLITE_OK
}
}
if !((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0) && (!((*TPager)(unsafe.Pointer(pPager)).FpWal != libc.UintptrFromInt32(0)) || _sqlite3WalExclusiveMode(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, 0) != 0) {
rc2 = _pagerUnlockDb(tls, pPager, int32(SHARED_LOCK))
}
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_READER)
(*TPager)(unsafe.Pointer(pPager)).FsetSuper = uint8(0)
if rc == SQLITE_OK {
v1 = rc2
} else {
v1 = rc
}
return v1
}
// C documentation
//
// /*
// ** Execute a rollback if a transaction is active and unlock the
// ** database file.
// **
// ** If the pager has already entered the ERROR state, do not attempt
// ** the rollback at this time. Instead, pager_unlock() is called. The
// ** call to pager_unlock() will discard all in-memory pages, unlock
// ** the database file and move the pager back to OPEN state. If this
// ** means that there is a hot-journal left in the file-system, the next
// ** connection to obtain a shared lock on the pager (which may be this one)
// ** will roll it back.
// **
// ** If the pager has not already entered the ERROR state, but an IO or
// ** malloc error occurs during a rollback, then this will itself cause
// ** the pager to enter the ERROR state. Which will be cleared by the
// ** call to pager_unlock(), as described above.
// */
func _pagerUnlockAndRollback(tls *libc.TLS, pPager uintptr) {
var eLock Tu8
var errCode int32
_, _ = eLock, errCode
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) != int32(PAGER_ERROR) && int32((*TPager)(unsafe.Pointer(pPager)).FeState) != PAGER_OPEN {
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) >= int32(PAGER_WRITER_LOCKED) {
_sqlite3BeginBenignMalloc(tls)
_sqlite3PagerRollback(tls, pPager)
_sqlite3EndBenignMalloc(tls)
} else {
if !((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0) {
_pager_end_transaction(tls, pPager, 0, 0)
}
}
} else {
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_ERROR) && int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_MEMORY) && (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) {
/* Special case for a ROLLBACK due to I/O error with an in-memory
** journal: We have to rollback immediately, before the journal is
** closed, because once it is closed, all content is forgotten. */
errCode = (*TPager)(unsafe.Pointer(pPager)).FerrCode
eLock = (*TPager)(unsafe.Pointer(pPager)).FeLock
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_OPEN)
(*TPager)(unsafe.Pointer(pPager)).FerrCode = SQLITE_OK
(*TPager)(unsafe.Pointer(pPager)).FeLock = uint8(EXCLUSIVE_LOCK)
_pager_playback(tls, pPager, int32(1))
(*TPager)(unsafe.Pointer(pPager)).FerrCode = errCode
(*TPager)(unsafe.Pointer(pPager)).FeLock = eLock
}
}
_pager_unlock(tls, pPager)
}
// C documentation
//
// /*
// ** Parameter aData must point to a buffer of pPager->pageSize bytes
// ** of data. Compute and return a checksum based on the contents of the
// ** page of data and the current value of pPager->cksumInit.
// **
// ** This is not a real checksum. It is really just the sum of the
// ** random initial value (pPager->cksumInit) and every 200th byte
// ** of the page data, starting with byte offset (pPager->pageSize%200).
// ** Each byte is interpreted as an 8-bit unsigned integer.
// **
// ** Changing the formula used to compute this checksum results in an
// ** incompatible journal file format.
// **
// ** If journal corruption occurs due to a power failure, the most likely
// ** scenario is that one end or the other of the record will be changed.
// ** It is much less likely that the two ends of the journal record will be
// ** correct and the middle be corrupt. Thus, this "checksum" scheme,
// ** though fast and simple, catches the most likely kind of corruption.
// */
func _pager_cksum(tls *libc.TLS, pPager uintptr, aData uintptr) (r Tu32) {
var cksum Tu32
var i int32
_, _ = cksum, i
cksum = (*TPager)(unsafe.Pointer(pPager)).FcksumInit /* Checksum value to return */
i = int32((*TPager)(unsafe.Pointer(pPager)).FpageSize - int64(200)) /* Loop counter */
for i > 0 {
cksum += uint32(*(*Tu8)(unsafe.Pointer(aData + uintptr(i))))
i -= int32(200)
}
return cksum
}
// C documentation
//
// /*
// ** Read a single page from either the journal file (if isMainJrnl==1) or
// ** from the sub-journal (if isMainJrnl==0) and playback that page.
// ** The page begins at offset *pOffset into the file. The *pOffset
// ** value is increased to the start of the next page in the journal.
// **
// ** The main rollback journal uses checksums - the statement journal does
// ** not.
// **
// ** If the page number of the page record read from the (sub-)journal file
// ** is greater than the current value of Pager.dbSize, then playback is
// ** skipped and SQLITE_OK is returned.
// **
// ** If pDone is not NULL, then it is a record of pages that have already
// ** been played back. If the page at *pOffset has already been played back
// ** (if the corresponding pDone bit is set) then skip the playback.
// ** Make sure the pDone bit corresponding to the *pOffset page is set
// ** prior to returning.
// **
// ** If the page record is successfully read from the (sub-)journal file
// ** and played back, then SQLITE_OK is returned. If an IO error occurs
// ** while reading the record from the (sub-)journal file or while writing
// ** to the database file, then the IO error code is returned. If data
// ** is successfully read from the (sub-)journal file but appears to be
// ** corrupted, SQLITE_DONE is returned. Data is considered corrupted in
// ** two circumstances:
// **
// ** * If the record page-number is illegal (0 or PAGER_SJ_PGNO), or
// ** * If the record is being rolled back from the main journal file
// ** and the checksum field does not match the record content.
// **
// ** Neither of these two scenarios are possible during a savepoint rollback.
// **
// ** If this is a savepoint rollback, then memory may have to be dynamically
// ** allocated by this function. If this is the case and an allocation fails,
// ** SQLITE_NOMEM is returned.
// */
func _pager_playback_one_page(tls *libc.TLS, pPager uintptr, pOffset uintptr, pDone uintptr, isMainJrnl int32, isSavepnt int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var aData, jfd, pData, v1, p4, p5 uintptr
var isSynced, rc, v2 int32
var ofst Ti64
var v3 bool
var _ /* cksum at bp+12 */ Tu32
var _ /* pPg at bp+0 */ uintptr
var _ /* pgno at bp+8 */ TPgno
_, _, _, _, _, _, _, _, _, _, _ = aData, isSynced, jfd, ofst, pData, rc, v1, v2, v3, p4, p5 /* True if journal page is synced */
/* isMainJrnl is 0 or 1 */
/* isSavepnt is 0 or 1 */
/* pDone always used on sub-journals */
/* pDone never used on non-savepoint */
aData = (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace
/* Temp storage must have already been allocated */
/* Either the state is greater than PAGER_WRITER_CACHEMOD (a transaction
** or savepoint rollback done at the request of the caller) or this is
** a hot-journal rollback. If it is a hot-journal rollback, the pager
** is in state OPEN and holds an EXCLUSIVE lock. Hot-journal rollback
** only reads from the main journal, not the sub-journal.
*/
/* Read the page number and page data from the journal or sub-journal
** file. Return an error code to the caller if an IO error occurs.
*/
if isMainJrnl != 0 {
v1 = (*TPager)(unsafe.Pointer(pPager)).Fjfd
} else {
v1 = (*TPager)(unsafe.Pointer(pPager)).Fsjfd
}
jfd = v1
rc = _read32bits(tls, jfd, *(*Ti64)(unsafe.Pointer(pOffset)), bp+8)
if rc != SQLITE_OK {
return rc
}
rc = _sqlite3OsRead(tls, jfd, aData, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), *(*Ti64)(unsafe.Pointer(pOffset))+int64(4))
if rc != SQLITE_OK {
return rc
}
*(*Ti64)(unsafe.Pointer(pOffset)) += (*TPager)(unsafe.Pointer(pPager)).FpageSize + int64(4) + int64(isMainJrnl*int32(4))
/* Sanity checking on the page. This is more important than I originally
** thought. If a power failure occurs while the journal is being written,
** it could cause invalid data to be written into the journal. We need to
** detect this invalid data (with high probability) and ignore it.
*/
if *(*TPgno)(unsafe.Pointer(bp + 8)) == uint32(0) || *(*TPgno)(unsafe.Pointer(bp + 8)) == (*TPager)(unsafe.Pointer(pPager)).FlckPgno {
return int32(SQLITE_DONE)
}
if *(*TPgno)(unsafe.Pointer(bp + 8)) > (*TPager)(unsafe.Pointer(pPager)).FdbSize || _sqlite3BitvecTest(tls, pDone, *(*TPgno)(unsafe.Pointer(bp + 8))) != 0 {
return SQLITE_OK
}
if isMainJrnl != 0 {
rc = _read32bits(tls, jfd, *(*Ti64)(unsafe.Pointer(pOffset))-int64(4), bp+12)
if rc != 0 {
return rc
}
if !(isSavepnt != 0) && _pager_cksum(tls, pPager, aData) != *(*Tu32)(unsafe.Pointer(bp + 12)) {
return int32(SQLITE_DONE)
}
}
/* If this page has already been played back before during the current
** rollback, then don't bother to play it back again.
*/
if v3 = pDone != 0; v3 {
v2 = _sqlite3BitvecSet(tls, pDone, *(*TPgno)(unsafe.Pointer(bp + 8)))
rc = v2
}
if v3 && v2 != SQLITE_OK {
return rc
}
/* When playing back page 1, restore the nReserve setting
*/
if *(*TPgno)(unsafe.Pointer(bp + 8)) == uint32(1) && int32((*TPager)(unsafe.Pointer(pPager)).FnReserve) != int32(*(*Tu8)(unsafe.Pointer(aData + 20))) {
(*TPager)(unsafe.Pointer(pPager)).FnReserve = int16(*(*Tu8)(unsafe.Pointer(aData + 20)))
}
/* If the pager is in CACHEMOD state, then there must be a copy of this
** page in the pager cache. In this case just update the pager cache,
** not the database file. The page is left marked dirty in this case.
**
** An exception to the above rule: If the database is in no-sync mode
** and a page is moved during an incremental vacuum then the page may
** not be in the pager cache. Later: if a malloc() or IO error occurs
** during a Movepage() call, then the page may not be in the cache
** either. So the condition described in the above paragraph is not
** assert()able.
**
** If in WRITER_DBMOD, WRITER_FINISHED or OPEN state, then we update the
** pager cache if it exists and the main file. The page is then marked
** not dirty. Since this code is only executed in PAGER_OPEN state for
** a hot-journal rollback, it is guaranteed that the page-cache is empty
** if the pager is in OPEN state.
**
** Ticket #1171: The statement journal might contain page content that is
** different from the page content at the start of the transaction.
** This occurs when a page is changed prior to the start of a statement
** then changed again within the statement. When rolling back such a
** statement we must not write to the original database unless we know
** for certain that original page contents are synced into the main rollback
** journal. Otherwise, a power loss might leave modified data in the
** database file without an entry in the rollback journal that can
** restore the database to its original form. Two conditions must be
** met before writing to the database files. (1) the database must be
** locked. (2) we know that the original page content is fully synced
** in the main journal either because the page is not in cache or else
** the page is marked as needSync==0.
**
** 2008-04-14: When attempting to vacuum a corrupt database file, it
** is possible to fail a statement on a database that does not yet exist.
// ** Do not attempt to write if the database file has never been opened.
*/
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
*(*uintptr)(unsafe.Pointer(bp)) = _sqlite3PagerLookup(tls, pPager, *(*TPgno)(unsafe.Pointer(bp + 8)))
}
if isMainJrnl != 0 {
isSynced = libc.BoolInt32((*TPager)(unsafe.Pointer(pPager)).FnoSync != 0 || *(*Ti64)(unsafe.Pointer(pOffset)) <= (*TPager)(unsafe.Pointer(pPager)).FjournalHdr)
} else {
isSynced = libc.BoolInt32(*(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) || 0 == int32((*TPgHdr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fflags)&int32(PGHDR_NEED_SYNC))
}
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != uintptr(0) && (int32((*TPager)(unsafe.Pointer(pPager)).FeState) >= int32(PAGER_WRITER_DBMOD) || int32((*TPager)(unsafe.Pointer(pPager)).FeState) == PAGER_OPEN) && isSynced != 0 {
ofst = int64(*(*TPgno)(unsafe.Pointer(bp + 8))-libc.Uint32FromInt32(1)) * (*TPager)(unsafe.Pointer(pPager)).FpageSize
/* Write the data read from the journal back into the database file.
** This is usually safe even for an encrypted database - as the data
** was encrypted before it was written to the journal file. The exception
** is if the data was just read from an in-memory sub-journal. In that
** case it must be encrypted here before it is copied into the database
** file. */
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, aData, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), ofst)
if *(*TPgno)(unsafe.Pointer(bp + 8)) > (*TPager)(unsafe.Pointer(pPager)).FdbFileSize {
(*TPager)(unsafe.Pointer(pPager)).FdbFileSize = *(*TPgno)(unsafe.Pointer(bp + 8))
}
if (*TPager)(unsafe.Pointer(pPager)).FpBackup != 0 {
_sqlite3BackupUpdate(tls, (*TPager)(unsafe.Pointer(pPager)).FpBackup, *(*TPgno)(unsafe.Pointer(bp + 8)), aData)
}
} else {
if !(isMainJrnl != 0) && *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) {
/* If this is a rollback of a savepoint and data was not written to
** the database and the page is not in-memory, there is a potential
** problem. When the page is next fetched by the b-tree layer, it
** will be read from the database file, which may or may not be
** current.
**
** There are a couple of different ways this can happen. All are quite
** obscure. When running in synchronous mode, this can only happen
** if the page is on the free-list at the start of the transaction, then
** populated, then moved using sqlite3PagerMovepage().
**
** The solution is to add an in-memory page to the cache containing
** the data just read from the sub-journal. Mark the page as dirty
** and if the pager requires a journal-sync, then mark the page as
** requiring a journal-sync before it is written.
*/
p4 = pPager + 25
*(*Tu8)(unsafe.Pointer(p4)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p4))) | libc.Int32FromInt32(SPILLFLAG_ROLLBACK))
rc = _sqlite3PagerGet(tls, pPager, *(*TPgno)(unsafe.Pointer(bp + 8)), bp, int32(1))
p5 = pPager + 25
*(*Tu8)(unsafe.Pointer(p5)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p5))) & ^libc.Int32FromInt32(SPILLFLAG_ROLLBACK))
if rc != SQLITE_OK {
return rc
}
_sqlite3PcacheMakeDirty(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
}
if *(*uintptr)(unsafe.Pointer(bp)) != 0 {
pData = (*TPgHdr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpData
libc.Xmemcpy(tls, pData, aData, uint64((*TPager)(unsafe.Pointer(pPager)).FpageSize))
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TPager)(unsafe.Pointer(pPager)).FxReiniter})))(tls, *(*uintptr)(unsafe.Pointer(bp)))
/* It used to be that sqlite3PcacheMakeClean(pPg) was called here. But
** that call was dangerous and had no detectable benefit since the cache
** is normally cleaned by sqlite3PcacheCleanAll() after rollback and so
** has been removed. */
/* If this was page 1, then restore the value of Pager.dbFileVers.
** Do this before any decoding. */
if *(*TPgno)(unsafe.Pointer(bp + 8)) == uint32(1) {
libc.Xmemcpy(tls, pPager+136, pData+24, uint64(16))
}
_sqlite3PcacheRelease(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
return rc
}
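// Illustrative sketch, not part of the generated amalgamation: the layout of
// one main-journal record as read by _pager_playback_one_page above. A record
// is a 4-byte big-endian page number, pageSize bytes of page data, and a
// 4-byte big-endian checksum produced by the formula shown in
// examplePagerCksum. The record is treated as corrupt when the stored
// checksum does not match.
func exampleDecodeJournalRecord(rec []byte, pageSize int, cksumInit uint32) (pgno uint32, data []byte, ok bool) {
	if len(rec) < 4+pageSize+4 {
		return 0, nil, false
	}
	pgno = uint32(rec[0])<<24 | uint32(rec[1])<<16 | uint32(rec[2])<<8 | uint32(rec[3])
	data = rec[4 : 4+pageSize]
	c := rec[4+pageSize:]
	stored := uint32(c[0])<<24 | uint32(c[1])<<16 | uint32(c[2])<<8 | uint32(c[3])
	return pgno, data, examplePagerCksum(cksumInit, data) == stored
}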
// C documentation
//
// /*
// ** Parameter zSuper is the name of a super-journal file. A single journal
// ** file that referred to the super-journal file has just been rolled back.
// ** This routine checks if it is possible to delete the super-journal file,
// ** and does so if it is.
// **
// ** Argument zSuper may point to Pager.pTmpSpace. So that buffer is not
// ** available for use within this function.
// **
// ** When a super-journal file is created, it is populated with the names
// ** of all of its child journals, one after another, formatted as utf-8
// ** encoded text. The end of each child journal file name is marked with a
// ** nul-terminator byte (0x00). i.e. the entire contents of a super-journal
// ** file for a transaction involving two databases might be:
// **
// ** "/home/bill/a.db-journal\x00/home/bill/b.db-journal\x00"
// **
// ** A super-journal file may only be deleted once all of its child
// ** journals have been rolled back.
// **
// ** This function reads the contents of the super-journal file into
// ** memory and loops through each of the child journal names. For
// ** each child journal, it checks if:
// **
// ** * the child journal exists, and if so
// ** * the child journal contains a reference to super-journal
// ** file zSuper
// **
// ** If a child journal can be found that matches both of the criteria
// ** above, this function returns without doing anything. Otherwise, if
// ** no such child journal can be found, file zSuper is deleted from
// ** the file-system using sqlite3OsDelete().
// **
// ** If an IO error occurs within this function, an error code is returned. This
// ** function allocates memory by calling sqlite3Malloc(). If an allocation
// ** fails, SQLITE_NOMEM is returned. Otherwise, if no IO or malloc errors
// ** occur, SQLITE_OK is returned.
// **
// ** TODO: This function allocates a single block of memory to load
// ** the entire contents of the super-journal file. This could be
// ** a couple of kilobytes or so - potentially larger than the page
// ** size.
// */
func _pager_delsuper(tls *libc.TLS, pPager uintptr, zSuper uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var c, flags, flags1, nSuperPtr, rc int32
var pJournal, pSuper, pVfs, zFree, zJournal, zSuperJournal, zSuperPtr uintptr
var v1, v2, v3 int8
var _ /* exists at bp+8 */ int32
var _ /* nSuperJournal at bp+0 */ Ti64
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = c, flags, flags1, nSuperPtr, pJournal, pSuper, pVfs, rc, zFree, zJournal, zSuperJournal, zSuperPtr, v1, v2, v3
pVfs = (*TPager)(unsafe.Pointer(pPager)).FpVfs /* Malloc'd child-journal file descriptor */
zSuperJournal = uintptr(0) /* Space to hold super-journal filename */
zFree = uintptr(0) /* Amount of space allocated to zSuperPtr[] */
/* Allocate space for both the pJournal and pSuper file descriptors.
** If successful, open the super-journal file for reading.
*/
pSuper = _sqlite3MallocZero(tls, uint64((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FszOsFile*int32(2)))
if !(pSuper != 0) {
rc = int32(SQLITE_NOMEM)
pJournal = uintptr(0)
} else {
flags = libc.Int32FromInt32(SQLITE_OPEN_READONLY) | libc.Int32FromInt32(SQLITE_OPEN_SUPER_JOURNAL)
rc = _sqlite3OsOpen(tls, pVfs, zSuper, pSuper, flags, uintptr(0))
pJournal = pSuper + uintptr((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FszOsFile)
}
if rc != SQLITE_OK {
goto delsuper_out
}
/* Load the entire super-journal file into space obtained from
** sqlite3_malloc() and pointed to by zSuperJournal. Also obtain
** sufficient space (in zSuperPtr) to hold the names of super-journal
** files extracted from regular rollback-journals.
*/
rc = _sqlite3OsFileSize(tls, pSuper, bp)
if rc != SQLITE_OK {
goto delsuper_out
}
nSuperPtr = (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FmxPathname + int32(1)
zFree = _sqlite3Malloc(tls, uint64(int64(4)+*(*Ti64)(unsafe.Pointer(bp))+int64(nSuperPtr)+int64(2)))
if !(zFree != 0) {
rc = int32(SQLITE_NOMEM)
goto delsuper_out
}
v3 = libc.Int8FromInt32(0)
*(*int8)(unsafe.Pointer(zFree + 3)) = v3
v2 = v3
*(*int8)(unsafe.Pointer(zFree + 2)) = v2
v1 = v2
*(*int8)(unsafe.Pointer(zFree + 1)) = v1
*(*int8)(unsafe.Pointer(zFree)) = v1
zSuperJournal = zFree + 4
zSuperPtr = zSuperJournal + uintptr(*(*Ti64)(unsafe.Pointer(bp))+int64(2))
rc = _sqlite3OsRead(tls, pSuper, zSuperJournal, int32(*(*Ti64)(unsafe.Pointer(bp))), 0)
if rc != SQLITE_OK {
goto delsuper_out
}
*(*int8)(unsafe.Pointer(zSuperJournal + uintptr(*(*Ti64)(unsafe.Pointer(bp))))) = 0
*(*int8)(unsafe.Pointer(zSuperJournal + uintptr(*(*Ti64)(unsafe.Pointer(bp))+int64(1)))) = 0
zJournal = zSuperJournal
for int64(zJournal)-int64(zSuperJournal) < *(*Ti64)(unsafe.Pointer(bp)) {
rc = _sqlite3OsAccess(tls, pVfs, zJournal, SQLITE_ACCESS_EXISTS, bp+8)
if rc != SQLITE_OK {
goto delsuper_out
}
if *(*int32)(unsafe.Pointer(bp + 8)) != 0 {
flags1 = libc.Int32FromInt32(SQLITE_OPEN_READONLY) | libc.Int32FromInt32(SQLITE_OPEN_SUPER_JOURNAL)
rc = _sqlite3OsOpen(tls, pVfs, zJournal, pJournal, flags1, uintptr(0))
if rc != SQLITE_OK {
goto delsuper_out
}
rc = _readSuperJournal(tls, pJournal, zSuperPtr, uint32(nSuperPtr))
_sqlite3OsClose(tls, pJournal)
if rc != SQLITE_OK {
goto delsuper_out
}
c = libc.BoolInt32(int32(*(*int8)(unsafe.Pointer(zSuperPtr))) != 0 && libc.Xstrcmp(tls, zSuperPtr, zSuper) == 0)
if c != 0 {
/* We have a match. Do not delete the super-journal file. */
goto delsuper_out
}
}
zJournal += uintptr(_sqlite3Strlen30(tls, zJournal) + libc.Int32FromInt32(1))
}
_sqlite3OsClose(tls, pSuper)
rc = _sqlite3OsDelete(tls, pVfs, zSuper, 0)
goto delsuper_out
delsuper_out:
;
Xsqlite3_free(tls, zFree)
if pSuper != 0 {
_sqlite3OsClose(tls, pSuper)
Xsqlite3_free(tls, pSuper)
}
return rc
}
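// Illustrative sketch, not part of the generated amalgamation: the traversal
// _pager_delsuper performs over the super-journal image, which is a sequence
// of nul-terminated child-journal paths laid out back to back.
func exampleSuperJournalNames(image []byte) []string {
	var names []string
	start := 0
	for i := 0; i < len(image); i++ {
		if image[i] == 0 {
			if i > start {
				names = append(names, string(image[start:i]))
			}
			start = i + 1
		}
	}
	return names
}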
// C documentation
//
// /*
// ** This function is used to change the actual size of the database
// ** file in the file-system. This only happens when committing a transaction,
// ** or rolling back a transaction (including rolling back a hot-journal).
// **
// ** If the main database file is not open, or the pager is not in either
// ** DBMOD or OPEN state, this function is a no-op. Otherwise, the size
// ** of the file is changed to nPage pages (nPage*pPager->pageSize bytes).
// ** If the file on disk is currently larger than nPage pages, then use the VFS
// ** xTruncate() method to truncate it.
// **
// ** Or, it might be the case that the file on disk is smaller than
// ** nPage pages. Some operating system implementations can get confused if
// ** you try to truncate a file to some size that is larger than it
// ** currently is, so detect this case and write a single zero byte to
// ** the end of the new file instead.
// **
// ** If successful, return SQLITE_OK. If an IO error occurs while modifying
// ** the database file, return the error code to the caller.
// */
func _pager_truncate(tls *libc.TLS, pPager uintptr, nPage TPgno) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pTmp uintptr
var rc, szPage int32
var _ /* currentSize at bp+0 */ Ti64
var _ /* newSize at bp+8 */ Ti64
_, _, _ = pTmp, rc, szPage
rc = SQLITE_OK
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != uintptr(0) && (int32((*TPager)(unsafe.Pointer(pPager)).FeState) >= int32(PAGER_WRITER_DBMOD) || int32((*TPager)(unsafe.Pointer(pPager)).FeState) == PAGER_OPEN) {
szPage = int32((*TPager)(unsafe.Pointer(pPager)).FpageSize)
/* TODO: Is it safe to use Pager.dbFileSize here? */
rc = _sqlite3OsFileSize(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, bp)
*(*Ti64)(unsafe.Pointer(bp + 8)) = int64(szPage) * int64(nPage)
if rc == SQLITE_OK && *(*Ti64)(unsafe.Pointer(bp)) != *(*Ti64)(unsafe.Pointer(bp + 8)) {
if *(*Ti64)(unsafe.Pointer(bp)) > *(*Ti64)(unsafe.Pointer(bp + 8)) {
rc = _sqlite3OsTruncate(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, *(*Ti64)(unsafe.Pointer(bp + 8)))
} else {
if *(*Ti64)(unsafe.Pointer(bp))+int64(szPage) <= *(*Ti64)(unsafe.Pointer(bp + 8)) {
pTmp = (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace
libc.Xmemset(tls, pTmp, 0, uint64(szPage))
_sqlite3OsFileControlHint(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32(SQLITE_FCNTL_SIZE_HINT), bp+8)
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, pTmp, szPage, *(*Ti64)(unsafe.Pointer(bp + 8))-int64(szPage))
}
}
if rc == SQLITE_OK {
(*TPager)(unsafe.Pointer(pPager)).FdbFileSize = nPage
}
}
}
return rc
}
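// Illustrative sketch, not part of the generated amalgamation: the decision
// _pager_truncate makes above. Shrinking uses xTruncate(); growing is done by
// writing one zeroed page at the new end of the file, because some systems
// mishandle truncating a file to a size larger than its current size.
func exampleTruncateDecision(currentSize, pageSize, nPage int64) (truncateTo, writeZeroPageAt int64, noop bool) {
	newSize := pageSize * nPage
	switch {
	case currentSize == newSize:
		return 0, 0, true
	case currentSize > newSize:
		return newSize, 0, false // shrink with xTruncate()
	case currentSize+pageSize <= newSize:
		return 0, newSize - pageSize, false // grow by writing the final page
	default:
		return 0, 0, true // already within one page of the target
	}
}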
// C documentation
//
// /*
// ** Return a sanitized version of the sector-size of OS file pFile. The
// ** return value is guaranteed to lie between 32 and MAX_SECTOR_SIZE.
// */
func _sqlite3SectorSize(tls *libc.TLS, pFile uintptr) (r int32) {
var iRet int32
_ = iRet
iRet = _sqlite3OsSectorSize(tls, pFile)
if iRet < int32(32) {
iRet = int32(512)
} else {
if iRet > int32(MAX_SECTOR_SIZE) {
iRet = int32(MAX_SECTOR_SIZE)
}
}
return iRet
}
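// Illustrative sketch, not part of the generated amalgamation: the clamping
// rule applied by _sqlite3SectorSize above. A reported value below 32 falls
// back to the traditional 512-byte sector, and values above MAX_SECTOR_SIZE
// are capped.
func exampleSanitizeSectorSize(reported int32) int32 {
	if reported < int32(32) {
		return int32(512)
	}
	if reported > int32(MAX_SECTOR_SIZE) {
		return int32(MAX_SECTOR_SIZE)
	}
	return reported
}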
// C documentation
//
// /*
// ** Set the value of the Pager.sectorSize variable for the given
// ** pager based on the value returned by the xSectorSize method
// ** of the open database file. The sector size will be used
// ** to determine the size and alignment of journal header and
// ** super-journal pointers within created journal files.
// **
// ** For temporary files the effective sector size is always 512 bytes.
// **
// ** Otherwise, for non-temporary files, the effective sector size is
// ** the value returned by the xSectorSize() method rounded up to 32 if
// ** it is less than 32, or rounded down to MAX_SECTOR_SIZE if it
// ** is greater than MAX_SECTOR_SIZE.
// **
// ** If the file has the SQLITE_IOCAP_POWERSAFE_OVERWRITE property, then set
// ** the effective sector size to its minimum value (512). The purpose of
// ** pPager->sectorSize is to define the "blast radius" of bytes that
// ** might change if a crash occurs while writing to a single byte in
// ** that range. But with POWERSAFE_OVERWRITE, the blast radius is zero
// ** (that is what POWERSAFE_OVERWRITE means), so we minimize the sector
// ** size. For backwards compatibility of the rollback journal file format,
// ** we cannot reduce the effective sector size below 512.
// */
func _setSectorSize(tls *libc.TLS, pPager uintptr) {
if (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 || _sqlite3OsDeviceCharacteristics(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd)&int32(SQLITE_IOCAP_POWERSAFE_OVERWRITE) != 0 {
/* Sector size doesn't matter for temporary files. Also, the file
** may not have been opened yet, in which case the OsSectorSize()
** call will segfault. */
(*TPager)(unsafe.Pointer(pPager)).FsectorSize = uint32(512)
} else {
(*TPager)(unsafe.Pointer(pPager)).FsectorSize = uint32(_sqlite3SectorSize(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd))
}
}
// C documentation
//
// /*
// ** Playback the journal and thus restore the database file to
// ** the state it was in before we started making changes.
// **
// ** The journal file format is as follows:
// **
// ** (1) 8 byte prefix. A copy of aJournalMagic[].
// ** (2) 4 byte big-endian integer which is the number of valid page records
// ** in the journal. If this value is 0xffffffff, then compute the
// ** number of page records from the journal size.
// ** (3) 4 byte big-endian integer which is the initial value for the
// ** sanity checksum.
// ** (4) 4 byte integer which is the number of pages to truncate the
// ** database to during a rollback.
// ** (5) 4 byte big-endian integer which is the sector size. The header
// ** is this many bytes in size.
// ** (6) 4 byte big-endian integer which is the page size.
// ** (7) zero padding out to the next sector size.
// ** (8) Zero or more page instances, each as follows:
// ** + 4 byte page number.
// ** + pPager->pageSize bytes of data.
// ** + 4 byte checksum
// **
// ** When we speak of the journal header, we mean the first 7 items above.
// ** Each entry in the journal is an instance of the 8th item.
// **
// ** Call the value from the second bullet "nRec". nRec is the number of
// ** valid page entries in the journal. In most cases, you can compute the
// ** value of nRec from the size of the journal file. But if a power
// ** failure occurred while the journal was being written, it could be the
// ** case that the size of the journal file had already been increased but
// ** the extra entries had not yet made it safely to disk. In such a case,
// ** the value of nRec computed from the file size would be too large. For
// ** that reason, we always use the nRec value in the header.
// **
// ** If the nRec value is 0xffffffff it means that nRec should be computed
// ** from the file size. This value is used when the user selects the
// ** no-sync option for the journal. A power failure could lead to corruption
// ** in this case. But for things like temporary tables (which will be
// ** deleted when the power is restored) we don't care.
// **
// ** If the file opened as the journal file is not a well-formed
// ** journal file then all pages up to the first corrupted page are rolled
// ** back (or no pages if the journal header is corrupted). The journal file
// ** is then deleted and SQLITE_OK returned, just as if no corruption had
// ** been encountered.
// **
// ** If an I/O or malloc() error occurs, the journal-file is not deleted
// ** and an error code is returned.
// **
// ** The isHot parameter indicates that we are trying to rollback a journal
// ** that might be a hot journal. Or, it could be that the journal is
// ** preserved because of JOURNALMODE_PERSIST or JOURNALMODE_TRUNCATE.
// ** If the journal really is hot, reset the pager cache prior to rolling
// ** back any content. If the journal is merely persistent, no reset is
// ** needed.
// */
func _pager_playback(tls *libc.TLS, pPager uintptr, isHot int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var nPlayback, needPagerReset, rc int32
var pVfs, zSuper uintptr
var u Tu32
var _ /* mxPg at bp+12 */ TPgno
var _ /* nRec at bp+8 */ Tu32
var _ /* res at bp+16 */ int32
var _ /* savedPageSize at bp+20 */ Tu32
var _ /* szJ at bp+0 */ Ti64
_, _, _, _, _, _ = nPlayback, needPagerReset, pVfs, rc, u, zSuper
pVfs = (*TPager)(unsafe.Pointer(pPager)).FpVfs /* Unsigned loop counter */
*(*TPgno)(unsafe.Pointer(bp + 12)) = uint32(0) /* Result code of a subroutine */
*(*int32)(unsafe.Pointer(bp + 16)) = int32(1) /* Value returned by sqlite3OsAccess() */
zSuper = uintptr(0) /* True to reset page prior to first page rollback */
nPlayback = 0 /* Total number of pages restored from journal */
*(*Tu32)(unsafe.Pointer(bp + 20)) = uint32((*TPager)(unsafe.Pointer(pPager)).FpageSize)
/* Figure out how many records are in the journal. Abort early if
** the journal is empty.
*/
rc = _sqlite3OsFileSize(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, bp)
if rc != SQLITE_OK {
goto end_playback
}
/* Read the super-journal name from the journal, if it is present.
** If a super-journal file name is specified, but the file is not
** present on disk, then the journal is not hot and does not need to be
** played back.
**
** TODO: Technically the following is an error because it assumes that
** buffer Pager.pTmpSpace is (mxPathname+1) bytes or larger. i.e. that
** (pPager->pageSize >= pPager->pVfs->mxPathname+1). Using os_unix.c,
** mxPathname is 512, which is the same as the minimum allowable value
** for pageSize.
*/
zSuper = (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace
rc = _readSuperJournal(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, zSuper, uint32((*Tsqlite3_vfs)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).FpVfs)).FmxPathname+int32(1)))
if rc == SQLITE_OK && *(*int8)(unsafe.Pointer(zSuper)) != 0 {
rc = _sqlite3OsAccess(tls, pVfs, zSuper, SQLITE_ACCESS_EXISTS, bp+16)
}
zSuper = uintptr(0)
if rc != SQLITE_OK || !(*(*int32)(unsafe.Pointer(bp + 16)) != 0) {
goto end_playback
}
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
needPagerReset = isHot
/* This loop terminates either when a readJournalHdr() or
** pager_playback_one_page() call returns SQLITE_DONE or an IO error
** occurs.
*/
for int32(1) != 0 {
/* Read the next journal header from the journal file. If there are
** not enough bytes left in the journal file for a complete header, or
** it is corrupted, then a process must have failed while writing it.
** This indicates nothing more needs to be rolled back.
*/
rc = _readJournalHdr(tls, pPager, isHot, *(*Ti64)(unsafe.Pointer(bp)), bp+8, bp+12)
if rc != SQLITE_OK {
if rc == int32(SQLITE_DONE) {
rc = SQLITE_OK
}
goto end_playback
}
/* If nRec is 0xffffffff, then this journal was created by a process
** working in no-sync mode. This means that the rest of the journal
** file consists of pages, there are no more journal headers. Compute
** the value of nRec based on this assumption.
*/
if *(*Tu32)(unsafe.Pointer(bp + 8)) == uint32(0xffffffff) {
*(*Tu32)(unsafe.Pointer(bp + 8)) = uint32(int32((*(*Ti64)(unsafe.Pointer(bp)) - int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize)) / ((*TPager)(unsafe.Pointer(pPager)).FpageSize + libc.Int64FromInt32(8))))
}
/* If nRec is 0 and this rollback is of a transaction created by this
** process and if this is the final header in the journal, then it means
** that this part of the journal was being filled but has not yet been
** synced to disk. Compute the number of pages based on the remaining
** size of the file.
**
** The third term of the test was added to fix ticket #2565.
** When rolling back a hot journal, nRec==0 always means that the next
** chunk of the journal contains zero pages to be rolled back. But
** when doing a ROLLBACK and the nRec==0 chunk is the last chunk in
** the journal, it means that the journal might contain additional
** pages that need to be rolled back and that the number of pages
** should be computed based on the journal file size.
*/
if *(*Tu32)(unsafe.Pointer(bp + 8)) == uint32(0) && !(isHot != 0) && (*TPager)(unsafe.Pointer(pPager)).FjournalHdr+int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize) == (*TPager)(unsafe.Pointer(pPager)).FjournalOff {
*(*Tu32)(unsafe.Pointer(bp + 8)) = uint32(int32((*(*Ti64)(unsafe.Pointer(bp)) - (*TPager)(unsafe.Pointer(pPager)).FjournalOff) / ((*TPager)(unsafe.Pointer(pPager)).FpageSize + libc.Int64FromInt32(8))))
}
/* If this is the first header read from the journal, truncate the
** database file back to its original size.
*/
if (*TPager)(unsafe.Pointer(pPager)).FjournalOff == int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize) {
rc = _pager_truncate(tls, pPager, *(*TPgno)(unsafe.Pointer(bp + 12)))
if rc != SQLITE_OK {
goto end_playback
}
(*TPager)(unsafe.Pointer(pPager)).FdbSize = *(*TPgno)(unsafe.Pointer(bp + 12))
if (*TPager)(unsafe.Pointer(pPager)).FmxPgno < *(*TPgno)(unsafe.Pointer(bp + 12)) {
(*TPager)(unsafe.Pointer(pPager)).FmxPgno = *(*TPgno)(unsafe.Pointer(bp + 12))
}
}
/* Copy original pages out of the journal and back into the
** database file and/or page cache.
*/
u = uint32(0)
for {
if !(u < *(*Tu32)(unsafe.Pointer(bp + 8))) {
break
}
if needPagerReset != 0 {
_pager_reset(tls, pPager)
needPagerReset = 0
}
rc = _pager_playback_one_page(tls, pPager, pPager+96, uintptr(0), int32(1), 0)
if rc == SQLITE_OK {
nPlayback++
} else {
if rc == int32(SQLITE_DONE) {
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = *(*Ti64)(unsafe.Pointer(bp))
break
} else {
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(2)<<libc.Int32FromInt32(8) {
/* If the journal has been truncated, simply stop reading and
** processing the journal. This might happen if the journal was
** truncated because of a SQLITE_IOERR_SHORT_READ error while
** reading from the journal file. */
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = *(*Ti64)(unsafe.Pointer(bp))
break
} else {
/* If we are unable to rollback, quit and return the error
** code. This will cause the pager to enter the error state
** so that no further harm will be done. Perhaps the next
** process to come along will be able to rollback the database.
*/
goto end_playback
}
}
}
goto _1
_1:
;
u++
}
}
goto end_playback
end_playback:
;
if rc == SQLITE_OK {
rc = _sqlite3PagerSetPagesize(tls, pPager, bp+20, -int32(1))
}
/* Following a rollback, the database file should be back in its original
** state prior to the start of the transaction, so invoke the
** SQLITE_FCNTL_DB_UNCHANGED file-control method to disable the
** assertion that the transaction counter was modified.
*/
/* If this playback is happening automatically as a result of an IO or
** malloc error that occurred after the change-counter was updated but
** before the transaction was committed, then the change-counter
** modification may just have been reverted. If this happens in exclusive
** mode, then subsequent transactions performed by the connection will not
** update the change-counter at all. This may lead to cache inconsistency
** problems for other processes at some point in the future. So, just
** in case this has happened, clear the changeCountDone flag now.
*/
(*TPager)(unsafe.Pointer(pPager)).FchangeCountDone = (*TPager)(unsafe.Pointer(pPager)).FtempFile
if rc == SQLITE_OK {
zSuper = (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace
rc = _readSuperJournal(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, zSuper, uint32((*Tsqlite3_vfs)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).FpVfs)).FmxPathname+int32(1)))
}
if rc == SQLITE_OK && (int32((*TPager)(unsafe.Pointer(pPager)).FeState) >= int32(PAGER_WRITER_DBMOD) || int32((*TPager)(unsafe.Pointer(pPager)).FeState) == PAGER_OPEN) {
rc = _sqlite3PagerSync(tls, pPager, uintptr(0))
}
if rc == SQLITE_OK {
rc = _pager_end_transaction(tls, pPager, libc.BoolInt32(int32(*(*int8)(unsafe.Pointer(zSuper))) != int32('\000')), 0)
}
if rc == SQLITE_OK && *(*int8)(unsafe.Pointer(zSuper)) != 0 && *(*int32)(unsafe.Pointer(bp + 16)) != 0 {
/* If there was a super-journal and this routine will return success,
** see if it is possible to delete the super-journal.
*/
libc.Xmemset(tls, (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace, 0, uint64(4))
rc = _pager_delsuper(tls, pPager, zSuper)
}
if isHot != 0 && nPlayback != 0 {
Xsqlite3_log(tls, libc.Int32FromInt32(SQLITE_NOTICE)|libc.Int32FromInt32(2)<<libc.Int32FromInt32(8), __ccgo_ts /* "recovered %d pages from %s" */, libc.VaList(bp+32, nPlayback, (*TPager)(unsafe.Pointer(pPager)).FzJournal))
}
/* The Pager.sectorSize variable may have been updated while rolling
** back a journal created by a process with a different sector size
** value. Reset it to the correct value for this process.
*/
_setSectorSize(tls, pPager)
return rc
}
// C documentation
//
// /*
// ** Read the content for page pPg out of the database file (or out of
// ** the WAL if that is where the most recent copy is found) into
// ** pPg->pData. A shared lock or greater must be held on the database
// ** file before this function is called.
// **
// ** If page 1 is read, then the value of Pager.dbFileVers[] is set to
// ** the value read from the database file.
// **
// ** If an IO error occurs, then the IO error is returned to the caller.
// ** Otherwise, SQLITE_OK is returned.
// */
func _readDbPage(tls *libc.TLS, pPg uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var dbFileVers, pPager uintptr
var iOffset Ti64
var rc int32
var _ /* iFrame at bp+0 */ Tu32
_, _, _, _ = dbFileVers, iOffset, pPager, rc
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager /* Pager object associated with page pPg */
rc = SQLITE_OK /* Return code */
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0) /* Frame of WAL containing pgno */
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
rc = _sqlite3WalFindFrame(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno, bp)
if rc != 0 {
return rc
}
}
if *(*Tu32)(unsafe.Pointer(bp)) != 0 {
rc = _sqlite3WalReadFrame(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, *(*Tu32)(unsafe.Pointer(bp)), int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), (*TPgHdr)(unsafe.Pointer(pPg)).FpData)
} else {
iOffset = int64((*TPgHdr)(unsafe.Pointer(pPg)).Fpgno-libc.Uint32FromInt32(1)) * (*TPager)(unsafe.Pointer(pPager)).FpageSize
rc = _sqlite3OsRead(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, (*TPgHdr)(unsafe.Pointer(pPg)).FpData, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), iOffset)
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(2)<<libc.Int32FromInt32(8) {
rc = SQLITE_OK
}
}
if (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno == uint32(1) {
if rc != 0 {
/* If the read is unsuccessful, set the dbFileVers[] to something
** that will never be a valid file version. dbFileVers[] is a copy
** of bytes 24..39 of the database. Bytes 28..31 should always be
** zero or the size of the database in pages. Bytes 32..35 and 36..39
** should be page numbers which are never 0xffffffff. So filling
** pPager->dbFileVers[] with all 0xff bytes should suffice.
**
** For an encrypted database, the situation is more complex: bytes
** 24..39 of the database are white noise. But the probability of
** white noise equaling 16 bytes of 0xff is vanishingly small so
** we should still be ok.
*/
libc.Xmemset(tls, pPager+136, int32(0xff), uint64(16))
} else {
dbFileVers = (*TPgHdr)(unsafe.Pointer(pPg)).FpData + 24
libc.Xmemcpy(tls, pPager+136, dbFileVers, uint64(16))
}
}
return rc
}
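// Illustrative sketch, not part of the generated amalgamation: the offset
// arithmetic _readDbPage uses when the page is not found in the WAL, and the
// slice of page 1 that backs Pager.dbFileVers (bytes 24..39 of the database).
func examplePageOffset(pgno uint32, pageSize int64) int64 {
	// Page numbers are 1-based, so page pgno starts right after the first
	// pgno-1 pages.
	return int64(pgno-1) * pageSize
}

func exampleDbFileVers(page1 []byte) []byte {
	// The 16-byte change-detection cookie copied into Pager.dbFileVers.
	return page1[24:40]
}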
// C documentation
//
// /*
// ** Update the value of the change-counter at offsets 24 and 92 in
// ** the header and the sqlite version number at offset 96.
// **
// ** This is an unconditional update. See also the pager_incr_changecounter()
// ** routine which only updates the change-counter if the update is actually
// ** needed, as determined by the pPager->changeCountDone state variable.
// */
func _pager_write_changecounter(tls *libc.TLS, pPg uintptr) {
var change_counter Tu32
_ = change_counter
if pPg == uintptr(0) {
return
}
/* Increment the value just read and write it back to byte 24. */
change_counter = _sqlite3Get4byte(tls, (*TPgHdr)(unsafe.Pointer(pPg)).FpPager+136) + uint32(1)
_sqlite3Put4byte(tls, (*TPgHdr)(unsafe.Pointer(pPg)).FpData+uintptr(24), change_counter)
/* Also store the SQLite version number in bytes 96..99 and in
** bytes 92..95 store the change counter for which the version number
** is valid. */
_sqlite3Put4byte(tls, (*TPgHdr)(unsafe.Pointer(pPg)).FpData+uintptr(92), change_counter)
_sqlite3Put4byte(tls, (*TPgHdr)(unsafe.Pointer(pPg)).FpData+uintptr(96), uint32(SQLITE_VERSION_NUMBER))
}
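// Illustrative sketch, not part of the generated amalgamation: the header
// update performed by _pager_write_changecounter above, shown on a plain byte
// slice holding page 1. The change counter is stored big-endian at offsets 24
// and 92, and the library version number at offset 96.
func exampleWriteChangeCounter(page1 []byte, counter, versionNumber uint32) {
	put4 := func(b []byte, v uint32) {
		b[0] = byte(v >> 24)
		b[1] = byte(v >> 16)
		b[2] = byte(v >> 8)
		b[3] = byte(v)
	}
	put4(page1[24:], counter)
	put4(page1[92:], counter)
	put4(page1[96:], versionNumber)
}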
// C documentation
//
// /*
// ** This function is invoked once for each page that has already been
// ** written into the log file when a WAL transaction is rolled back.
// ** Parameter iPg is the page number of said page. The pCtx argument
// ** is actually a pointer to the Pager structure.
// **
// ** If page iPg is present in the cache, and has no outstanding references,
// ** it is discarded. Otherwise, if there are one or more outstanding
// ** references, the page content is reloaded from the database. If the
// ** attempt to reload content from the database is required and fails,
// ** return an SQLite error code. Otherwise, SQLITE_OK.
// */
func _pagerUndoCallback(tls *libc.TLS, pCtx uintptr, iPg TPgno) (r int32) {
var pPager, pPg uintptr
var rc int32
_, _, _ = pPager, pPg, rc
rc = SQLITE_OK
pPager = pCtx
pPg = _sqlite3PagerLookup(tls, pPager, iPg)
if pPg != 0 {
if _sqlite3PcachePageRefcount(tls, pPg) == int64(1) {
_sqlite3PcacheDrop(tls, pPg)
} else {
rc = _readDbPage(tls, pPg)
if rc == SQLITE_OK {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TPager)(unsafe.Pointer(pPager)).FxReiniter})))(tls, pPg)
}
_sqlite3PagerUnrefNotNull(tls, pPg)
}
}
/* Normally, if a transaction is rolled back, any backup processes are
** updated as data is copied out of the rollback journal and into the
** database. This is not generally possible with a WAL database, as
** rollback involves simply truncating the log file. Therefore, if one
** or more frames have already been written to the log (and therefore
** also copied into the backup databases) as part of this transaction,
** the backups must be restarted.
*/
_sqlite3BackupRestart(tls, (*TPager)(unsafe.Pointer(pPager)).FpBackup)
return rc
}
// C documentation
//
// /*
// ** This function is called to rollback a transaction on a WAL database.
// */
func _pagerRollbackWal(tls *libc.TLS, pPager uintptr) (r int32) {
var pList, pNext uintptr
var rc int32
_, _, _ = pList, pNext, rc /* List of dirty pages to revert */
/* For all pages in the cache that are currently dirty or have already
** been written (but not committed) to the log file, do one of the
** following:
**
** + Discard the cached page (if refcount==0), or
** + Reload page content from the database (if refcount>0).
*/
(*TPager)(unsafe.Pointer(pPager)).FdbSize = (*TPager)(unsafe.Pointer(pPager)).FdbOrigSize
rc = _sqlite3WalUndo(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, __ccgo_fp(_pagerUndoCallback), pPager)
pList = _sqlite3PcacheDirtyList(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
for pList != 0 && rc == SQLITE_OK {
pNext = (*TPgHdr)(unsafe.Pointer(pList)).FpDirty
rc = _pagerUndoCallback(tls, pPager, (*TPgHdr)(unsafe.Pointer(pList)).Fpgno)
pList = pNext
}
return rc
}
// C documentation
//
// /*
// ** This function is a wrapper around sqlite3WalFrames(). As well as logging
// ** the contents of the list of pages headed by pList (connected by pDirty),
// ** this function notifies any active backup processes that the pages have
// ** changed.
// **
// ** The list of pages passed into this routine is always sorted by page number.
// ** Hence, if page 1 appears anywhere on the list, it will be the first page.
// */
func _pagerWalFrames(tls *libc.TLS, pPager uintptr, _pList uintptr, nTruncate TPgno, isCommit int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
*(*uintptr)(unsafe.Pointer(bp)) = _pList
var nList, rc int32
var p, ppNext, v2 uintptr
_, _, _, _, _ = nList, p, ppNext, rc, v2 /* For looping over pages */
if isCommit != 0 {
/* If a WAL transaction is being committed, there is no point in writing
** any pages with page numbers greater than nTruncate into the WAL file.
** They will never be read by any client. So remove them from the pDirty
** list here. */
ppNext = bp
nList = 0
p = *(*uintptr)(unsafe.Pointer(bp))
for {
v2 = p
*(*uintptr)(unsafe.Pointer(ppNext)) = v2
if !(v2 != uintptr(0)) {
break
}
if (*TPgHdr)(unsafe.Pointer(p)).Fpgno <= nTruncate {
ppNext = p + 32
nList++
}
goto _1
_1:
;
p = (*TPgHdr)(unsafe.Pointer(p)).FpDirty
}
} else {
nList = int32(1)
}
*(*Tu32)(unsafe.Pointer(pPager + 248 + 2*4)) += uint32(nList)
if (*TPgHdr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fpgno == uint32(1) {
_pager_write_changecounter(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
rc = _sqlite3WalFrames(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), *(*uintptr)(unsafe.Pointer(bp)), nTruncate, isCommit, int32((*TPager)(unsafe.Pointer(pPager)).FwalSyncFlags))
if rc == SQLITE_OK && (*TPager)(unsafe.Pointer(pPager)).FpBackup != 0 {
p = *(*uintptr)(unsafe.Pointer(bp))
for {
if !(p != 0) {
break
}
_sqlite3BackupUpdate(tls, (*TPager)(unsafe.Pointer(pPager)).FpBackup, (*TPgHdr)(unsafe.Pointer(p)).Fpgno, (*TPgHdr)(unsafe.Pointer(p)).FpData)
goto _3
_3:
;
p = (*TPgHdr)(unsafe.Pointer(p)).FpDirty
}
}
return rc
}
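// Illustrative sketch, not part of the generated amalgamation: the pruning
// _pagerWalFrames performs on commit, shown on a plain slice of page numbers
// instead of the PgHdr.pDirty linked list. Pages beyond the post-commit
// database size nTruncate are dropped because no reader will ever request
// them from the WAL.
func exampleFilterCommitPages(pgnos []uint32, nTruncate uint32) []uint32 {
	kept := make([]uint32, 0, len(pgnos))
	for _, pgno := range pgnos {
		if pgno <= nTruncate {
			kept = append(kept, pgno)
		}
	}
	return kept
}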
// C documentation
//
// /*
// ** Begin a read transaction on the WAL.
// **
// ** This routine used to be called "pagerOpenSnapshot()" because it essentially
// ** makes a snapshot of the database at the current point in time and preserves
// ** that snapshot for use by the reader in spite of concurrent changes by
// ** other writers or checkpointers.
// */
func _pagerBeginReadTransaction(tls *libc.TLS, pPager uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* changed at bp+0 */ int32
_ = rc /* Return code */
*(*int32)(unsafe.Pointer(bp)) = 0 /* True if cache must be reset */
/* sqlite3WalEndReadTransaction() was not called for the previous
** transaction in locking_mode=EXCLUSIVE. So call it now. If we
** are in locking_mode=NORMAL and EndRead() was previously called,
** the duplicate call is harmless.
*/
_sqlite3WalEndReadTransaction(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
rc = _sqlite3WalBeginReadTransaction(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, bp)
if rc != SQLITE_OK || *(*int32)(unsafe.Pointer(bp)) != 0 {
_pager_reset(tls, pPager)
if (*TPager)(unsafe.Pointer(pPager)).FbUseFetch != 0 {
_sqlite3OsUnfetch(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, 0, uintptr(0))
}
}
return rc
}
// C documentation
//
// /*
// ** This function is called as part of the transition from PAGER_OPEN
// ** to PAGER_READER state to determine the size of the database file
// ** in pages (assuming the page size currently stored in Pager.pageSize).
// **
// ** If no error occurs, SQLITE_OK is returned and the size of the database
// ** in pages is stored in *pnPage. Otherwise, an error code (perhaps
// ** SQLITE_IOERR_FSTAT) is returned and *pnPage is left unmodified.
// */
func _pagerPagecount(tls *libc.TLS, pPager uintptr, pnPage uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nPage TPgno
var rc int32
var _ /* n at bp+0 */ Ti64
_, _ = nPage, rc /* Value to return via *pnPage */
/* Query the WAL sub-system for the database size. The WalDbsize()
** function returns zero if the WAL is not open (i.e. Pager.pWal==0), or
** if the database size is not available. The database size is not
** available from the WAL sub-system if the log file is empty or
** contains no valid committed transactions.
*/
nPage = _sqlite3WalDbsize(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
/* If the number of pages in the database is not available from the
** WAL sub-system, determine the page count based on the size of
** the database file. If the size of the database file is not an
** integer multiple of the page-size, round up the result.
*/
if nPage == uint32(0) && (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != uintptr(0) {
*(*Ti64)(unsafe.Pointer(bp)) = 0 /* Size of db file in bytes */
rc = _sqlite3OsFileSize(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, bp)
if rc != SQLITE_OK {
return rc
}
nPage = uint32((*(*Ti64)(unsafe.Pointer(bp)) + (*TPager)(unsafe.Pointer(pPager)).FpageSize - libc.Int64FromInt32(1)) / (*TPager)(unsafe.Pointer(pPager)).FpageSize)
}
/* If the current number of pages in the file is greater than the
// ** configured maximum page number, increase the allowed limit so
** that the file can be read.
*/
if nPage > (*TPager)(unsafe.Pointer(pPager)).FmxPgno {
(*TPager)(unsafe.Pointer(pPager)).FmxPgno = nPage
}
*(*TPgno)(unsafe.Pointer(pnPage)) = nPage
return SQLITE_OK
}
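// Illustrative sketch, not part of the generated amalgamation: the rounding
// used by _pagerPagecount above when the WAL cannot report a size. A file
// whose length is not an exact multiple of the page size still counts the
// trailing partial page.
func examplePageCount(fileSize, pageSize int64) uint32 {
	return uint32((fileSize + pageSize - 1) / pageSize)
}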
// C documentation
//
// /*
// ** Check if the *-wal file that corresponds to the database opened by pPager
// ** exists if the database is not empty, or verify that the *-wal file does
// ** not exist (by deleting it) if the database file is empty.
// **
// ** If the database is not empty and the *-wal file exists, open the pager
// ** in WAL mode. If the database is empty or if no *-wal file exists and
// ** if no error occurs, make sure Pager.journalMode is not set to
// ** PAGER_JOURNALMODE_WAL.
// **
// ** Return SQLITE_OK or an error code.
// **
// ** The caller must hold a SHARED lock on the database file to call this
// ** function. Because an EXCLUSIVE lock on the db file is required to delete
// ** a WAL on a non-empty database, this ensures there is no race condition
// ** between the xAccess() below and an xDelete() being executed by some
// ** other connection.
// */
func _pagerOpenWalIfPresent(tls *libc.TLS, pPager uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* isWal at bp+0 */ int32
var _ /* nPage at bp+4 */ TPgno
_ = rc
rc = SQLITE_OK
if !((*TPager)(unsafe.Pointer(pPager)).FtempFile != 0) { /* True if WAL file exists */
rc = _sqlite3OsAccess(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, (*TPager)(unsafe.Pointer(pPager)).FzWal, SQLITE_ACCESS_EXISTS, bp)
if rc == SQLITE_OK {
if *(*int32)(unsafe.Pointer(bp)) != 0 { /* Size of the database file */
rc = _pagerPagecount(tls, pPager, bp+4)
if rc != 0 {
return rc
}
if *(*TPgno)(unsafe.Pointer(bp + 4)) == uint32(0) {
rc = _sqlite3OsDelete(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, (*TPager)(unsafe.Pointer(pPager)).FzWal, 0)
} else {
rc = _sqlite3PagerOpenWal(tls, pPager, uintptr(0))
}
} else {
if int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_WAL) {
(*TPager)(unsafe.Pointer(pPager)).FjournalMode = uint8(PAGER_JOURNALMODE_DELETE)
}
}
}
}
return rc
}
// C documentation
//
// /*
// ** Playback savepoint pSavepoint. Or, if pSavepoint==NULL, then playback
// ** the entire super-journal file. The case pSavepoint==NULL occurs when
// ** a ROLLBACK TO command is invoked on a SAVEPOINT that is a transaction
// ** savepoint.
// **
// ** When pSavepoint is not NULL (meaning a non-transaction savepoint is
// ** being rolled back), then the rollback consists of up to three stages,
// ** performed in the order specified:
// **
// ** * Pages are played back from the main journal starting at byte
// ** offset PagerSavepoint.iOffset and continuing to
// ** PagerSavepoint.iHdrOffset, or to the end of the main journal
// ** file if PagerSavepoint.iHdrOffset is zero.
// **
// ** * If PagerSavepoint.iHdrOffset is not zero, then pages are played
// ** back starting from the journal header immediately following
// ** PagerSavepoint.iHdrOffset to the end of the main journal file.
// **
// ** * Pages are then played back from the sub-journal file, starting
// ** with the PagerSavepoint.iSubRec and continuing to the end of
// ** the journal file.
// **
// ** Throughout the rollback process, each time a page is rolled back, the
// ** corresponding bit is set in a bitvec structure (variable pDone in the
// ** implementation below). This is used to ensure that a page is only
// ** rolled back the first time it is encountered in either journal.
// **
// ** If pSavepoint is NULL, then pages are only played back from the main
// ** journal file. There is no need for a bitvec in this case.
// **
// ** In either case, before playback commences the Pager.dbSize variable
// ** is reset to the value that it held at the start of the savepoint
// ** (or transaction). No page with a page-number greater than this value
// ** is played back. If one is encountered it is simply skipped.
// */
func _pagerPlaybackSavepoint(tls *libc.TLS, pPager uintptr, pSavepoint uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iHdrOff, szJ Ti64
var ii, ii1 Tu32
var pDone uintptr
var rc int32
var v1 uint32
var v2 int64
var _ /* dummy at bp+4 */ Tu32
var _ /* nJRec at bp+0 */ Tu32
var _ /* offset at bp+8 */ Ti64
_, _, _, _, _, _, _, _ = iHdrOff, ii, ii1, pDone, rc, szJ, v1, v2 /* End of first segment of main-journal records */
rc = SQLITE_OK /* Return code */
pDone = uintptr(0) /* Bitvec to ensure pages played back only once */
/* Allocate a bitvec to use to store the set of pages rolled back */
if pSavepoint != 0 {
pDone = _sqlite3BitvecCreate(tls, (*TPagerSavepoint)(unsafe.Pointer(pSavepoint)).FnOrig)
if !(pDone != 0) {
return int32(SQLITE_NOMEM)
}
}
/* Set the database size back to the value it was before the savepoint
** being reverted was opened.
*/
if pSavepoint != 0 {
v1 = (*TPagerSavepoint)(unsafe.Pointer(pSavepoint)).FnOrig
} else {
v1 = (*TPager)(unsafe.Pointer(pPager)).FdbOrigSize
}
(*TPager)(unsafe.Pointer(pPager)).FdbSize = v1
(*TPager)(unsafe.Pointer(pPager)).FchangeCountDone = (*TPager)(unsafe.Pointer(pPager)).FtempFile
if !(pSavepoint != 0) && (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
return _pagerRollbackWal(tls, pPager)
}
/* Use pPager->journalOff as the effective size of the main rollback
** journal. The actual file might be larger than this in
** PAGER_JOURNALMODE_TRUNCATE or PAGER_JOURNALMODE_PERSIST. But anything
** past pPager->journalOff is off-limits to us.
*/
szJ = (*TPager)(unsafe.Pointer(pPager)).FjournalOff
/* Begin by rolling back records from the main journal starting at
** PagerSavepoint.iOffset and continuing to the next journal header.
** There might be records in the main journal that have a page number
** greater than the current database size (pPager->dbSize) but those
** will be skipped automatically. Pages are added to pDone as they
** are played back.
*/
if pSavepoint != 0 && !((*TPager)(unsafe.Pointer(pPager)).FpWal != libc.UintptrFromInt32(0)) {
if (*TPagerSavepoint)(unsafe.Pointer(pSavepoint)).FiHdrOffset != 0 {
v2 = (*TPagerSavepoint)(unsafe.Pointer(pSavepoint)).FiHdrOffset
} else {
v2 = szJ
}
iHdrOff = v2
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = (*TPagerSavepoint)(unsafe.Pointer(pSavepoint)).FiOffset
for rc == SQLITE_OK && (*TPager)(unsafe.Pointer(pPager)).FjournalOff < iHdrOff {
rc = _pager_playback_one_page(tls, pPager, pPager+96, pDone, int32(1), int32(1))
}
} else {
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
}
/* Continue rolling back records out of the main journal starting at
** the first journal header seen and continuing until the effective end
** of the main journal file. Continue to skip out-of-range pages and
** continue adding pages rolled back to pDone.
*/
for rc == SQLITE_OK && (*TPager)(unsafe.Pointer(pPager)).FjournalOff < szJ { /* Loop counter */
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0)
rc = _readJournalHdr(tls, pPager, 0, szJ, bp, bp+4)
/*
** The "pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff"
** test is related to ticket #2565. See the discussion in the
** pager_playback() function for additional information.
*/
if *(*Tu32)(unsafe.Pointer(bp)) == uint32(0) && (*TPager)(unsafe.Pointer(pPager)).FjournalHdr+int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize) == (*TPager)(unsafe.Pointer(pPager)).FjournalOff {
*(*Tu32)(unsafe.Pointer(bp)) = uint32((szJ - (*TPager)(unsafe.Pointer(pPager)).FjournalOff) / ((*TPager)(unsafe.Pointer(pPager)).FpageSize + libc.Int64FromInt32(8)))
}
ii = uint32(0)
for {
if !(rc == SQLITE_OK && ii < *(*Tu32)(unsafe.Pointer(bp)) && (*TPager)(unsafe.Pointer(pPager)).FjournalOff < szJ) {
break
}
rc = _pager_playback_one_page(tls, pPager, pPager+96, pDone, int32(1), int32(1))
goto _3
_3:
;
ii++
}
}
/* Finally, rollback pages from the sub-journal. Pages that were
** previously rolled back out of the main journal (and are hence in pDone)
** will be skipped. Out-of-range pages are also skipped.
*/
if pSavepoint != 0 { /* Loop counter */
*(*Ti64)(unsafe.Pointer(bp + 8)) = int64((*TPagerSavepoint)(unsafe.Pointer(pSavepoint)).FiSubRec) * (int64(4) + (*TPager)(unsafe.Pointer(pPager)).FpageSize)
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
rc = _sqlite3WalSavepointUndo(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, pSavepoint+36)
}
ii1 = (*TPagerSavepoint)(unsafe.Pointer(pSavepoint)).FiSubRec
for {
if !(rc == SQLITE_OK && ii1 < (*TPager)(unsafe.Pointer(pPager)).FnSubRec) {
break
}
rc = _pager_playback_one_page(tls, pPager, bp+8, pDone, 0, int32(1))
goto _4
_4:
;
ii1++
}
}
_sqlite3BitvecDestroy(tls, pDone)
if rc == SQLITE_OK {
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = szJ
}
return rc
}
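// Illustrative sketch, not part of the generated amalgamation: the starting
// offset used for the sub-journal stage of _pagerPlaybackSavepoint above.
// Each sub-journal record is a 4-byte page number followed by a full page
// image, so record iSubRec begins iSubRec*(4+pageSize) bytes into the file.
func exampleSubJournalOffset(iSubRec uint32, pageSize int64) int64 {
	return int64(iSubRec) * (4 + pageSize)
}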
// C documentation
//
// /*
// ** Change the maximum number of in-memory pages that are allowed
// ** before attempting to recycle clean and unused pages.
// */
func _sqlite3PagerSetCachesize(tls *libc.TLS, pPager uintptr, mxPage int32) {
_sqlite3PcacheSetCachesize(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache, mxPage)
}
// C documentation
//
// /*
// ** Change the maximum number of in-memory pages that are allowed
// ** before attempting to spill pages to journal.
// */
func _sqlite3PagerSetSpillsize(tls *libc.TLS, pPager uintptr, mxPage int32) (r int32) {
return _sqlite3PcacheSetSpillsize(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache, mxPage)
}
// C documentation
//
// /*
// ** Invoke SQLITE_FCNTL_MMAP_SIZE based on the current value of szMmap.
// */
func _pagerFixMaplimit(tls *libc.TLS, pPager uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var fd uintptr
var _ /* sz at bp+0 */ Tsqlite3_int64
_ = fd
fd = (*TPager)(unsafe.Pointer(pPager)).Ffd
if (*Tsqlite3_file)(unsafe.Pointer(fd)).FpMethods != uintptr(0) && (*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(fd)).FpMethods)).FiVersion >= int32(3) {
*(*Tsqlite3_int64)(unsafe.Pointer(bp)) = (*TPager)(unsafe.Pointer(pPager)).FszMmap
(*TPager)(unsafe.Pointer(pPager)).FbUseFetch = libc.BoolUint8(*(*Tsqlite3_int64)(unsafe.Pointer(bp)) > libc.Int64FromInt32(0))
_setGetterMethod(tls, pPager)
_sqlite3OsFileControlHint(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32(SQLITE_FCNTL_MMAP_SIZE), bp)
}
}
// C documentation
//
// /*
// ** Change the maximum size of any memory mapping made of the database file.
// */
func _sqlite3PagerSetMmapLimit(tls *libc.TLS, pPager uintptr, szMmap Tsqlite3_int64) {
(*TPager)(unsafe.Pointer(pPager)).FszMmap = szMmap
_pagerFixMaplimit(tls, pPager)
}
// C documentation
//
// /*
// ** Free as much memory as possible from the pager.
// */
func _sqlite3PagerShrink(tls *libc.TLS, pPager uintptr) {
_sqlite3PcacheShrink(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
}
// C documentation
//
// /*
// ** Adjust settings of the pager to those specified in the pgFlags parameter.
// **
// ** The "level" in pgFlags & PAGER_SYNCHRONOUS_MASK sets the robustness
// ** of the database to damage due to OS crashes or power failures by
// ** changing the number of syncs()s when writing the journals.
// ** There are four levels:
// **
// ** OFF sqlite3OsSync() is never called. This is the default
// ** for temporary and transient files.
// **
// ** NORMAL The journal is synced once before writes begin on the
// ** database. This is normally adequate protection, but
// ** it is theoretically possible, though very unlikely,
// ** that an inopportune power failure could leave the journal
// ** in a state which would cause damage to the database
// ** when it is rolled back.
// **
// ** FULL The journal is synced twice before writes begin on the
// ** database (with some additional information - the nRec field
// ** of the journal header - being written in between the two
// ** syncs). If we assume that writing a
// ** single disk sector is atomic, then this mode provides
// ** assurance that the journal will not be corrupted to the
// ** point of causing damage to the database during rollback.
// **
// ** EXTRA This is like FULL except that it also syncs the directory
// ** that contains the rollback journal after the rollback
// ** journal is unlinked.
// **
// ** The above is for a rollback-journal mode. For WAL mode, OFF continues
// ** to mean that no syncs ever occur. NORMAL means that the WAL is synced
// ** prior to the start of checkpoint and that the database file is synced
// ** at the conclusion of the checkpoint if the entire content of the WAL
// ** was written back into the database. But no sync operations occur for
// ** an ordinary commit in NORMAL mode with WAL. FULL means that the WAL
// ** file is synced following each commit operation, in addition to the
// ** syncs associated with NORMAL. There is no difference between FULL
// ** and EXTRA for WAL mode.
// **
// ** Do not confuse synchronous=FULL with SQLITE_SYNC_FULL. The
// ** SQLITE_SYNC_FULL macro means to use the MacOSX-style full-fsync
// ** using fcntl(F_FULLFSYNC). SQLITE_SYNC_NORMAL means to do an
// ** ordinary fsync() call. There is no difference between SQLITE_SYNC_FULL
// ** and SQLITE_SYNC_NORMAL on platforms other than MacOSX. But the
// ** synchronous=FULL versus synchronous=NORMAL setting determines when
// ** the xSync primitive is called and is relevant to all platforms.
// **
// ** Numeric values associated with these states are OFF==1, NORMAL=2,
// ** and FULL=3.
// */
func _sqlite3PagerSetFlags(tls *libc.TLS, pPager uintptr, pgFlags uint32) {
var level uint32
var v1, v2, v3 int32
var p4, p5, p6, p7 uintptr
_, _, _, _, _, _, _, _ = level, v1, v2, v3, p4, p5, p6, p7
level = pgFlags & uint32(PAGER_SYNCHRONOUS_MASK)
if (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 {
(*TPager)(unsafe.Pointer(pPager)).FnoSync = uint8(1)
(*TPager)(unsafe.Pointer(pPager)).FfullSync = uint8(0)
(*TPager)(unsafe.Pointer(pPager)).FextraSync = uint8(0)
} else {
if level == uint32(PAGER_SYNCHRONOUS_OFF) {
v1 = int32(1)
} else {
v1 = 0
}
(*TPager)(unsafe.Pointer(pPager)).FnoSync = uint8(v1)
if level >= uint32(PAGER_SYNCHRONOUS_FULL) {
v2 = int32(1)
} else {
v2 = 0
}
(*TPager)(unsafe.Pointer(pPager)).FfullSync = uint8(v2)
if level == uint32(PAGER_SYNCHRONOUS_EXTRA) {
v3 = int32(1)
} else {
v3 = 0
}
(*TPager)(unsafe.Pointer(pPager)).FextraSync = uint8(v3)
}
if (*TPager)(unsafe.Pointer(pPager)).FnoSync != 0 {
(*TPager)(unsafe.Pointer(pPager)).FsyncFlags = uint8(0)
} else {
if pgFlags&uint32(PAGER_FULLFSYNC) != 0 {
(*TPager)(unsafe.Pointer(pPager)).FsyncFlags = uint8(SQLITE_SYNC_FULL)
} else {
(*TPager)(unsafe.Pointer(pPager)).FsyncFlags = uint8(SQLITE_SYNC_NORMAL)
}
}
(*TPager)(unsafe.Pointer(pPager)).FwalSyncFlags = uint8(int32((*TPager)(unsafe.Pointer(pPager)).FsyncFlags) << libc.Int32FromInt32(2))
if (*TPager)(unsafe.Pointer(pPager)).FfullSync != 0 {
p4 = pPager + 15
*(*Tu8)(unsafe.Pointer(p4)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p4))) | int32((*TPager)(unsafe.Pointer(pPager)).FsyncFlags))
}
if pgFlags&uint32(PAGER_CKPT_FULLFSYNC) != 0 && !((*TPager)(unsafe.Pointer(pPager)).FnoSync != 0) {
p5 = pPager + 15
*(*Tu8)(unsafe.Pointer(p5)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p5))) | libc.Int32FromInt32(SQLITE_SYNC_FULL)<<libc.Int32FromInt32(2))
}
if pgFlags&uint32(PAGER_CACHESPILL) != 0 {
p6 = pPager + 25
*(*Tu8)(unsafe.Pointer(p6)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p6))) & ^libc.Int32FromInt32(SPILLFLAG_OFF))
} else {
p7 = pPager + 25
*(*Tu8)(unsafe.Pointer(p7)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p7))) | libc.Int32FromInt32(SPILLFLAG_OFF))
}
}
// C documentation
//
// /*
// ** Change the busy handler function.
// **
// ** The pager invokes the busy-handler if sqlite3OsLock() returns
// ** SQLITE_BUSY when trying to upgrade from no-lock to a SHARED lock,
// ** or when trying to upgrade from a RESERVED lock to an EXCLUSIVE
// ** lock. It does *not* invoke the busy handler when upgrading from
// ** SHARED to RESERVED, or when upgrading from SHARED to EXCLUSIVE
// ** (which occurs during hot-journal rollback). Summary:
// **
// ** Transition | Invokes xBusyHandler
// ** --------------------------------------------------------
// ** NO_LOCK -> SHARED_LOCK | Yes
// ** SHARED_LOCK -> RESERVED_LOCK | No
// ** SHARED_LOCK -> EXCLUSIVE_LOCK | No
// ** RESERVED_LOCK -> EXCLUSIVE_LOCK | Yes
// **
// ** If the busy-handler callback returns non-zero, the lock is
// ** retried. If it returns zero, then the SQLITE_BUSY error is
// ** returned to the caller of the pager API function.
// */
func _sqlite3PagerSetBusyHandler(tls *libc.TLS, pPager uintptr, xBusyHandler uintptr, pBusyHandlerArg uintptr) {
var ap uintptr
_ = ap
(*TPager)(unsafe.Pointer(pPager)).FxBusyHandler = xBusyHandler
(*TPager)(unsafe.Pointer(pPager)).FpBusyHandlerArg = pBusyHandlerArg
ap = pPager + 232
_sqlite3OsFileControlHint(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32(SQLITE_FCNTL_BUSYHANDLER), ap)
}
// C documentation
//
// /*
// ** Change the page size used by the Pager object. The new page size
// ** is passed in *pPageSize.
// **
// ** If the pager is in the error state when this function is called, it
// ** is a no-op. The value returned is the error state error code (i.e.
// ** one of SQLITE_IOERR, an SQLITE_IOERR_xxx sub-code or SQLITE_FULL).
// **
// ** Otherwise, if all of the following are true:
// **
// ** * the new page size (value of *pPageSize) is valid (a power
// ** of two between 512 and SQLITE_MAX_PAGE_SIZE, inclusive), and
// **
// ** * there are no outstanding page references, and
// **
// ** * the database is either not an in-memory database or it is
// ** an in-memory database that currently consists of zero pages.
// **
// ** then the pager object page size is set to *pPageSize.
// **
// ** If the page size is changed, then this function uses sqlite3PagerMalloc()
// ** to obtain a new Pager.pTmpSpace buffer. If this allocation attempt
// ** fails, SQLITE_NOMEM is returned and the page size remains unchanged.
// ** In all other cases, SQLITE_OK is returned.
// **
// ** If the page size is not changed, either because one of the enumerated
// ** conditions above is not true, the pager was in error state when this
// ** function was called, or because the memory allocation attempt failed,
// ** then *pPageSize is set to the old, retained page size before returning.
// */
func _sqlite3PagerSetPagesize(tls *libc.TLS, pPager uintptr, pPageSize uintptr, nReserve int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pNew uintptr
var pageSize Tu32
var rc int32
var _ /* nByte at bp+0 */ Ti64
_, _, _ = pNew, pageSize, rc
rc = SQLITE_OK
/* It is not possible to do a full assert_pager_state() here, as this
** function may be called from within PagerOpen(), before the state
** of the Pager object is internally consistent.
**
** At one point this function returned an error if the pager was in
** PAGER_ERROR state. But since PAGER_ERROR state guarantees that
** there is at least one outstanding page reference, this function
** is a no-op for that case anyhow.
*/
pageSize = *(*Tu32)(unsafe.Pointer(pPageSize))
if (int32((*TPager)(unsafe.Pointer(pPager)).FmemDb) == 0 || (*TPager)(unsafe.Pointer(pPager)).FdbSize == uint32(0)) && _sqlite3PcacheRefCount(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache) == 0 && pageSize != 0 && pageSize != uint32((*TPager)(unsafe.Pointer(pPager)).FpageSize) {
pNew = libc.UintptrFromInt32(0) /* New temp space */
*(*Ti64)(unsafe.Pointer(bp)) = 0
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) > PAGER_OPEN && (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != uintptr(0) {
rc = _sqlite3OsFileSize(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, bp)
}
if rc == SQLITE_OK {
/* 8 bytes of zeroed overrun space is sufficient so that the b-tree
* cell header parser will never run off the end of the allocation */
pNew = _sqlite3PageMalloc(tls, int32(pageSize+uint32(8)))
if !(pNew != 0) {
rc = int32(SQLITE_NOMEM)
} else {
libc.Xmemset(tls, pNew+uintptr(pageSize), 0, uint64(8))
}
}
if rc == SQLITE_OK {
_pager_reset(tls, pPager)
rc = _sqlite3PcacheSetPageSize(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache, int32(pageSize))
}
if rc == SQLITE_OK {
_sqlite3PageFree(tls, (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace)
(*TPager)(unsafe.Pointer(pPager)).FpTmpSpace = pNew
(*TPager)(unsafe.Pointer(pPager)).FdbSize = uint32((*(*Ti64)(unsafe.Pointer(bp)) + int64(pageSize) - libc.Int64FromInt32(1)) / int64(pageSize))
(*TPager)(unsafe.Pointer(pPager)).FpageSize = int64(pageSize)
(*TPager)(unsafe.Pointer(pPager)).FlckPgno = uint32(_sqlite3PendingByte)/pageSize + uint32(1)
} else {
_sqlite3PageFree(tls, pNew)
}
}
*(*Tu32)(unsafe.Pointer(pPageSize)) = uint32((*TPager)(unsafe.Pointer(pPager)).FpageSize)
if rc == SQLITE_OK {
if nReserve < 0 {
nReserve = int32((*TPager)(unsafe.Pointer(pPager)).FnReserve)
}
(*TPager)(unsafe.Pointer(pPager)).FnReserve = int16(nReserve)
_pagerFixMaplimit(tls, pPager)
}
return rc
}
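// The comment above spells out when the page size can actually change. As an
// illustrative, hedged sketch (these helpers are hypothetical and not part of
// the generated amalgamation): a valid page size is a power of two between 512
// and SQLITE_MAX_PAGE_SIZE (65536), and the page count stored in FdbSize is
// the file size rounded up to whole pages, exactly as computed above.
func _examplePageSizeValid(pageSize uint32) bool {
// a power of two has exactly one bit set
return pageSize >= uint32(512) && pageSize <= uint32(65536) && pageSize&(pageSize-uint32(1)) == uint32(0)
}

func _examplePageCount(fileSize, pageSize int64) int64 {
return (fileSize + pageSize - int64(1)) / pageSize // round up, as for FdbSize above
}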
// C documentation
//
// /*
// ** Return a pointer to the "temporary page" buffer held internally
// ** by the pager. This is a buffer that is big enough to hold the
// ** entire content of a database page. This buffer is used internally
// ** during rollback and will be overwritten whenever a rollback
// ** occurs. But other modules are free to use it too, as long as
// ** no rollbacks are happening.
// */
func _sqlite3PagerTempSpace(tls *libc.TLS, pPager uintptr) (r uintptr) {
return (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace
}
// C documentation
//
// /*
// ** Attempt to set the maximum database page count if mxPage is positive.
// ** Make no changes if mxPage is zero or negative. And never reduce the
// ** maximum page count below the current size of the database.
// **
// ** Regardless of mxPage, return the current maximum page count.
// */
func _sqlite3PagerMaxPageCount(tls *libc.TLS, pPager uintptr, mxPage TPgno) (r TPgno) {
if mxPage > uint32(0) {
(*TPager)(unsafe.Pointer(pPager)).FmxPgno = mxPage
}
/* Called only by OP_MaxPgcnt */
/* assert( pPager->mxPgno>=pPager->dbSize ); */
/* OP_MaxPgcnt ensures that the parameter passed to this function is not
** less than the total number of valid pages in the database. But this
** may be less than Pager.dbSize, and so the assert() above is not valid */
return (*TPager)(unsafe.Pointer(pPager)).FmxPgno
}
/*
** The following set of routines are used to disable the simulated
** I/O error mechanism. These routines are used to avoid simulated
** errors in places where we do not care about errors.
**
** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops
** and generate no code.
*/
// C documentation
//
// /*
// ** Read the first N bytes from the beginning of the file into memory
// ** that pDest points to.
// **
// ** If the pager was opened on a transient file (zFilename==""), or
// ** opened on a file less than N bytes in size, the output buffer is
// ** zeroed and SQLITE_OK returned. The rationale for this is that this
// ** function is used to read database headers, and a new transient or
// ** zero sized database has a header that consists entirely of zeroes.
// **
// ** If any IO error apart from SQLITE_IOERR_SHORT_READ is encountered,
// ** the error code is returned to the caller and the contents of the
// ** output buffer are undefined.
// */
func _sqlite3PagerReadFileheader(tls *libc.TLS, pPager uintptr, N int32, pDest uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
libc.Xmemset(tls, pDest, 0, uint64(N))
/* This routine is only called by btree immediately after creating
** the Pager object. There has not been an opportunity to transition
** to WAL mode yet.
*/
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods != uintptr(0) {
rc = _sqlite3OsRead(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, pDest, N, 0)
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(2)<<libc.Int32FromInt32(8) {
rc = SQLITE_OK
}
}
return rc
}
// C documentation
//
// /*
// ** Return the total number of pages in the database file associated
// ** with pPager. Normally, this is calculated as (<db file size>/page_size).
// ** However, if the file is less than <page_size> bytes in size, then
// ** this is considered a 1 page file.
// */
func _sqlite3PagerPagecount(tls *libc.TLS, pPager uintptr, pnPage uintptr) {
*(*int32)(unsafe.Pointer(pnPage)) = int32((*TPager)(unsafe.Pointer(pPager)).FdbSize)
}
// C documentation
//
// /*
// ** Try to obtain a lock of type locktype on the database file. If
// ** a similar or greater lock is already held, this function is a no-op
// ** (returning SQLITE_OK immediately).
// **
// ** Otherwise, attempt to obtain the lock using sqlite3OsLock(). Invoke
// ** the busy callback if the lock is currently not available. Repeat
// ** until the busy callback returns false or until the attempt to
// ** obtain the lock succeeds.
// **
// ** Return SQLITE_OK on success and an error code if we cannot obtain
// ** the lock. If the lock is obtained successfully, set the Pager.state
// ** variable to locktype before returning.
// */
func _pager_wait_on_lock(tls *libc.TLS, pPager uintptr, locktype int32) (r int32) {
var rc int32
_ = rc /* Return code */
/* Check that this is either a no-op (because the requested lock is
** already held), or one of the transitions that the busy-handler
** may be invoked during, according to the comment above
** sqlite3PagerSetBusyhandler().
*/
for cond := true; cond; cond = rc == int32(SQLITE_BUSY) && (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*TPager)(unsafe.Pointer(pPager)).FxBusyHandler})))(tls, (*TPager)(unsafe.Pointer(pPager)).FpBusyHandlerArg) != 0 {
rc = _pagerLockDb(tls, pPager, locktype)
}
return rc
}
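// Hedged sketch (hypothetical helper, not ccgo output): the loop above is the
// C do/while retry pattern. The lock attempt is repeated for as long as it
// fails with SQLITE_BUSY and the registered busy-handler asks for a retry by
// returning non-zero.
func _exampleWaitOnLock(tryLock func() int32, busyHandler func() bool) int32 {
var rc int32
for {
rc = tryLock()
if rc != int32(SQLITE_BUSY) || !busyHandler() {
break
}
}
return rc
}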
/*
** Function assertTruncateConstraint(pPager) checks that one of the
** following is true for all dirty pages currently in the page-cache:
**
** a) The page number is less than or equal to the size of the
** current database image, in pages, OR
**
** b) if the page content were written at this time, it would not
** be necessary to write the current content out to the sub-journal.
**
** If the condition asserted by this function were not true, and the
** dirty page were to be discarded from the cache via the pagerStress()
** routine, pagerStress() would not write the current page content to
** the database file. If a savepoint transaction were rolled back after
** this happened, the correct behavior would be to restore the current
** content of the page. However, since this content is not present in either
** the database file or the portion of the rollback journal and
** sub-journal rolled back the content could not be restored and the
** database image would become corrupt. It is therefore fortunate that
** this circumstance cannot arise.
*/
// C documentation
//
// /*
// ** Truncate the in-memory database file image to nPage pages. This
// ** function does not actually modify the database file on disk. It
// ** just sets the internal state of the pager object so that the
// ** truncation will be done when the current transaction is committed.
// **
// ** This function is only called right before committing a transaction.
// ** Once this function has been called, the transaction must either be
// ** rolled back or committed. It is not safe to call this function and
// ** then continue writing to the database.
// */
func _sqlite3PagerTruncateImage(tls *libc.TLS, pPager uintptr, nPage TPgno) {
(*TPager)(unsafe.Pointer(pPager)).FdbSize = nPage
/* At one point the code here called assertTruncateConstraint() to
** ensure that all pages being truncated away by this operation are,
** if one or more savepoints are open, present in the savepoint
** journal so that they can be restored if the savepoint is rolled
** back. This is no longer necessary as this function is now only
** called right before committing a transaction. So although the
** Pager object may still have open savepoints (Pager.nSavepoint!=0),
** they cannot be rolled back. So the assertTruncateConstraint() call
** is no longer correct. */
}
// C documentation
//
// /*
// ** This function is called before attempting a hot-journal rollback. It
// ** syncs the journal file to disk, then sets pPager->journalHdr to the
// ** size of the journal file so that the pager_playback() routine knows
// ** that the entire journal file has been synced.
// **
// ** Syncing a hot-journal to disk before attempting to roll it back ensures
// ** that if a power-failure occurs during the rollback, the process that
// ** attempts rollback following system recovery sees the same journal
// ** content as this process.
// **
// ** If everything goes as planned, SQLITE_OK is returned. Otherwise,
// ** an SQLite error code.
// */
func _pagerSyncHotJournal(tls *libc.TLS, pPager uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
if !((*TPager)(unsafe.Pointer(pPager)).FnoSync != 0) {
rc = _sqlite3OsSync(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, int32(SQLITE_SYNC_NORMAL))
}
if rc == SQLITE_OK {
rc = _sqlite3OsFileSize(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, pPager+104)
}
return rc
}
// C documentation
//
// /*
// ** Obtain a reference to a memory mapped page object for page number pgno.
// ** The new object will use the pointer pData, obtained from xFetch().
// ** If successful, set *ppPage to point to the new page reference
// ** and return SQLITE_OK. Otherwise, return an SQLite error code and set
// ** *ppPage to zero.
// **
// ** Page references obtained by calling this function should be released
// ** by calling pagerReleaseMapPage().
// */
func _pagerAcquireMapPage(tls *libc.TLS, pPager uintptr, pgno TPgno, pData uintptr, ppPage uintptr) (r int32) {
var p, v1, v2 uintptr
_, _, _ = p, v1, v2 /* Memory mapped page to return */
if (*TPager)(unsafe.Pointer(pPager)).FpMmapFreelist != 0 {
v1 = (*TPager)(unsafe.Pointer(pPager)).FpMmapFreelist
p = v1
*(*uintptr)(unsafe.Pointer(ppPage)) = v1
(*TPager)(unsafe.Pointer(pPager)).FpMmapFreelist = (*TPgHdr)(unsafe.Pointer(p)).FpDirty
(*TPgHdr)(unsafe.Pointer(p)).FpDirty = uintptr(0)
libc.Xmemset(tls, (*TPgHdr)(unsafe.Pointer(p)).FpExtra, 0, uint64(8))
} else {
v2 = _sqlite3MallocZero(tls, uint64(80)+uint64((*TPager)(unsafe.Pointer(pPager)).FnExtra))
p = v2
*(*uintptr)(unsafe.Pointer(ppPage)) = v2
if p == uintptr(0) {
_sqlite3OsUnfetch(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int64(pgno-libc.Uint32FromInt32(1))*(*TPager)(unsafe.Pointer(pPager)).FpageSize, pData)
return int32(SQLITE_NOMEM)
}
(*TPgHdr)(unsafe.Pointer(p)).FpExtra = p + 1*80
(*TPgHdr)(unsafe.Pointer(p)).Fflags = uint16(PGHDR_MMAP)
(*TPgHdr)(unsafe.Pointer(p)).FnRef = int64(1)
(*TPgHdr)(unsafe.Pointer(p)).FpPager = pPager
}
(*TPgHdr)(unsafe.Pointer(p)).Fpgno = pgno
(*TPgHdr)(unsafe.Pointer(p)).FpData = pData
(*TPager)(unsafe.Pointer(pPager)).FnMmapOut++
return SQLITE_OK
}
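// Hedged sketch (hypothetical types and names): Pager.pMmapFreelist is used
// above as a simple LIFO of recycled PgHdr wrappers chained through their
// pDirty field, so acquiring a mapped page is either a pop from that list or a
// fresh zeroed allocation of the header plus nExtra bytes.
type _exampleFreeNode struct {
next *_exampleFreeNode
}

func _exampleFreelistPop(head **_exampleFreeNode) *_exampleFreeNode {
n := *head
if n != nil {
*head = n.next // unlink, as in pPager.FpMmapFreelist = p.FpDirty above
n.next = nil
}
return n
}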
// C documentation
//
// /*
// ** Release a reference to page pPg. pPg must have been returned by an
// ** earlier call to pagerAcquireMapPage().
// */
func _pagerReleaseMapPage(tls *libc.TLS, pPg uintptr) {
var pPager uintptr
_ = pPager
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager
(*TPager)(unsafe.Pointer(pPager)).FnMmapOut--
(*TPgHdr)(unsafe.Pointer(pPg)).FpDirty = (*TPager)(unsafe.Pointer(pPager)).FpMmapFreelist
(*TPager)(unsafe.Pointer(pPager)).FpMmapFreelist = pPg
_sqlite3OsUnfetch(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int64((*TPgHdr)(unsafe.Pointer(pPg)).Fpgno-libc.Uint32FromInt32(1))*(*TPager)(unsafe.Pointer(pPager)).FpageSize, (*TPgHdr)(unsafe.Pointer(pPg)).FpData)
}
// C documentation
//
// /*
// ** Free all PgHdr objects stored in the Pager.pMmapFreelist list.
// */
func _pagerFreeMapHdrs(tls *libc.TLS, pPager uintptr) {
var p, pNext uintptr
_, _ = p, pNext
p = (*TPager)(unsafe.Pointer(pPager)).FpMmapFreelist
for {
if !(p != 0) {
break
}
pNext = (*TPgHdr)(unsafe.Pointer(p)).FpDirty
Xsqlite3_free(tls, p)
goto _1
_1:
;
p = pNext
}
}
// C documentation
//
// /* Verify that the database file has not been deleted or renamed out from
// ** under the pager. Return SQLITE_OK if the database is still where it ought
// ** to be on disk. Return non-zero (SQLITE_READONLY_DBMOVED or some other error
// ** code from sqlite3OsAccess()) if the database has gone missing.
// */
func _databaseIsUnmoved(tls *libc.TLS, pPager uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* bHasMoved at bp+0 */ int32
_ = rc
*(*int32)(unsafe.Pointer(bp)) = 0
if (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 {
return SQLITE_OK
}
if (*TPager)(unsafe.Pointer(pPager)).FdbSize == uint32(0) {
return SQLITE_OK
}
rc = _sqlite3OsFileControl(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32(SQLITE_FCNTL_HAS_MOVED), bp)
if rc == int32(SQLITE_NOTFOUND) {
/* If the HAS_MOVED file-control is unimplemented, assume that the file
** has not been moved. That is the historical behavior of SQLite: prior to
** version 3.8.3, it never checked */
rc = SQLITE_OK
} else {
if rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp)) != 0 {
rc = libc.Int32FromInt32(SQLITE_READONLY) | libc.Int32FromInt32(4)<<libc.Int32FromInt32(8)
}
}
return rc
}
// C documentation
//
// /*
// ** Shutdown the page cache. Free all memory and close all files.
// **
// ** If a transaction was in progress when this routine is called, that
// ** transaction is rolled back. All outstanding pages are invalidated
// ** and their memory is freed. Any attempt to use a page associated
// ** with this page cache after this function returns will likely
// ** result in a coredump.
// **
// ** This function always succeeds. If a transaction is active an attempt
// ** is made to roll it back. If an error occurs during the rollback
// ** a hot journal may be left in the filesystem but no error is returned
// ** to the caller.
// */
func _sqlite3PagerClose(tls *libc.TLS, pPager uintptr, db uintptr) (r int32) {
var a, pTmp uintptr
_, _ = a, pTmp
pTmp = (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace
_sqlite3BeginBenignMalloc(tls)
_pagerFreeMapHdrs(tls, pPager)
/* pPager->errCode = 0; */
(*TPager)(unsafe.Pointer(pPager)).FexclusiveMode = uint8(0)
a = uintptr(0)
if db != 0 && uint64(0) == (*Tsqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_NoCkptOnClose) && SQLITE_OK == _databaseIsUnmoved(tls, pPager) {
a = pTmp
}
_sqlite3WalClose(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, db, int32((*TPager)(unsafe.Pointer(pPager)).FwalSyncFlags), int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), a)
(*TPager)(unsafe.Pointer(pPager)).FpWal = uintptr(0)
_pager_reset(tls, pPager)
if (*TPager)(unsafe.Pointer(pPager)).FmemDb != 0 {
_pager_unlock(tls, pPager)
} else {
/* If it is open, sync the journal file before calling UnlockAndRollback.
** If this is not done, then an unsynced portion of the open journal
** file may be played back into the database. If a power failure occurs
** while this is happening, the database could become corrupt.
**
** If an error occurs while trying to sync the journal, shift the pager
** into the ERROR state. This causes UnlockAndRollback to unlock the
** database and close the journal file without attempting to roll it
** back or finalize it. The next database user will have to do hot-journal
** rollback before accessing the database file.
*/
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) {
_pager_error(tls, pPager, _pagerSyncHotJournal(tls, pPager))
}
_pagerUnlockAndRollback(tls, pPager)
}
_sqlite3EndBenignMalloc(tls)
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd)
_sqlite3PageFree(tls, pTmp)
_sqlite3PcacheClose(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
Xsqlite3_free(tls, pPager)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Increment the reference count for page pPg.
// */
func _sqlite3PagerRef(tls *libc.TLS, pPg uintptr) {
_sqlite3PcacheRef(tls, pPg)
}
// C documentation
//
// /*
// ** Sync the journal. In other words, make sure all the pages that have
// ** been written to the journal have actually reached the surface of the
// ** disk and can be restored in the event of a hot-journal rollback.
// **
// ** If the Pager.noSync flag is set, then this function is a no-op.
// ** Otherwise, the actions required depend on the journal-mode and the
// ** device characteristics of the file-system, as follows:
// **
// ** * If the journal file is an in-memory journal file, no action need
// ** be taken.
// **
// ** * Otherwise, if the device does not support the SAFE_APPEND property,
// ** then the nRec field of the most recently written journal header
// ** is updated to contain the number of journal records that have
// ** been written following it. If the pager is operating in full-sync
// ** mode, then the journal file is synced before this field is updated.
// **
// ** * If the device does not support the SEQUENTIAL property, then
// ** journal file is synced.
// **
// ** Or, in pseudo-code:
// **
// ** if( NOT <in-memory journal> ){
// ** if( NOT SAFE_APPEND ){
// ** if( <full-sync mode> ) xSync(<journal file>);
// ** <update nRec field>
// ** }
// ** if( NOT SEQUENTIAL ) xSync(<journal file>);
// ** }
// **
// ** If successful, this routine clears the PGHDR_NEED_SYNC flag of every
// ** page currently held in memory before returning SQLITE_OK. If an IO
// ** error is encountered, then the IO error code is returned to the caller.
// */
func _syncJournal(tls *libc.TLS, pPager uintptr, newHdr int32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var iDc, rc, v1 int32
var iNextHdrOffset Ti64
var _ /* aMagic at bp+0 */ [8]Tu8
var _ /* zHeader at bp+8 */ [12]Tu8
_, _, _, _ = iDc, iNextHdrOffset, rc, v1 /* Return code */
rc = _sqlite3PagerExclusiveLock(tls, pPager)
if rc != SQLITE_OK {
return rc
}
if !((*TPager)(unsafe.Pointer(pPager)).FnoSync != 0) {
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) && int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) != int32(PAGER_JOURNALMODE_MEMORY) {
iDc = _sqlite3OsDeviceCharacteristics(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd)
if 0 == iDc&int32(SQLITE_IOCAP_SAFE_APPEND) {
libc.Xmemcpy(tls, bp+8, uintptr(unsafe.Pointer(&_aJournalMagic)), uint64(8))
_sqlite3Put4byte(tls, bp+8+uintptr(8), uint32((*TPager)(unsafe.Pointer(pPager)).FnRec))
iNextHdrOffset = _journalHdrOffset(tls, pPager)
rc = _sqlite3OsRead(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, bp, int32(8), iNextHdrOffset)
if rc == SQLITE_OK && 0 == libc.Xmemcmp(tls, bp, uintptr(unsafe.Pointer(&_aJournalMagic)), uint64(8)) {
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, uintptr(unsafe.Pointer(&_zerobyte)), int32(1), iNextHdrOffset)
}
if rc != SQLITE_OK && rc != libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(2)< (*TPager)(unsafe.Pointer(pPager)).FdbHintSize) {
*(*Tsqlite3_int64)(unsafe.Pointer(bp)) = (*TPager)(unsafe.Pointer(pPager)).FpageSize * int64((*TPager)(unsafe.Pointer(pPager)).FdbSize)
_sqlite3OsFileControlHint(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32(SQLITE_FCNTL_SIZE_HINT), bp)
(*TPager)(unsafe.Pointer(pPager)).FdbHintSize = (*TPager)(unsafe.Pointer(pPager)).FdbSize
}
for rc == SQLITE_OK && pList != 0 {
pgno = (*TPgHdr)(unsafe.Pointer(pList)).Fpgno
/* If there are dirty pages in the page cache with page numbers greater
** than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to
** make the file smaller (presumably by auto-vacuum code). Do not write
** any such pages to the file.
**
** Also, do not write out any page that has the PGHDR_DONT_WRITE flag
** set (set by sqlite3PagerDontWrite()).
*/
if pgno <= (*TPager)(unsafe.Pointer(pPager)).FdbSize && 0 == int32((*TPgHdr)(unsafe.Pointer(pList)).Fflags)&int32(PGHDR_DONT_WRITE) {
offset = int64(pgno-libc.Uint32FromInt32(1)) * (*TPager)(unsafe.Pointer(pPager)).FpageSize /* Data to write */
if (*TPgHdr)(unsafe.Pointer(pList)).Fpgno == uint32(1) {
_pager_write_changecounter(tls, pList)
}
pData = (*TPgHdr)(unsafe.Pointer(pList)).FpData
/* Write out the page data. */
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, pData, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), offset)
/* If page 1 was just written, update Pager.dbFileVers to match
** the value now stored in the database file. If writing this
** page caused the database file to grow, update dbFileSize.
*/
if pgno == uint32(1) {
libc.Xmemcpy(tls, pPager+136, pData+24, uint64(16))
}
if pgno > (*TPager)(unsafe.Pointer(pPager)).FdbFileSize {
(*TPager)(unsafe.Pointer(pPager)).FdbFileSize = pgno
}
*(*Tu32)(unsafe.Pointer(pPager + 248 + 2*4))++
/* Update any backup objects copying the contents of this pager. */
_sqlite3BackupUpdate(tls, (*TPager)(unsafe.Pointer(pPager)).FpBackup, pgno, (*TPgHdr)(unsafe.Pointer(pList)).FpData)
} else {
}
pList = (*TPgHdr)(unsafe.Pointer(pList)).FpDirty
}
return rc
}
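// Hedged sketch (hypothetical helper): as in the loop above, a page image is
// written at a fixed offset derived from its 1-based page number.
func _examplePageOffset(pgno uint32, pageSize int64) int64 {
return int64(pgno-uint32(1)) * pageSize
}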
// C documentation
//
// /*
// ** Ensure that the sub-journal file is open. If it is already open, this
// ** function is a no-op.
// **
// ** SQLITE_OK is returned if everything goes according to plan. An
// ** SQLITE_IOERR_XXX error code is returned if a call to sqlite3OsOpen()
// ** fails.
// */
func _openSubJournal(tls *libc.TLS, pPager uintptr) (r int32) {
var flags, nStmtSpill, rc int32
_, _, _ = flags, nStmtSpill, rc
rc = SQLITE_OK
if !((*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fsjfd)).FpMethods != libc.UintptrFromInt32(0)) {
flags = libc.Int32FromInt32(SQLITE_OPEN_SUBJOURNAL) | libc.Int32FromInt32(SQLITE_OPEN_READWRITE) | libc.Int32FromInt32(SQLITE_OPEN_CREATE) | libc.Int32FromInt32(SQLITE_OPEN_EXCLUSIVE) | libc.Int32FromInt32(SQLITE_OPEN_DELETEONCLOSE)
nStmtSpill = _sqlite3Config.FnStmtSpill
if int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_MEMORY) || (*TPager)(unsafe.Pointer(pPager)).FsubjInMemory != 0 {
nStmtSpill = -int32(1)
}
rc = _sqlite3JournalOpen(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, uintptr(0), (*TPager)(unsafe.Pointer(pPager)).Fsjfd, flags, nStmtSpill)
}
return rc
}
// C documentation
//
// /*
// ** Append a record of the current state of page pPg to the sub-journal.
// **
// ** If successful, set the bit corresponding to pPg->pgno in the bitvecs
// ** for all open savepoints before returning.
// **
// ** This function returns SQLITE_OK if everything is successful, an IO
// ** error code if the attempt to write to the sub-journal fails, or
// ** SQLITE_NOMEM if a malloc fails while setting a bit in a savepoint
// ** bitvec.
// */
func _subjournalPage(tls *libc.TLS, pPg uintptr) (r int32) {
var offset Ti64
var pData, pData2, pPager uintptr
var rc int32
_, _, _, _, _ = offset, pData, pData2, pPager, rc
rc = SQLITE_OK
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager
if int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) != int32(PAGER_JOURNALMODE_OFF) {
/* Open the sub-journal, if it has not already been opened */
rc = _openSubJournal(tls, pPager)
/* If the sub-journal was opened successfully (or was already open),
** write the journal record into the file. */
if rc == SQLITE_OK {
pData = (*TPgHdr)(unsafe.Pointer(pPg)).FpData
offset = int64((*TPager)(unsafe.Pointer(pPager)).FnSubRec) * (int64(4) + (*TPager)(unsafe.Pointer(pPager)).FpageSize)
pData2 = pData
rc = _write32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fsjfd, offset, (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno)
if rc == SQLITE_OK {
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Fsjfd, pData2, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), offset+int64(4))
}
}
}
if rc == SQLITE_OK {
(*TPager)(unsafe.Pointer(pPager)).FnSubRec++
rc = _addToSavepointBitvecs(tls, pPager, (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno)
}
return rc
}
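// Hedged sketch (hypothetical helper): a sub-journal record written above is a
// 4-byte page number followed by one full page image, so record n begins at
// byte offset n*(4+pageSize), matching the offset computed before _write32bits.
func _exampleSubjournalOffset(nSubRec, pageSize int64) int64 {
return nSubRec * (int64(4) + pageSize)
}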
func _subjournalPageIfRequired(tls *libc.TLS, pPg uintptr) (r int32) {
if _subjRequiresPage(tls, pPg) != 0 {
return _subjournalPage(tls, pPg)
} else {
return SQLITE_OK
}
return r
}
// C documentation
//
// /*
// ** This function is called by the pcache layer when it has reached some
// ** soft memory limit. The first argument is a pointer to a Pager object
// ** (cast as a void*). The pager is always 'purgeable' (not an in-memory
// ** database). The second argument is a reference to a page that is
// ** currently dirty but has no outstanding references. The page
// ** is always associated with the Pager object passed as the first
// ** argument.
// **
// ** The job of this function is to make pPg clean by writing its contents
// ** out to the database file, if possible. This may involve syncing the
// ** journal file.
// **
// ** If successful, sqlite3PcacheMakeClean() is called on the page and
// ** SQLITE_OK returned. If an IO error occurs while trying to make the
// ** page clean, the IO error code is returned. If the page cannot be
// ** made clean for some other reason, but no error occurs, then SQLITE_OK
// ** is returned but sqlite3PcacheMakeClean() is not called.
// */
func _pagerStress(tls *libc.TLS, p uintptr, pPg uintptr) (r int32) {
var pPager uintptr
var rc int32
_, _ = pPager, rc
pPager = p
rc = SQLITE_OK
/* The doNotSpill NOSYNC bit is set during times when doing a sync of
** journal (and adding a new header) is not allowed. This occurs
** during calls to sqlite3PagerWrite() while trying to journal multiple
** pages belonging to the same sector.
**
** The doNotSpill ROLLBACK and OFF bits inhibit all cache spilling
** regardless of whether or not a sync is required. This is set during
** a rollback or by user request, respectively.
**
** Spilling is also prohibited when in an error state since that could
** lead to database corruption. In the current implementation it
** is impossible for sqlite3PcacheFetch() to be called with createFlag==3
** while in the error state, hence it is impossible for this routine to
** be called in the error state. Nevertheless, we include a NEVER()
** test for the error state as a safeguard against future changes.
*/
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
return SQLITE_OK
}
if (*TPager)(unsafe.Pointer(pPager)).FdoNotSpill != 0 && (int32((*TPager)(unsafe.Pointer(pPager)).FdoNotSpill)&(libc.Int32FromInt32(SPILLFLAG_ROLLBACK)|libc.Int32FromInt32(SPILLFLAG_OFF)) != 0 || int32((*TPgHdr)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_NEED_SYNC) != 0) {
return SQLITE_OK
}
*(*Tu32)(unsafe.Pointer(pPager + 248 + 3*4))++
(*TPgHdr)(unsafe.Pointer(pPg)).FpDirty = uintptr(0)
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
/* Write a single frame for this page to the log. */
rc = _subjournalPageIfRequired(tls, pPg)
if rc == SQLITE_OK {
rc = _pagerWalFrames(tls, pPager, pPg, uint32(0), 0)
}
} else {
/* Sync the journal file if required. */
if int32((*TPgHdr)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_NEED_SYNC) != 0 || int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_WRITER_CACHEMOD) {
rc = _syncJournal(tls, pPager, int32(1))
}
/* Write the contents of the page out to the database file. */
if rc == SQLITE_OK {
rc = _pager_write_pagelist(tls, pPager, pPg)
}
}
/* Mark the page as clean. */
if rc == SQLITE_OK {
_sqlite3PcacheMakeClean(tls, pPg)
}
return _pager_error(tls, pPager, rc)
}
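// Hedged sketch (hypothetical helper): the early-return test above reduces to
// this predicate. Spilling is refused outright while a rollback is active or
// spilling is disabled, and refused for pages that still need a journal sync
// while the NOSYNC bit is set.
func _exampleMaySpill(doNotSpill uint8, pgFlags uint16) bool {
if doNotSpill == 0 {
return true
}
if int32(doNotSpill)&(SPILLFLAG_ROLLBACK|SPILLFLAG_OFF) != 0 {
return false
}
return int32(pgFlags)&PGHDR_NEED_SYNC == 0
}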
// C documentation
//
// /*
// ** Flush all unreferenced dirty pages to disk.
// */
func _sqlite3PagerFlush(tls *libc.TLS, pPager uintptr) (r int32) {
var pList, pNext uintptr
var rc int32
_, _, _ = pList, pNext, rc
rc = (*TPager)(unsafe.Pointer(pPager)).FerrCode
if !((*TPager)(unsafe.Pointer(pPager)).FmemDb != 0) {
pList = _sqlite3PcacheDirtyList(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
for rc == SQLITE_OK && pList != 0 {
pNext = (*TPgHdr)(unsafe.Pointer(pList)).FpDirty
if (*TPgHdr)(unsafe.Pointer(pList)).FnRef == 0 {
rc = _pagerStress(tls, pPager, pList)
}
pList = pNext
}
}
return rc
}
// C documentation
//
// /*
// ** Allocate and initialize a new Pager object and put a pointer to it
// ** in *ppPager. The pager should eventually be freed by passing it
// ** to sqlite3PagerClose().
// **
// ** The zFilename argument is the path to the database file to open.
// ** If zFilename is NULL then a randomly-named temporary file is created
// ** and used as the file to be cached. Temporary files are deleted
// ** automatically when they are closed. If zFilename is ":memory:" then
// ** all information is held in cache. It is never written to disk.
// ** This can be used to implement an in-memory database.
// **
// ** The nExtra parameter specifies the number of bytes of space allocated
// ** along with each page reference. This space is available to the user
// ** via the sqlite3PagerGetExtra() API. When a new page is allocated, the
// ** first 8 bytes of this space are zeroed but the remainder is uninitialized.
// ** (The extra space is used by btree as the MemPage object.)
// **
// ** The flags argument is used to specify properties that affect the
// ** operation of the pager. It should be passed some bitwise combination
// ** of the PAGER_* flags.
// **
// ** The vfsFlags parameter is a bitmask to pass to the flags parameter
// ** of the xOpen() method of the supplied VFS when opening files.
// **
// ** If the pager object is allocated and the specified file opened
// ** successfully, SQLITE_OK is returned and *ppPager set to point to
// ** the new pager object. If an error occurs, *ppPager is set to NULL
// ** and an error code is returned. This function may return SQLITE_NOMEM
// ** (sqlite3Malloc() is used to allocate memory), SQLITE_CANTOPEN or
// ** various SQLITE_IO_XXX errors.
// */
func _sqlite3PagerOpen(tls *libc.TLS, pVfs uintptr, ppPager uintptr, zFilename uintptr, nExtra int32, flags int32, vfsFlags int32, xReinit uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iDc, journalFileSize, memDb, memJM, nPathname, nUriByte, pcacheSize, rc, readOnly, tempFile, useJournal, v4 int32
var pPtr, z, zPathname, zUri, v1, v5 uintptr
var _ /* fout at bp+12 */ int32
var _ /* pPager at bp+0 */ uintptr
var _ /* szPageDflt at bp+8 */ Tu32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = iDc, journalFileSize, memDb, memJM, nPathname, nUriByte, pPtr, pcacheSize, rc, readOnly, tempFile, useJournal, z, zPathname, zUri, v1, v4, v5
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) /* Pager object to allocate and return */
rc = SQLITE_OK /* Return code */
tempFile = 0 /* True for temp files (incl. in-memory files) */
memDb = 0 /* True if this is an in-memory file */
memJM = 0 /* Memory journal mode */
readOnly = 0 /* True if this is a read-only file */
zPathname = uintptr(0) /* Full path to database file */
nPathname = 0 /* Number of bytes in zPathname */
useJournal = libc.BoolInt32(flags&int32(PAGER_OMIT_JOURNAL) == 0) /* False to omit journal */
pcacheSize = _sqlite3PcacheSize(tls) /* Bytes to allocate for PCache */
*(*Tu32)(unsafe.Pointer(bp + 8)) = uint32(SQLITE_DEFAULT_PAGE_SIZE) /* Default page size */
zUri = uintptr(0) /* URI args to copy */
nUriByte = int32(1) /* Number of bytes of URI args at *zUri */
/* Figure out how much space is required for each journal file-handle
** (there are two of them, the main journal and the sub-journal). */
journalFileSize = (_sqlite3JournalSize(tls, pVfs) + int32(7)) & ^libc.Int32FromInt32(7)
/* Set the output variable to NULL in case an error occurs. */
*(*uintptr)(unsafe.Pointer(ppPager)) = uintptr(0)
if flags&int32(PAGER_MEMORY) != 0 {
memDb = int32(1)
if zFilename != 0 && *(*int8)(unsafe.Pointer(zFilename)) != 0 {
zPathname = _sqlite3DbStrDup(tls, uintptr(0), zFilename)
if zPathname == uintptr(0) {
return int32(SQLITE_NOMEM)
}
nPathname = _sqlite3Strlen30(tls, zPathname)
zFilename = uintptr(0)
}
}
/* Compute and store the full pathname in an allocated buffer pointed
** to by zPathname, length nPathname. Or, if this is a temporary file,
** leave both nPathname and zPathname set to 0.
*/
if zFilename != 0 && *(*int8)(unsafe.Pointer(zFilename)) != 0 {
nPathname = (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FmxPathname + int32(1)
zPathname = _sqlite3DbMallocRaw(tls, uintptr(0), uint64(nPathname*int32(2)))
if zPathname == uintptr(0) {
return int32(SQLITE_NOMEM)
}
*(*int8)(unsafe.Pointer(zPathname)) = 0 /* Make sure initialized even if FullPathname() fails */
rc = _sqlite3OsFullPathname(tls, pVfs, zFilename, nPathname, zPathname)
if rc != SQLITE_OK {
if rc == libc.Int32FromInt32(SQLITE_OK)|libc.Int32FromInt32(2)<<libc.Int32FromInt32(8) {
if vfsFlags&int32(SQLITE_OPEN_NOFOLLOW) != 0 {
rc = libc.Int32FromInt32(SQLITE_CANTOPEN) | libc.Int32FromInt32(6)<<libc.Int32FromInt32(8)
} else {
rc = SQLITE_OK
}
}
}
nPathname = _sqlite3Strlen30(tls, zPathname)
/* The URI query parameters, if any, follow the zero terminator of zFilename. */
v1 = zFilename + uintptr(_sqlite3Strlen30(tls, zFilename)+int32(1))
zUri = v1
z = v1
for *(*int8)(unsafe.Pointer(z)) != 0 {
z += uintptr(libc.Xstrlen(tls, z)) + uintptr(1)
z += uintptr(libc.Xstrlen(tls, z)) + uintptr(1)
}
nUriByte = int32(int64(z+uintptr(1)) - int64(zUri))
if rc == SQLITE_OK && nPathname+int32(8) > (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FmxPathname {
/* This branch is taken when the journal path required by
** the database being opened will be more than pVfs->mxPathname
** bytes in length. This means the database cannot be opened,
** as it will not be possible to open the journal file or even
** check for a hot-journal before reading.
*/
rc = _sqlite3CantopenError(tls, int32(61558))
}
if rc != SQLITE_OK {
_sqlite3DbFree(tls, uintptr(0), zPathname)
return rc
}
}
/* Allocate memory for the Pager structure, PCache object, the
** three file descriptors, the database file name and the journal
** file name. The layout in memory is as follows:
**
** Pager object (sizeof(Pager) bytes)
** PCache object (sqlite3PcacheSize() bytes)
** Database file handle (pVfs->szOsFile bytes)
** Sub-journal file handle (journalFileSize bytes)
** Main journal file handle (journalFileSize bytes)
** Ptr back to the Pager (sizeof(Pager*) bytes)
** \0\0\0\0 database prefix (4 bytes)
** Database file name (nPathname+1 bytes)
** URI query parameters (nUriByte bytes)
** Journal filename (nPathname+8+1 bytes)
** WAL filename (nPathname+4+1 bytes)
** \0\0\0 terminator (3 bytes)
**
** Some 3rd-party software, over which we have no control, depends on
** the specific order of the filenames and the \0 separators between them
** so that it can (for example) find the database filename given the WAL
** filename without using the sqlite3_filename_database() API. This is a
** misuse of SQLite and a bug in the 3rd-party software, but the 3rd-party
** software is in widespread use, so we try to avoid changing the filename
** order and formatting if possible. In particular, the details of the
** filename format expected by 3rd-party software should be as follows:
**
** - Main Database Path
** - \0
** - Multiple URI components consisting of:
** - Key
** - \0
** - Value
** - \0
** - \0
** - Journal Path
** - \0
** - WAL Path (zWALName)
** - \0
**
** The sqlite3_create_filename() interface and the databaseFilename() utility
** that is used by sqlite3_filename_database() and kin also depend on the
** specific formatting and order of the various filenames, so if the format
** changes here, be sure to change it there as well.
*/
pPtr = _sqlite3MallocZero(tls, (libc.Uint64FromInt64(312)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7))+uint64((pcacheSize+libc.Int32FromInt32(7)) & ^libc.Int32FromInt32(7))+uint64(((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FszOsFile+libc.Int32FromInt32(7)) & ^libc.Int32FromInt32(7))+uint64(journalFileSize*int32(2))+uint64(__SIZEOF_POINTER__)+uint64(4)+uint64(nPathname)+uint64(1)+uint64(nUriByte)+uint64(nPathname)+uint64(8)+uint64(1)+uint64(nPathname)+uint64(4)+uint64(1)+uint64(3))
if !(pPtr != 0) {
_sqlite3DbFree(tls, uintptr(0), zPathname)
return int32(SQLITE_NOMEM)
}
*(*uintptr)(unsafe.Pointer(bp)) = pPtr
pPtr += uintptr((libc.Uint64FromInt64(312) + libc.Uint64FromInt32(7)) & uint64(^libc.Int32FromInt32(7)))
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpPCache = pPtr
pPtr += uintptr((pcacheSize + libc.Int32FromInt32(7)) & ^libc.Int32FromInt32(7))
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Ffd = pPtr
pPtr += uintptr(((*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FszOsFile + libc.Int32FromInt32(7)) & ^libc.Int32FromInt32(7))
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fsjfd = pPtr
pPtr += uintptr(journalFileSize)
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fjfd = pPtr
pPtr += uintptr(journalFileSize)
libc.Xmemcpy(tls, pPtr, bp, uint64(__SIZEOF_POINTER__))
pPtr += uintptr(__SIZEOF_POINTER__)
/* Fill in the Pager.zFilename and pPager.zQueryParam fields */
pPtr += uintptr(4) /* Skip zero prefix */
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzFilename = pPtr
if nPathname > 0 {
libc.Xmemcpy(tls, pPtr, zPathname, uint64(nPathname))
pPtr += uintptr(nPathname + int32(1))
if zUri != 0 {
libc.Xmemcpy(tls, pPtr, zUri, uint64(nUriByte))
pPtr += uintptr(nUriByte)
} else {
pPtr++
}
}
/* Fill in Pager.zJournal */
if nPathname > 0 {
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzJournal = pPtr
libc.Xmemcpy(tls, pPtr, zPathname, uint64(nPathname))
pPtr += uintptr(nPathname)
libc.Xmemcpy(tls, pPtr, __ccgo_ts+4080, uint64(8))
pPtr += uintptr(libc.Int32FromInt32(8) + libc.Int32FromInt32(1))
} else {
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzJournal = uintptr(0)
}
/* Fill in Pager.zWal */
if nPathname > 0 {
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzWal = pPtr
libc.Xmemcpy(tls, pPtr, zPathname, uint64(nPathname))
pPtr += uintptr(nPathname)
libc.Xmemcpy(tls, pPtr, __ccgo_ts+4089, uint64(4))
pPtr += uintptr(libc.Int32FromInt32(4) + libc.Int32FromInt32(1))
} else {
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzWal = uintptr(0)
}
_ = pPtr /* Suppress warning about unused pPtr value */
if nPathname != 0 {
_sqlite3DbFree(tls, uintptr(0), zPathname)
}
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpVfs = pVfs
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FvfsFlags = uint32(vfsFlags)
/* Open the pager file.
*/
if !(zFilename != 0 && *(*int8)(unsafe.Pointer(zFilename)) != 0) {
goto _2
}
*(*int32)(unsafe.Pointer(bp + 12)) = 0 /* VFS flags returned by xOpen() */
rc = _sqlite3OsOpen(tls, pVfs, (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzFilename, (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Ffd, vfsFlags, bp+12)
v4 = libc.BoolInt32(*(*int32)(unsafe.Pointer(bp + 12))&libc.Int32FromInt32(SQLITE_OPEN_MEMORY) != libc.Int32FromInt32(0))
memJM = v4
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FmemVfs = uint8(v4)
readOnly = libc.BoolInt32(*(*int32)(unsafe.Pointer(bp + 12))&int32(SQLITE_OPEN_READONLY) != 0)
/* If the file was successfully opened for read/write access,
** choose a default page size in case we have to create the
** database file. The default page size is the maximum of:
**
** + SQLITE_DEFAULT_PAGE_SIZE,
** + The value returned by sqlite3OsSectorSize()
** + The largest page size that can be written atomically.
*/
if rc == SQLITE_OK {
iDc = _sqlite3OsDeviceCharacteristics(tls, (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Ffd)
if !(readOnly != 0) {
_setSectorSize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if *(*Tu32)(unsafe.Pointer(bp + 8)) < (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FsectorSize {
if (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FsectorSize > uint32(SQLITE_MAX_DEFAULT_PAGE_SIZE) {
*(*Tu32)(unsafe.Pointer(bp + 8)) = uint32(SQLITE_MAX_DEFAULT_PAGE_SIZE)
} else {
*(*Tu32)(unsafe.Pointer(bp + 8)) = (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FsectorSize
}
}
}
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FnoLock = uint8(Xsqlite3_uri_boolean(tls, (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzFilename, __ccgo_ts+4094, 0))
if iDc&int32(SQLITE_IOCAP_IMMUTABLE) != 0 || Xsqlite3_uri_boolean(tls, (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FzFilename, __ccgo_ts+4101, 0) != 0 {
vfsFlags |= int32(SQLITE_OPEN_READONLY)
goto act_like_temp_file
}
}
goto _3
_2:
;
/* If a temporary file is requested, it is not opened immediately.
** In this case we accept the default page size and delay actually
** opening the file until the first call to OsWrite().
**
** This branch is also run for an in-memory database. An in-memory
** database is the same as a temp-file that is never written out to
** disk and uses an in-memory rollback journal.
**
** This branch also runs for files marked as immutable.
*/
goto act_like_temp_file
act_like_temp_file:
;
tempFile = int32(1)
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FeState = uint8(PAGER_READER) /* Pretend we already have a lock */
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FeLock = uint8(EXCLUSIVE_LOCK) /* Pretend we are in EXCLUSIVE mode */
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FnoLock = uint8(1) /* Do no locking */
readOnly = vfsFlags & int32(SQLITE_OPEN_READONLY)
_3:
;
/* The following call to PagerSetPagesize() serves to set the value of
** Pager.pageSize and to allocate the Pager.pTmpSpace buffer.
*/
if rc == SQLITE_OK {
rc = _sqlite3PagerSetPagesize(tls, *(*uintptr)(unsafe.Pointer(bp)), bp+8, -int32(1))
}
/* Initialize the PCache object. */
if rc == SQLITE_OK {
nExtra = (nExtra + int32(7)) & ^libc.Int32FromInt32(7)
if !(memDb != 0) {
v5 = __ccgo_fp(_pagerStress)
} else {
v5 = uintptr(0)
}
rc = _sqlite3PcacheOpen(tls, int32(*(*Tu32)(unsafe.Pointer(bp + 8))), nExtra, libc.BoolInt32(!(memDb != 0)), v5, *(*uintptr)(unsafe.Pointer(bp)), (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpPCache)
}
/* If an error occurred above, free the Pager structure and close the file.
*/
if rc != SQLITE_OK {
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Ffd)
_sqlite3PageFree(tls, (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpTmpSpace)
Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp)))
return rc
}
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FuseJournal = uint8(useJournal)
/* pPager->stmtOpen = 0; */
/* pPager->stmtInUse = 0; */
/* pPager->nRef = 0; */
/* pPager->stmtSize = 0; */
/* pPager->stmtJSize = 0; */
/* pPager->nPage = 0; */
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FmxPgno = uint32(SQLITE_MAX_PAGE_COUNT)
/* pPager->state = PAGER_UNLOCK; */
/* pPager->errMask = 0; */
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FtempFile = uint8(tempFile)
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FexclusiveMode = uint8(tempFile)
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FchangeCountDone = (*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FtempFile
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FmemDb = uint8(memDb)
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FreadOnly = uint8(readOnly)
_sqlite3PagerSetFlags(tls, *(*uintptr)(unsafe.Pointer(bp)), uint32(libc.Int32FromInt32(SQLITE_DEFAULT_SYNCHRONOUS)+libc.Int32FromInt32(1)|libc.Int32FromInt32(PAGER_CACHESPILL)))
/* pPager->pFirst = 0; */
/* pPager->pFirstSynced = 0; */
/* pPager->pLast = 0; */
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FnExtra = uint16(nExtra)
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FjournalSizeLimit = int64(-int32(1))
_setSectorSize(tls, *(*uintptr)(unsafe.Pointer(bp)))
if !(useJournal != 0) {
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FjournalMode = uint8(PAGER_JOURNALMODE_OFF)
} else {
if memDb != 0 || memJM != 0 {
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FjournalMode = uint8(PAGER_JOURNALMODE_MEMORY)
}
}
/* pPager->xBusyHandler = 0; */
/* pPager->pBusyHandlerArg = 0; */
(*TPager)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FxReiniter = xReinit
_setGetterMethod(tls, *(*uintptr)(unsafe.Pointer(bp)))
/* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */
/* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */
*(*uintptr)(unsafe.Pointer(ppPager)) = *(*uintptr)(unsafe.Pointer(bp))
return SQLITE_OK
}
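// Hedged sketch (hypothetical helper): the single allocation performed above
// rounds every embedded sub-object (Pager, PCache, file handles) to an 8-byte
// boundary; this is the (n+7) & ^7 arithmetic used both when sizing the malloc
// and when advancing pPtr through the buffer.
func _exampleRound8(n int32) int32 {
return (n + int32(7)) & ^libc.Int32FromInt32(7)
}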
// C documentation
//
// /*
// ** Return the sqlite3_file for the main database given the name
// ** of the corresponding WAL or Journal name as passed into
// ** xOpen.
// */
func Xsqlite3_database_file_object(tls *libc.TLS, zName uintptr) (r uintptr) {
var p, pPager uintptr
_, _ = p, pPager
for int32(*(*int8)(unsafe.Pointer(zName + uintptr(-libc.Int32FromInt32(1))))) != 0 || int32(*(*int8)(unsafe.Pointer(zName + uintptr(-libc.Int32FromInt32(2))))) != 0 || int32(*(*int8)(unsafe.Pointer(zName + uintptr(-libc.Int32FromInt32(3))))) != 0 || int32(*(*int8)(unsafe.Pointer(zName + uintptr(-libc.Int32FromInt32(4))))) != 0 {
zName--
}
p = zName - uintptr(4) - uintptr(8)
pPager = *(*uintptr)(unsafe.Pointer(p))
return (*TPager)(unsafe.Pointer(pPager)).Ffd
}
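// Note on the pointer recovery above: per the allocation layout documented in
// _sqlite3PagerOpen, the journal and WAL filenames are preceded by the
// database filename, its URI parameters and a "\0\0\0\0" prefix, and the
// pointer-sized Pager address is copied immediately before that prefix. The
// loop therefore rewinds zName until the four preceding bytes are all zero,
// then steps back over the 4-byte prefix and the 8-byte pointer to load the
// Pager and return its main database file handle.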
// C documentation
//
// /*
// ** This function is called after transitioning from PAGER_UNLOCK to
// ** PAGER_SHARED state. It tests if there is a hot journal present in
// ** the file-system for the given pager. A hot journal is one that
// ** needs to be played back. According to this function, a hot-journal
// ** file exists if the following criteria are met:
// **
// ** * The journal file exists in the file system, and
// ** * No process holds a RESERVED or greater lock on the database file, and
// ** * The database file itself is greater than 0 bytes in size, and
// ** * The first byte of the journal file exists and is not 0x00.
// **
// ** If the current size of the database file is 0 but a journal file
// ** exists, that is probably an old journal left over from a prior
// ** database with the same name. In this case the journal file is
// ** just deleted using OsDelete, *pExists is set to 0 and SQLITE_OK
// ** is returned.
// **
// ** This routine does not check if there is a super-journal filename
// ** at the end of the file. If there is, and that super-journal file
// ** does not exist, then the journal file is not really hot. In this
// ** case this routine will return a false-positive. The pager_playback()
// ** routine will discover that the journal file is not really hot and
// ** will not roll it back.
// **
// ** If a hot-journal file is found to exist, *pExists is set to 1 and
// ** SQLITE_OK returned. If no hot-journal file is present, *pExists is
// ** set to 0 and SQLITE_OK returned. If an IO error occurs while trying
// ** to determine whether or not a hot-journal file exists, the IO error
// ** code is returned and the value of *pExists is undefined.
// */
func _hasHotJournal(tls *libc.TLS, pPager uintptr, pExists uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var jrnlOpen, rc int32
var pVfs uintptr
var _ /* exists at bp+0 */ int32
var _ /* f at bp+12 */ int32
var _ /* first at bp+16 */ Tu8
var _ /* locked at bp+4 */ int32
var _ /* nPage at bp+8 */ TPgno
_, _, _ = jrnlOpen, pVfs, rc
pVfs = (*TPager)(unsafe.Pointer(pPager)).FpVfs
rc = SQLITE_OK /* Return code */
*(*int32)(unsafe.Pointer(bp)) = int32(1) /* True if a journal file is present */
jrnlOpen = libc.BoolInt32(!!((*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != libc.UintptrFromInt32(0)))
*(*int32)(unsafe.Pointer(pExists)) = 0
if !(jrnlOpen != 0) {
rc = _sqlite3OsAccess(tls, pVfs, (*TPager)(unsafe.Pointer(pPager)).FzJournal, SQLITE_ACCESS_EXISTS, bp)
}
if rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp)) != 0 {
*(*int32)(unsafe.Pointer(bp + 4)) = 0 /* True if some process holds a RESERVED lock */
/* Race condition here: Another process might have been holding the
** the RESERVED lock and have a journal open at the sqlite3OsAccess()
** call above, but then delete the journal and drop the lock before
** we get to the following sqlite3OsCheckReservedLock() call. If that
** is the case, this routine might think there is a hot journal when
** in fact there is none. This results in a false-positive which will
** be dealt with by the playback routine. Ticket #3883.
*/
rc = _sqlite3OsCheckReservedLock(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, bp+4)
if rc == SQLITE_OK && !(*(*int32)(unsafe.Pointer(bp + 4)) != 0) { /* Number of pages in database file */
rc = _pagerPagecount(tls, pPager, bp+8)
if rc == SQLITE_OK {
/* If the database is zero pages in size, that means that either (1) the
** journal is a remnant from a prior database with the same name where
** the database file but not the journal was deleted, or (2) the initial
** transaction that populates a new database is being rolled back.
** In either case, the journal file can be deleted. However, take care
** not to delete the journal file if it is already open due to
** journal_mode=PERSIST.
*/
if *(*TPgno)(unsafe.Pointer(bp + 8)) == uint32(0) && !(jrnlOpen != 0) {
_sqlite3BeginBenignMalloc(tls)
if _pagerLockDb(tls, pPager, int32(RESERVED_LOCK)) == SQLITE_OK {
_sqlite3OsDelete(tls, pVfs, (*TPager)(unsafe.Pointer(pPager)).FzJournal, 0)
if !((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0) {
_pagerUnlockDb(tls, pPager, int32(SHARED_LOCK))
}
}
_sqlite3EndBenignMalloc(tls)
} else {
/* The journal file exists and no other connection has a reserved
** or greater lock on the database file. Now check that there is
** at least one non-zero byte at the start of the journal file.
** If there is, then we consider this journal to be hot. If not,
** it can be ignored.
*/
if !(jrnlOpen != 0) {
*(*int32)(unsafe.Pointer(bp + 12)) = libc.Int32FromInt32(SQLITE_OPEN_READONLY) | libc.Int32FromInt32(SQLITE_OPEN_MAIN_JOURNAL)
rc = _sqlite3OsOpen(tls, pVfs, (*TPager)(unsafe.Pointer(pPager)).FzJournal, (*TPager)(unsafe.Pointer(pPager)).Fjfd, *(*int32)(unsafe.Pointer(bp + 12)), bp+12)
}
if rc == SQLITE_OK {
*(*Tu8)(unsafe.Pointer(bp + 16)) = uint8(0)
rc = _sqlite3OsRead(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, bp+16, int32(1), 0)
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(2)< (*TPager)(unsafe.Pointer(pPager)).FmxPgno {
rc = int32(SQLITE_FULL)
if pgno <= (*TPager)(unsafe.Pointer(pPager)).FdbSize {
_sqlite3PcacheRelease(tls, pPg)
pPg = uintptr(0)
}
goto pager_acquire_err
}
if noContent != 0 {
/* Failure to set the bits in the InJournal bit-vectors is benign.
** It merely means that we might do some extra work to journal a
** page that does not need to be journaled. Nevertheless, be sure
** to test the case where a malloc error occurs while trying to set
** a bit in a bit vector.
*/
_sqlite3BeginBenignMalloc(tls)
if pgno <= (*TPager)(unsafe.Pointer(pPager)).FdbOrigSize {
_sqlite3BitvecSet(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal, pgno)
}
_addToSavepointBitvecs(tls, pPager, pgno)
_sqlite3EndBenignMalloc(tls)
}
libc.Xmemset(tls, (*TPgHdr)(unsafe.Pointer(pPg)).FpData, 0, uint64((*TPager)(unsafe.Pointer(pPager)).FpageSize))
} else {
*(*Tu32)(unsafe.Pointer(pPager + 248 + 1*4))++
rc = _readDbPage(tls, pPg)
if rc != SQLITE_OK {
goto pager_acquire_err
}
}
}
return SQLITE_OK
goto pager_acquire_err
pager_acquire_err:
;
if pPg != 0 {
_sqlite3PcacheDrop(tls, pPg)
}
_pagerUnlockIfUnused(tls, pPager)
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return rc
}
// C documentation
//
// /* The page getter for when memory-mapped I/O is enabled */
func _getPageMMap(tls *libc.TLS, pPager uintptr, pgno TPgno, ppPage uintptr, flags int32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var bMmapOk, rc int32
var _ /* iFrame at bp+8 */ Tu32
var _ /* pData at bp+16 */ uintptr
var _ /* pPg at bp+0 */ uintptr
_, _ = bMmapOk, rc
rc = SQLITE_OK
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
*(*Tu32)(unsafe.Pointer(bp + 8)) = uint32(0) /* Frame to read from WAL file */
/* It is acceptable to use a read-only (mmap) page for any page except
** page 1 if there is no write-transaction open or the ACQUIRE_READONLY
** flag was specified by the caller. And so long as the db is not a
** temporary or in-memory database. */
bMmapOk = libc.BoolInt32(pgno > uint32(1) && (int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_READER) || flags&int32(PAGER_GET_READONLY) != 0))
/* Optimization note: Adding the "pgno<=1" term before "pgno==0" here
** allows the compiler optimizer to reuse the results of the "pgno>1"
** test in the previous statement, and avoid testing pgno==0 in the
** common case where pgno is large. */
if pgno <= uint32(1) && pgno == uint32(0) {
return _sqlite3CorruptError(tls, int32(62407))
}
if bMmapOk != 0 && (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
rc = _sqlite3WalFindFrame(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, pgno, bp+8)
if rc != SQLITE_OK {
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return rc
}
}
if bMmapOk != 0 && *(*Tu32)(unsafe.Pointer(bp + 8)) == uint32(0) {
*(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0)
rc = _sqlite3OsFetch(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int64(pgno-libc.Uint32FromInt32(1))*(*TPager)(unsafe.Pointer(pPager)).FpageSize, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), bp+16)
if rc == SQLITE_OK && *(*uintptr)(unsafe.Pointer(bp + 16)) != 0 {
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) > int32(PAGER_READER) || (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 {
*(*uintptr)(unsafe.Pointer(bp)) = _sqlite3PagerLookup(tls, pPager, pgno)
}
if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) {
rc = _pagerAcquireMapPage(tls, pPager, pgno, *(*uintptr)(unsafe.Pointer(bp + 16)), bp)
} else {
_sqlite3OsUnfetch(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int64(pgno-libc.Uint32FromInt32(1))*(*TPager)(unsafe.Pointer(pPager)).FpageSize, *(*uintptr)(unsafe.Pointer(bp + 16)))
}
if *(*uintptr)(unsafe.Pointer(bp)) != 0 {
*(*uintptr)(unsafe.Pointer(ppPage)) = *(*uintptr)(unsafe.Pointer(bp))
return SQLITE_OK
}
}
if rc != SQLITE_OK {
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return rc
}
}
return _getPageNormal(tls, pPager, pgno, ppPage, flags)
}
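// Hedged sketch (hypothetical helper): the mmap fast path above is only taken
// for pages other than page 1, when either no write transaction is open or the
// caller asked for a read-only reference, and only when the page is not
// currently in the WAL (iFrame==0); everything else falls through to
// _getPageNormal.
func _exampleMayUseMmap(pgno uint32, eState int32, flags int32, iFrame uint32) bool {
return pgno > uint32(1) && (eState == int32(PAGER_READER) || flags&int32(PAGER_GET_READONLY) != 0) && iFrame == uint32(0)
}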
// C documentation
//
// /* The page getter method for when the pager is an error state */
func _getPageError(tls *libc.TLS, pPager uintptr, pgno TPgno, ppPage uintptr, flags int32) (r int32) {
_ = pgno
_ = flags
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return (*TPager)(unsafe.Pointer(pPager)).FerrCode
}
// C documentation
//
// /* Dispatch all page fetch requests to the appropriate getter method.
// */
func _sqlite3PagerGet(tls *libc.TLS, pPager uintptr, pgno TPgno, ppPage uintptr, flags int32) (r int32) {
/* Normal, high-speed version of sqlite3PagerGet() */
return (*(*func(*libc.TLS, uintptr, TPgno, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*TPager)(unsafe.Pointer(pPager)).FxGet})))(tls, pPager, pgno, ppPage, flags)
}
// C documentation
//
// /*
// ** Acquire a page if it is already in the in-memory cache. Do
// ** not read the page from disk. Return a pointer to the page,
// ** or 0 if the page is not in cache.
// **
// ** See also sqlite3PagerGet(). The difference between this routine
// ** and sqlite3PagerGet() is that _get() will go to the disk and read
// ** in the page if the page is not already in cache. This routine
// ** returns NULL if the page is not in cache or if a disk I/O error
// ** has ever happened.
// */
func _sqlite3PagerLookup(tls *libc.TLS, pPager uintptr, pgno TPgno) (r uintptr) {
var pPage uintptr
_ = pPage
pPage = _sqlite3PcacheFetch(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache, pgno, 0)
if pPage == uintptr(0) {
return uintptr(0)
}
return _sqlite3PcacheFetchFinish(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache, pgno, pPage)
}
// C documentation
//
// /*
// ** Release a page reference.
// **
// ** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() may only be used
// ** if we know that the page being released is not the last reference to page1.
// ** The btree layer always holds page1 open until the end, so these first
// ** two routines can be used to release any page other than BtShared.pPage1.
// ** The assert() at tag-20230419-2 proves that this constraint is always
// ** honored.
// **
// ** Use sqlite3PagerUnrefPageOne() to release page1. This latter routine
// ** checks the total number of outstanding pages and if the number of
// ** pages reaches zero it drops the database lock.
// */
func _sqlite3PagerUnrefNotNull(tls *libc.TLS, pPg uintptr) {
if int32((*TDbPage)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_MMAP) != 0 {
/* Page1 is never memory mapped */
_pagerReleaseMapPage(tls, pPg)
} else {
_sqlite3PcacheRelease(tls, pPg)
}
/* Do not use this routine to release the last reference to page1 */
/* tag-20230419-2 */
}
func _sqlite3PagerUnref(tls *libc.TLS, pPg uintptr) {
if pPg != 0 {
_sqlite3PagerUnrefNotNull(tls, pPg)
}
}
func _sqlite3PagerUnrefPageOne(tls *libc.TLS, pPg uintptr) {
var pPager uintptr
_ = pPager
/* Page1 is never memory mapped */
pPager = (*TDbPage)(unsafe.Pointer(pPg)).FpPager
_sqlite3PcacheRelease(tls, pPg)
_pagerUnlockIfUnused(tls, pPager)
}
// C documentation
//
// /*
// ** This function is called at the start of every write transaction.
// ** There must already be a RESERVED or EXCLUSIVE lock on the database
// ** file when this routine is called.
// **
// ** Open the journal file for pager pPager and write a journal header
// ** to the start of it. If there are active savepoints, open the sub-journal
// ** as well. This function is only used when the journal file is being
// ** opened to write a rollback log for a transaction. It is not used
// ** when opening a hot journal file to roll it back.
// **
// ** If the journal file is already open (as it may be in exclusive mode),
// ** then this function just writes a journal header to the start of the
// ** already open file.
// **
// ** Whether or not the journal file is opened by this function, the
// ** Pager.pInJournal bitvec structure is allocated.
// **
// ** Return SQLITE_OK if everything is successful. Otherwise, return
// ** SQLITE_NOMEM if the attempt to allocate Pager.pInJournal fails, or
// ** an IO error code if opening or writing the journal file fails.
// */
func _pager_open_journal(tls *libc.TLS, pPager uintptr) (r int32) {
var flags, nSpill, rc int32
var pVfs uintptr
_, _, _, _ = flags, nSpill, pVfs, rc
rc = SQLITE_OK /* Return code */
pVfs = (*TPager)(unsafe.Pointer(pPager)).FpVfs /* Local cache of vfs pointer */
/* If already in the error state, this function is a no-op. But on
** the other hand, this routine is never called if we are already in
** an error state. */
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
return (*TPager)(unsafe.Pointer(pPager)).FerrCode
}
if !((*TPager)(unsafe.Pointer(pPager)).FpWal != libc.UintptrFromInt32(0)) && int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) != int32(PAGER_JOURNALMODE_OFF) {
(*TPager)(unsafe.Pointer(pPager)).FpInJournal = _sqlite3BitvecCreate(tls, (*TPager)(unsafe.Pointer(pPager)).FdbSize)
if (*TPager)(unsafe.Pointer(pPager)).FpInJournal == uintptr(0) {
return int32(SQLITE_NOMEM)
}
/* Open the journal file if it is not already open. */
if !((*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != libc.UintptrFromInt32(0)) {
if int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_MEMORY) {
_sqlite3MemJournalOpen(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
} else {
flags = libc.Int32FromInt32(SQLITE_OPEN_READWRITE) | libc.Int32FromInt32(SQLITE_OPEN_CREATE)
if (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 {
flags |= libc.Int32FromInt32(SQLITE_OPEN_DELETEONCLOSE) | libc.Int32FromInt32(SQLITE_OPEN_TEMP_JOURNAL)
flags |= int32(SQLITE_OPEN_EXCLUSIVE)
nSpill = _sqlite3Config.FnStmtSpill
} else {
flags |= int32(SQLITE_OPEN_MAIN_JOURNAL)
nSpill = _jrnlBufferSize(tls, pPager)
}
/* Verify that the database still has the same name as it did when
** it was originally opened. */
rc = _databaseIsUnmoved(tls, pPager)
if rc == SQLITE_OK {
rc = _sqlite3JournalOpen(tls, pVfs, (*TPager)(unsafe.Pointer(pPager)).FzJournal, (*TPager)(unsafe.Pointer(pPager)).Fjfd, flags, nSpill)
}
}
}
/* Write the first journal header to the journal file and open
** the sub-journal if necessary.
*/
if rc == SQLITE_OK {
/* TODO: Check if all of these are really required. */
(*TPager)(unsafe.Pointer(pPager)).FnRec = 0
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
(*TPager)(unsafe.Pointer(pPager)).FsetSuper = uint8(0)
(*TPager)(unsafe.Pointer(pPager)).FjournalHdr = 0
rc = _writeJournalHdr(tls, pPager)
}
}
if rc != SQLITE_OK {
_sqlite3BitvecDestroy(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal)
(*TPager)(unsafe.Pointer(pPager)).FpInJournal = uintptr(0)
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
} else {
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_WRITER_CACHEMOD)
}
return rc
}
// C documentation
//
// /*
// ** Begin a write-transaction on the specified pager object. If a
// ** write-transaction has already been opened, this function is a no-op.
// **
// ** If the exFlag argument is false, then acquire at least a RESERVED
// ** lock on the database file. If exFlag is true, then acquire at least
// ** an EXCLUSIVE lock. If such a lock is already held, no locking
// ** functions need be called.
// **
// ** If the subjInMemory argument is non-zero, then any sub-journal opened
// ** within this transaction will be opened as an in-memory file. This
// ** has no effect if the sub-journal is already opened (as it may be when
// ** running in exclusive mode) or if the transaction does not require a
// ** sub-journal. If the subjInMemory argument is zero, then any required
// ** sub-journal is implemented in-memory if pPager is an in-memory database,
// ** or using a temporary file otherwise.
// */
func _sqlite3PagerBegin(tls *libc.TLS, pPager uintptr, exFlag int32, subjInMemory int32) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
return (*TPager)(unsafe.Pointer(pPager)).FerrCode
}
(*TPager)(unsafe.Pointer(pPager)).FsubjInMemory = uint8(subjInMemory)
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_READER) {
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
/* If the pager is configured to use locking_mode=exclusive, and an
** exclusive lock on the database is not already held, obtain it now.
*/
if (*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0 && _sqlite3WalExclusiveMode(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, -int32(1)) != 0 {
rc = _pagerLockDb(tls, pPager, int32(EXCLUSIVE_LOCK))
if rc != SQLITE_OK {
return rc
}
_sqlite3WalExclusiveMode(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, int32(1))
}
/* Grab the write lock on the log file. If successful, upgrade to
** PAGER_RESERVED state. Otherwise, return an error code to the caller.
** The busy-handler is not invoked if another connection already
** holds the write-lock. If possible, the upper layer will call it.
*/
rc = _sqlite3WalBeginWriteTransaction(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
} else {
/* Obtain a RESERVED lock on the database file. If the exFlag parameter
** is true, then immediately upgrade this to an EXCLUSIVE lock. The
** busy-handler callback can be used when upgrading to the EXCLUSIVE
** lock, but not when obtaining the RESERVED lock.
*/
rc = _pagerLockDb(tls, pPager, int32(RESERVED_LOCK))
if rc == SQLITE_OK && exFlag != 0 {
rc = _pager_wait_on_lock(tls, pPager, int32(EXCLUSIVE_LOCK))
}
}
if rc == SQLITE_OK {
/* Change to WRITER_LOCKED state.
**
** WAL mode sets Pager.eState to PAGER_WRITER_LOCKED or CACHEMOD
** when it has an open transaction, but never to DBMOD or FINISHED.
** This is because in those states the code to roll back savepoint
** transactions may copy data from the sub-journal into the database
** file as well as into the page cache. Which would be incorrect in
** WAL mode.
*/
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_WRITER_LOCKED)
(*TPager)(unsafe.Pointer(pPager)).FdbHintSize = (*TPager)(unsafe.Pointer(pPager)).FdbSize
(*TPager)(unsafe.Pointer(pPager)).FdbFileSize = (*TPager)(unsafe.Pointer(pPager)).FdbSize
(*TPager)(unsafe.Pointer(pPager)).FdbOrigSize = (*TPager)(unsafe.Pointer(pPager)).FdbSize
(*TPager)(unsafe.Pointer(pPager)).FjournalOff = 0
}
}
return rc
}
// C documentation
//
// /*
// ** Write page pPg onto the end of the rollback journal.
// */
func _pagerAddPageToRollbackJournal(tls *libc.TLS, pPg uintptr) (r int32) {
var cksum Tu32
var iOff Ti64
var pData2, pPager, p1 uintptr
var rc int32
_, _, _, _, _, _ = cksum, iOff, pData2, pPager, rc, p1
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager
iOff = (*TPager)(unsafe.Pointer(pPager)).FjournalOff
/* We should never write to the journal file the page that
** contains the database locks. The following assert verifies
** that we do not. */
pData2 = (*TPgHdr)(unsafe.Pointer(pPg)).FpData
cksum = _pager_cksum(tls, pPager, pData2)
/* Even if an IO or diskfull error occurs while journalling the
** page in the block above, set the need-sync flag for the page.
** Otherwise, when the transaction is rolled back, the logic in
** playback_one_page() will think that the page needs to be restored
** in the database file. And if an IO error occurs while doing so,
** then corruption may follow.
*/
p1 = pPg + 52
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(PGHDR_NEED_SYNC))
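/* Each journal record written below consists of the 4-byte page number,
** the page data itself, and a 4-byte checksum, i.e. 8+pageSize bytes in
** total, which is exactly the amount journalOff is advanced by afterwards. */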
rc = _write32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iOff, (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno)
if rc != SQLITE_OK {
return rc
}
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, pData2, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), iOff+int64(4))
if rc != SQLITE_OK {
return rc
}
rc = _write32bits(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd, iOff+(*TPager)(unsafe.Pointer(pPager)).FpageSize+int64(4), cksum)
if rc != SQLITE_OK {
return rc
}
*(*Ti64)(unsafe.Pointer(pPager + 96)) += int64(8) + (*TPager)(unsafe.Pointer(pPager)).FpageSize
(*TPager)(unsafe.Pointer(pPager)).FnRec++
rc = _sqlite3BitvecSet(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal, (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno)
rc |= _addToSavepointBitvecs(tls, pPager, (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno)
return rc
}
// C documentation
//
// /*
// ** Mark a single data page as writeable. The page is written into the
// ** main journal or sub-journal as required. If the page is written into
// ** one of the journals, the corresponding bit is set in the
// ** Pager.pInJournal bitvec and the PagerSavepoint.pInSavepoint bitvecs
// ** of any open savepoints as appropriate.
// */
func _pager_write(tls *libc.TLS, pPg uintptr) (r int32) {
var pPager, p1, p2 uintptr
var rc int32
_, _, _, _ = pPager, rc, p1, p2
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager
rc = SQLITE_OK
/* This routine is not called unless a write-transaction has already
** been started. The journal file may or may not be open at this point.
** It is never called in the ERROR state.
*/
/* The journal file needs to be opened. Higher level routines have already
** obtained the necessary locks to begin the write-transaction, but the
** rollback journal might not yet be open. Open it now if this is the case.
**
** This is done before calling sqlite3PcacheMakeDirty() on the page.
** Otherwise, if it were done after calling sqlite3PcacheMakeDirty(), then
** an error might occur and the pager would end up in WRITER_LOCKED state
** with pages marked as dirty in the cache.
*/
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_WRITER_LOCKED) {
rc = _pager_open_journal(tls, pPager)
if rc != SQLITE_OK {
return rc
}
}
/* Mark the page that is about to be modified as dirty. */
_sqlite3PcacheMakeDirty(tls, pPg)
/* If a rollback journal is in use, then make sure the page that is about
** to change is in the rollback journal, or, if the page is a new page off
** the end of the file, make sure it is marked as PGHDR_NEED_SYNC.
*/
if (*TPager)(unsafe.Pointer(pPager)).FpInJournal != uintptr(0) && _sqlite3BitvecTestNotNull(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal, (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno) == 0 {
if (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno <= (*TPager)(unsafe.Pointer(pPager)).FdbOrigSize {
rc = _pagerAddPageToRollbackJournal(tls, pPg)
if rc != SQLITE_OK {
return rc
}
} else {
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) != int32(PAGER_WRITER_DBMOD) {
p1 = pPg + 52
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(PGHDR_NEED_SYNC))
}
}
}
/* The PGHDR_DIRTY bit is set above when the page was added to the dirty-list
** and before writing the page into the rollback journal. Wait until now,
** after the page has been successfully journalled, before setting the
** PGHDR_WRITEABLE bit that indicates that the page can be safely modified.
*/
p2 = pPg + 52
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(PGHDR_WRITEABLE))
/* If the statement journal is open and the page is not in it,
** then write the page into the statement journal.
*/
if (*TPager)(unsafe.Pointer(pPager)).FnSavepoint > 0 {
rc = _subjournalPageIfRequired(tls, pPg)
}
/* Update the database size and return. */
if (*TPager)(unsafe.Pointer(pPager)).FdbSize < (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno {
(*TPager)(unsafe.Pointer(pPager)).FdbSize = (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno
}
return rc
}
// C documentation
//
// /*
// ** This is a variant of sqlite3PagerWrite() that runs when the sector size
// ** is larger than the page size. SQLite makes the (reasonable) assumption that
// ** all bytes of a sector are written together by hardware. Hence, all bytes of
// ** a sector need to be journalled in case of a power loss in the middle of
// ** a write.
// **
// ** Usually, the sector size is less than or equal to the page size, in which
// ** case pages can be individually written. This routine only runs in the
// ** exceptional case where the page size is smaller than the sector size.
// */
func _pagerWriteLargeSector(tls *libc.TLS, pPg uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var ii, nPage, needSync, rc int32
var nPageCount, nPagePerSector, pg, pg1 TPgno
var pPage1, pPager, v3, p1, p5, p6 uintptr
var _ /* pPage at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _ = ii, nPage, nPageCount, nPagePerSector, needSync, pPage1, pPager, pg, pg1, rc, v3, p1, p5, p6
rc = SQLITE_OK /* Return code */
nPage = 0 /* Number of pages starting at pg1 to journal */
needSync = 0 /* True if any page has PGHDR_NEED_SYNC */
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager /* The pager that owns pPg */
nPagePerSector = uint32(int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize) / (*TPager)(unsafe.Pointer(pPager)).FpageSize)
/* Set the doNotSpill NOSYNC bit to 1. This is because we cannot allow
** a journal header to be written between the pages journaled by
** this function.
*/
p1 = pPager + 25
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(SPILLFLAG_NOSYNC))
/* This trick assumes that both the page-size and sector-size are
** an integer power of 2. It sets variable pg1 to the identifier
** of the first page of the sector pPg is located on.
*/
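/* Added worked example: with a 4096-byte sector and a 1024-byte page,
** nPagePerSector is 4; for pPg->pgno==11 the computation below gives
** pg1 = ((11-1) & ^(4-1)) + 1 = 9, so pages 9..12 share pPg's sector. */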
pg1 = ((*TPgHdr)(unsafe.Pointer(pPg)).Fpgno-uint32(1)) & ^(nPagePerSector-libc.Uint32FromInt32(1)) + uint32(1)
nPageCount = (*TPager)(unsafe.Pointer(pPager)).FdbSize
if (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno > nPageCount {
nPage = int32((*TPgHdr)(unsafe.Pointer(pPg)).Fpgno - pg1 + uint32(1))
} else {
if pg1+nPagePerSector-uint32(1) > nPageCount {
nPage = int32(nPageCount + uint32(1) - pg1)
} else {
nPage = int32(nPagePerSector)
}
}
ii = 0
for {
if !(ii < nPage && rc == SQLITE_OK) {
break
}
pg = pg1 + uint32(ii)
if pg == (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno || !(_sqlite3BitvecTest(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal, pg) != 0) {
if pg != (*TPager)(unsafe.Pointer(pPager)).FlckPgno {
rc = _sqlite3PagerGet(tls, pPager, pg, bp, 0)
if rc == SQLITE_OK {
rc = _pager_write(tls, *(*uintptr)(unsafe.Pointer(bp)))
if int32((*TPgHdr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fflags)&int32(PGHDR_NEED_SYNC) != 0 {
needSync = int32(1)
}
_sqlite3PagerUnrefNotNull(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
}
} else {
v3 = _sqlite3PagerLookup(tls, pPager, pg)
*(*uintptr)(unsafe.Pointer(bp)) = v3
if v3 != uintptr(0) {
if int32((*TPgHdr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fflags)&int32(PGHDR_NEED_SYNC) != 0 {
needSync = int32(1)
}
_sqlite3PagerUnrefNotNull(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
}
goto _2
_2:
;
ii++
}
/* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages
** starting at pg1, then it needs to be set for all of them. Because
** writing to any of these nPage pages may damage the others, the
** journal file must contain sync()ed copies of all of them
** before any of them can be written out to the database file.
*/
if rc == SQLITE_OK && needSync != 0 {
ii = 0
for {
if !(ii < nPage) {
break
}
pPage1 = _sqlite3PagerLookup(tls, pPager, pg1+uint32(ii))
if pPage1 != 0 {
p5 = pPage1 + 52
*(*Tu16)(unsafe.Pointer(p5)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p5))) | libc.Int32FromInt32(PGHDR_NEED_SYNC))
_sqlite3PagerUnrefNotNull(tls, pPage1)
}
goto _4
_4:
;
ii++
}
}
p6 = pPager + 25
*(*Tu8)(unsafe.Pointer(p6)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p6))) & ^libc.Int32FromInt32(SPILLFLAG_NOSYNC))
return rc
}
// C documentation
//
// /*
// ** Mark a data page as writeable. This routine must be called before
// ** making changes to a page. The caller must check the return value
// ** of this function and be careful not to change any page data unless
// ** this routine returns SQLITE_OK.
// **
// ** The difference between this function and pager_write() is that this
// ** function also deals with the special case where 2 or more pages
// ** fit on a single disk sector. In this case all co-resident pages
// ** must have been written to the journal file before returning.
// **
// ** If an error occurs, SQLITE_NOMEM or an IO error code is returned
// ** as appropriate. Otherwise, SQLITE_OK.
// */
func _sqlite3PagerWrite(tls *libc.TLS, pPg uintptr) (r int32) {
var pPager uintptr
_ = pPager
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager
if int32((*TPgHdr)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_WRITEABLE) != 0 && (*TPager)(unsafe.Pointer(pPager)).FdbSize >= (*TPgHdr)(unsafe.Pointer(pPg)).Fpgno {
if (*TPager)(unsafe.Pointer(pPager)).FnSavepoint != 0 {
return _subjournalPageIfRequired(tls, pPg)
}
return SQLITE_OK
} else {
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
return (*TPager)(unsafe.Pointer(pPager)).FerrCode
} else {
if (*TPager)(unsafe.Pointer(pPager)).FsectorSize > uint32((*TPager)(unsafe.Pointer(pPager)).FpageSize) {
return _pagerWriteLargeSector(tls, pPg)
} else {
return _pager_write(tls, pPg)
}
}
}
return r
}
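// Illustrative usage sketch (added commentary, not part of the upstream
// SQLite sources): the order of calls a higher layer follows before
// modifying page content. sqlite3PagerWrite() must return SQLITE_OK before
// any byte of the page data may be changed. The helper name is hypothetical.
func examplePagerModifyPage(tls *libc.TLS, pPager uintptr, pgno TPgno) int32 {
	bp := tls.Alloc(8)
	defer tls.Free(8)
	// Open (or join) a write transaction; at least a RESERVED lock is taken.
	rc := _sqlite3PagerBegin(tls, pPager, 0, 0)
	if rc == SQLITE_OK {
		rc = _sqlite3PagerGet(tls, pPager, pgno, bp, 0)
	}
	if rc != SQLITE_OK {
		return rc
	}
	pPg := *(*uintptr)(unsafe.Pointer(bp))
	rc = _sqlite3PagerWrite(tls, pPg) // journals the page as required
	// Only after a successful PagerWrite may the buffer returned by
	// _sqlite3PagerGetData(tls, pPg) be modified.
	_sqlite3PagerUnrefNotNull(tls, pPg)
	return rc
}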
/*
** Return TRUE if the page given in the argument was previously passed
** to sqlite3PagerWrite(). In other words, return TRUE if it is ok
** to change the content of the page.
*/
// C documentation
//
// /*
// ** A call to this routine tells the pager that it is not necessary to
// ** write the information on page pPg back to the disk, even though
// ** that page might be marked as dirty. This happens, for example, when
// ** the page has been added as a leaf of the freelist and so its
// ** content no longer matters.
// **
// ** The overlying software layer calls this routine when all of the data
// ** on the given page is unused. The pager marks the page as clean so
// ** that it does not get written to disk.
// **
// ** Tests show that this optimization can quadruple the speed of large
// ** DELETE operations.
// **
// ** This optimization cannot be used with a temp-file, as the page may
// ** have been dirty at the start of the transaction. In that case, if
// ** memory pressure forces page pPg out of the cache, the data does need
// ** to be written out to disk so that it may be read back in if the
// ** current transaction is rolled back.
// */
func _sqlite3PagerDontWrite(tls *libc.TLS, pPg uintptr) {
var pPager, p1, p2 uintptr
_, _, _ = pPager, p1, p2
pPager = (*TPgHdr)(unsafe.Pointer(pPg)).FpPager
if !((*TPager)(unsafe.Pointer(pPager)).FtempFile != 0) && int32((*TPgHdr)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_DIRTY) != 0 && (*TPager)(unsafe.Pointer(pPager)).FnSavepoint == 0 {
p1 = pPg + 52
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(PGHDR_DONT_WRITE))
p2 = pPg + 52
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^libc.Int32FromInt32(PGHDR_WRITEABLE))
}
}
// C documentation
//
// /*
// ** This routine is called to increment the value of the database file
// ** change-counter, stored as a 4-byte big-endian integer starting at
// ** byte offset 24 of the pager file. The secondary change counter at
// ** 92 is also updated, as is the SQLite version number at offset 96.
// **
// ** But this only happens if the pPager->changeCountDone flag is false.
// ** To avoid excess churning of page 1, the update only happens once.
// ** See also the pager_write_changecounter() routine that does an
// ** unconditional update of the change counters.
// **
// ** If the isDirectMode flag is zero, then this is done by calling
// ** sqlite3PagerWrite() on page 1, then modifying the contents of the
// ** page data. In this case the file will be updated when the current
// ** transaction is committed.
// **
// ** The isDirectMode flag may only be non-zero if the library was compiled
// ** with the SQLITE_ENABLE_ATOMIC_WRITE macro defined. In this case,
// ** if isDirect is non-zero, then the database file is updated directly
// ** by writing an updated version of page 1 using a call to the
// ** sqlite3OsWrite() function.
// */
func _pager_incr_changecounter(tls *libc.TLS, pPager uintptr, isDirectMode int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pCopy, zBuf uintptr
var rc int32
var _ /* pPgHdr at bp+0 */ uintptr
_, _, _ = pCopy, rc, zBuf
rc = SQLITE_OK
/* Declare and initialize constant integer 'isDirect'. If the
** atomic-write optimization is enabled in this build, then isDirect
** is initialized to the value passed as the isDirectMode parameter
** to this function. Otherwise, it is always set to zero.
**
** The idea is that if the atomic-write optimization is not
** enabled at compile time, the compiler can omit the tests of
** 'isDirect' below, as well as the block enclosed in the
** "if( isDirect )" condition.
*/
_ = isDirectMode
if !((*TPager)(unsafe.Pointer(pPager)).FchangeCountDone != 0) && (*TPager)(unsafe.Pointer(pPager)).FdbSize > uint32(0) { /* Reference to page 1 */
/* Open page 1 of the file for writing. */
rc = _sqlite3PagerGet(tls, pPager, uint32(1), bp, 0)
/* If page one was fetched successfully, and this function is not
** operating in direct-mode, make page 1 writable. When not in
** direct mode, page 1 is always held in cache and hence the PagerGet()
** above is always successful - hence the ALWAYS on rc==SQLITE_OK.
*/
if libc.Bool(!(libc.Int32FromInt32(DIRECT_MODE) != 0)) && rc == SQLITE_OK {
rc = _sqlite3PagerWrite(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
if rc == SQLITE_OK {
/* Actually do the update of the change counter */
_pager_write_changecounter(tls, *(*uintptr)(unsafe.Pointer(bp)))
/* If running in direct mode, write the contents of page 1 to the file. */
if DIRECT_MODE != 0 {
zBuf = (*TPgHdr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpData
if rc == SQLITE_OK {
rc = _sqlite3OsWrite(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, zBuf, int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), 0)
*(*Tu32)(unsafe.Pointer(pPager + 248 + 2*4))++
}
if rc == SQLITE_OK {
/* Update the pager's copy of the change-counter. Otherwise, the
** next time a read transaction is opened the cache will be
** flushed (as the change-counter values will not match). */
pCopy = zBuf + 24
libc.Xmemcpy(tls, pPager+136, pCopy, uint64(16))
(*TPager)(unsafe.Pointer(pPager)).FchangeCountDone = uint8(1)
}
} else {
(*TPager)(unsafe.Pointer(pPager)).FchangeCountDone = uint8(1)
}
}
/* Release the page reference. */
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
return rc
}
// C documentation
//
// /*
// ** Sync the database file to disk. This is a no-op for in-memory databases
// ** or pages with the Pager.noSync flag set.
// **
// ** If successful, or if called on a pager for which it is a no-op, this
// ** function returns SQLITE_OK. Otherwise, an IO error code is returned.
// */
func _sqlite3PagerSync(tls *libc.TLS, pPager uintptr, zSuper uintptr) (r int32) {
var pArg uintptr
var rc int32
_, _ = pArg, rc
rc = SQLITE_OK
pArg = zSuper
rc = _sqlite3OsFileControl(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32(SQLITE_FCNTL_SYNC), pArg)
if rc == int32(SQLITE_NOTFOUND) {
rc = SQLITE_OK
}
if rc == SQLITE_OK && !((*TPager)(unsafe.Pointer(pPager)).FnoSync != 0) {
rc = _sqlite3OsSync(tls, (*TPager)(unsafe.Pointer(pPager)).Ffd, int32((*TPager)(unsafe.Pointer(pPager)).FsyncFlags))
}
return rc
}
// C documentation
//
// /*
// ** This function may only be called while a write-transaction is active in
// ** rollback. If the connection is in WAL mode, this call is a no-op.
// ** Otherwise, if the connection does not already have an EXCLUSIVE lock on
// ** the database file, an attempt is made to obtain one.
// **
// ** If the EXCLUSIVE lock is already held or the attempt to obtain it is
// ** successful, or the connection is in WAL mode, SQLITE_OK is returned.
// ** Otherwise, either SQLITE_BUSY or an SQLITE_IOERR_XXX error code is
// ** returned.
// */
func _sqlite3PagerExclusiveLock(tls *libc.TLS, pPager uintptr) (r int32) {
var rc int32
_ = rc
rc = (*TPager)(unsafe.Pointer(pPager)).FerrCode
if rc == SQLITE_OK {
if 0 == libc.BoolInt32((*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0)) {
rc = _pager_wait_on_lock(tls, pPager, int32(EXCLUSIVE_LOCK))
}
}
return rc
}
// C documentation
//
// /*
// ** Sync the database file for the pager pPager. zSuper points to the name
// ** of a super-journal file that should be written into the individual
// ** journal file. zSuper may be NULL, which is interpreted as no
// ** super-journal (a single database transaction).
// **
// ** This routine ensures that:
// **
// ** * The database file change-counter is updated,
// ** * the journal is synced (unless the atomic-write optimization is used),
// ** * all dirty pages are written to the database file,
// ** * the database file is truncated (if required), and
// ** * the database file synced.
// **
// ** The only thing that remains to commit the transaction is to finalize
// ** (delete, truncate or zero the first part of) the journal file (or
// ** delete the super-journal file if specified).
// **
// ** Note that if zSuper==NULL, this does not overwrite a previous value
// ** passed to an sqlite3PagerCommitPhaseOne() call.
// **
// ** If the final parameter - noSync - is true, then the database file itself
// ** is not synced. The caller must call sqlite3PagerSync() directly to
// ** sync the database file before calling CommitPhaseTwo() to delete the
// ** journal file in this case.
// */
func _sqlite3PagerCommitPhaseOne(tls *libc.TLS, pPager uintptr, zSuper uintptr, noSync int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nNew TPgno
var pList uintptr
var rc int32
var _ /* pPageOne at bp+0 */ uintptr
_, _, _ = nNew, pList, rc
rc = SQLITE_OK /* Return code */
/* If a prior error occurred, report that error again. */
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
return (*TPager)(unsafe.Pointer(pPager)).FerrCode
}
/* Provide the ability to easily simulate an I/O error during testing */
if _sqlite3FaultSim(tls, int32(400)) != 0 {
return int32(SQLITE_IOERR)
}
/* If no database changes have been made, return early. */
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) < int32(PAGER_WRITER_CACHEMOD) {
return SQLITE_OK
}
if 0 == _pagerFlushOnCommit(tls, pPager, int32(1)) {
/* If this is an in-memory db, or no pages have been written to, or this
** function has already been called, it is mostly a no-op. However, any
** backup in progress needs to be restarted. */
_sqlite3BackupRestart(tls, (*TPager)(unsafe.Pointer(pPager)).FpBackup)
} else {
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
pList = _sqlite3PcacheDirtyList(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
if pList == uintptr(0) {
/* Must have at least one page for the WAL commit flag.
** Ticket [2d1a5c67dfc2363e44f29d9bbd57f] 2011-05-18 */
rc = _sqlite3PagerGet(tls, pPager, uint32(1), bp, 0)
pList = *(*uintptr)(unsafe.Pointer(bp))
(*TPgHdr)(unsafe.Pointer(pList)).FpDirty = uintptr(0)
}
if pList != 0 {
rc = _pagerWalFrames(tls, pPager, pList, (*TPager)(unsafe.Pointer(pPager)).FdbSize, int32(1))
}
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc == SQLITE_OK {
_sqlite3PcacheCleanAll(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
}
} else {
/* The bBatch boolean is true if the batch-atomic-write commit method
** should be used. No rollback journal is created if batch-atomic-write
** is enabled.
*/
rc = _pager_incr_changecounter(tls, pPager, 0)
if rc != SQLITE_OK {
goto commit_phase_one_exit
}
/* Write the super-journal name into the journal file. If a
** super-journal file name has already been written to the journal file,
** or if zSuper is NULL (no super-journal), then this call is a no-op.
*/
rc = _writeSuperJournal(tls, pPager, zSuper)
if rc != SQLITE_OK {
goto commit_phase_one_exit
}
/* Sync the journal file and write all dirty pages to the database.
** If the atomic-update optimization is being used, this sync will not
** create the journal file or perform any real IO.
**
** Because the change-counter page was just modified, unless the
** atomic-update optimization is used it is almost certain that the
** journal requires a sync here. However, in locking_mode=exclusive
** on a system under memory pressure it is just possible that this is
** not the case. In this case it is likely enough that the redundant
** xSync() call will be changed to a no-op by the OS anyhow.
*/
rc = _syncJournal(tls, pPager, 0)
if rc != SQLITE_OK {
goto commit_phase_one_exit
}
pList = _sqlite3PcacheDirtyList(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
if true {
rc = _pager_write_pagelist(tls, pPager, pList)
}
if rc != SQLITE_OK {
goto commit_phase_one_exit
}
_sqlite3PcacheCleanAll(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)
/* If the file on disk is smaller than the database image, use
** pager_truncate to grow the file here. This can happen if the database
** image was extended as part of the current transaction and then the
** last page in the db image moved to the free-list. In this case the
** last page is never written out to disk, leaving the database file
** undersized. Fix this now if it is the case. */
if (*TPager)(unsafe.Pointer(pPager)).FdbSize > (*TPager)(unsafe.Pointer(pPager)).FdbFileSize {
nNew = (*TPager)(unsafe.Pointer(pPager)).FdbSize - libc.BoolUint32((*TPager)(unsafe.Pointer(pPager)).FdbSize == (*TPager)(unsafe.Pointer(pPager)).FlckPgno)
rc = _pager_truncate(tls, pPager, nNew)
if rc != SQLITE_OK {
goto commit_phase_one_exit
}
}
/* Finally, sync the database file. */
if !(noSync != 0) {
rc = _sqlite3PagerSync(tls, pPager, zSuper)
}
}
}
goto commit_phase_one_exit
commit_phase_one_exit:
;
if rc == SQLITE_OK && !((*TPager)(unsafe.Pointer(pPager)).FpWal != libc.UintptrFromInt32(0)) {
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_WRITER_FINISHED)
}
return rc
}
// C documentation
//
// /*
// ** When this function is called, the database file has been completely
// ** updated to reflect the changes made by the current transaction and
// ** synced to disk. The journal file still exists in the file-system
// ** though, and if a failure occurs at this point it will eventually
// ** be used as a hot-journal and the current transaction rolled back.
// **
// ** This function finalizes the journal file, either by deleting,
// ** truncating or partially zeroing it, so that it cannot be used
// ** for hot-journal rollback. Once this is done the transaction is
// ** irrevocably committed.
// **
// ** If an error occurs, an IO error code is returned and the pager
// ** moves into the error state. Otherwise, SQLITE_OK is returned.
// */
func _sqlite3PagerCommitPhaseTwo(tls *libc.TLS, pPager uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK /* Return code */
/* This routine should not be called if a prior error has occurred.
** But if (due to a coding error elsewhere in the system) it does get
** called, just return the same error code without doing anything. */
if (*TPager)(unsafe.Pointer(pPager)).FerrCode != 0 {
return (*TPager)(unsafe.Pointer(pPager)).FerrCode
}
(*TPager)(unsafe.Pointer(pPager)).FiDataVersion++
/* An optimization. If the database was not actually modified during
** this transaction, the pager is running in exclusive-mode and is
** using persistent journals, then this function is a no-op.
**
** The start of the journal file currently contains a single journal
** header with the nRec field set to 0. If such a journal is used as
** a hot-journal during hot-journal rollback, 0 changes will be made
** to the database file. So there is no need to zero the journal
** header. Since the pager is in exclusive mode, there is no need
** to drop any locks either.
*/
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_WRITER_LOCKED) && (*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0 && int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_PERSIST) {
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_READER)
return SQLITE_OK
}
rc = _pager_end_transaction(tls, pPager, int32((*TPager)(unsafe.Pointer(pPager)).FsetSuper), int32(1))
return _pager_error(tls, pPager, rc)
}
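// Illustrative usage sketch (added commentary, not part of the upstream
// SQLite sources): the two-phase commit sequence as a caller would drive it,
// with a rollback if either phase fails. The helper name is hypothetical.
func examplePagerCommit(tls *libc.TLS, pPager uintptr) int32 {
	// Phase one: journal, flush and sync; uintptr(0) means no super-journal.
	rc := _sqlite3PagerCommitPhaseOne(tls, pPager, uintptr(0), 0)
	if rc == SQLITE_OK {
		// Phase two: finalize the journal, making the commit irrevocable.
		rc = _sqlite3PagerCommitPhaseTwo(tls, pPager)
	}
	if rc != SQLITE_OK {
		_sqlite3PagerRollback(tls, pPager)
	}
	return rc
}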
// C documentation
//
// /*
// ** If a write transaction is open, then all changes made within the
// ** transaction are reverted and the current write-transaction is closed.
// ** The pager falls back to PAGER_READER state if successful, or PAGER_ERROR
// ** state if an error occurs.
// **
// ** If the pager is already in PAGER_ERROR state when this function is called,
// ** it returns Pager.errCode immediately. No work is performed in this case.
// **
// ** Otherwise, in rollback mode, this function performs two functions:
// **
// ** 1) It rolls back the journal file, restoring all database file and
// ** in-memory cache pages to the state they were in when the transaction
// ** was opened, and
// **
// ** 2) It finalizes the journal file, so that it is not used for hot
// ** rollback at any point in the future.
// **
// ** Finalization of the journal file (task 2) is only performed if the
// ** rollback is successful.
// **
// ** In WAL mode, all cache-entries containing data modified within the
// ** current transaction are either expelled from the cache or reverted to
// ** their pre-transaction state by re-reading data from the database or
// ** WAL files. The WAL transaction is then closed.
// */
func _sqlite3PagerRollback(tls *libc.TLS, pPager uintptr) (r int32) {
var eState, rc, rc2 int32
_, _, _ = eState, rc, rc2
rc = SQLITE_OK /* Return code */
/* PagerRollback() is a no-op if called in READER or OPEN state. If
** the pager is already in the ERROR state, the rollback is not
** attempted here. Instead, the error code is returned to the caller.
*/
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_ERROR) {
return (*TPager)(unsafe.Pointer(pPager)).FerrCode
}
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) <= int32(PAGER_READER) {
return SQLITE_OK
}
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
rc = _sqlite3PagerSavepoint(tls, pPager, int32(SAVEPOINT_ROLLBACK), -int32(1))
rc2 = _pager_end_transaction(tls, pPager, int32((*TPager)(unsafe.Pointer(pPager)).FsetSuper), 0)
if rc == SQLITE_OK {
rc = rc2
}
} else {
if !((*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != libc.UintptrFromInt32(0)) || int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_WRITER_LOCKED) {
eState = int32((*TPager)(unsafe.Pointer(pPager)).FeState)
rc = _pager_end_transaction(tls, pPager, 0, 0)
if !((*TPager)(unsafe.Pointer(pPager)).FmemDb != 0) && eState > int32(PAGER_WRITER_LOCKED) {
/* This can happen using journal_mode=off. Move the pager to the error
** state to indicate that the contents of the cache may not be trusted.
** Any active readers will get SQLITE_ABORT.
*/
(*TPager)(unsafe.Pointer(pPager)).FerrCode = int32(SQLITE_ABORT)
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_ERROR)
_setGetterMethod(tls, pPager)
return rc
}
} else {
rc = _pager_playback(tls, pPager, 0)
}
}
/* If an error occurs during a ROLLBACK, we can no longer trust the pager
** cache. So call pager_error() on the way out to make any error persistent.
*/
return _pager_error(tls, pPager, rc)
}
// C documentation
//
// /*
// ** Return TRUE if the database file is opened read-only. Return FALSE
// ** if the database is (in theory) writable.
// */
func _sqlite3PagerIsreadonly(tls *libc.TLS, pPager uintptr) (r Tu8) {
return (*TPager)(unsafe.Pointer(pPager)).FreadOnly
}
// C documentation
//
// /*
// ** Return the approximate number of bytes of memory currently
// ** used by the pager and its associated cache.
// */
func _sqlite3PagerMemUsed(tls *libc.TLS, pPager uintptr) (r int32) {
var perPageSize int32
_ = perPageSize
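/* Added commentary: the 80+5*8 term below corresponds to sizeof(PgHdr)
** plus an estimated five pointers of per-page cache overhead in the
** original C (sizeof(PgHdr) + 5*sizeof(void*)). */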
perPageSize = int32((*TPager)(unsafe.Pointer(pPager)).FpageSize + int64((*TPager)(unsafe.Pointer(pPager)).FnExtra) + int64(int32(libc.Uint64FromInt64(80)+libc.Uint64FromInt32(5)*libc.Uint64FromInt64(8))))
return int32(int64(perPageSize*_sqlite3PcachePagecount(tls, (*TPager)(unsafe.Pointer(pPager)).FpPCache)+_sqlite3MallocSize(tls, pPager)) + (*TPager)(unsafe.Pointer(pPager)).FpageSize)
}
// C documentation
//
// /*
// ** Return the number of references to the specified page.
// */
func _sqlite3PagerPageRefcount(tls *libc.TLS, pPage uintptr) (r int32) {
return int32(_sqlite3PcachePageRefcount(tls, pPage))
}
// C documentation
//
// /*
// ** Parameter eStat must be one of SQLITE_DBSTATUS_CACHE_HIT, _MISS, _WRITE,
// ** or _WRITE+1. The SQLITE_DBSTATUS_CACHE_WRITE+1 case is a translation
// ** of SQLITE_DBSTATUS_CACHE_SPILL. The _SPILL case is not contiguous because
// ** it was added later.
// **
// ** Before returning, *pnVal is incremented by the
// ** current cache hit or miss count, according to the value of eStat. If the
// ** reset parameter is non-zero, the cache hit or miss count is zeroed before
// ** returning.
// */
func _sqlite3PagerCacheStat(tls *libc.TLS, pPager uintptr, eStat int32, reset int32, pnVal uintptr) {
eStat -= int32(SQLITE_DBSTATUS_CACHE_HIT)
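/* Added commentary: after the subtraction eStat is an index 0..3 into the
** per-pager counter array (cache hits, misses, writes, spills), which in
** the original C sources is Pager.aStat[] and is addressed here via the
** pPager+248 offset. */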
*(*Tu64)(unsafe.Pointer(pnVal)) += uint64(*(*Tu32)(unsafe.Pointer(pPager + 248 + uintptr(eStat)*4)))
if reset != 0 {
*(*Tu32)(unsafe.Pointer(pPager + 248 + uintptr(eStat)*4)) = uint32(0)
}
}
// C documentation
//
// /*
// ** Return true if this is an in-memory or temp-file backed pager.
// */
func _sqlite3PagerIsMemdb(tls *libc.TLS, pPager uintptr) (r int32) {
return libc.BoolInt32((*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 || (*TPager)(unsafe.Pointer(pPager)).FmemVfs != 0)
}
// C documentation
//
// /*
// ** Check that there are at least nSavepoint savepoints open. If there are
// ** currently fewer than nSavepoint open, then open one or more savepoints
// ** to make up the difference. If the number of savepoints is already
// ** equal to nSavepoint, then this function is a no-op.
// **
// ** If a memory allocation fails, SQLITE_NOMEM is returned. If an error
// ** occurs while opening the sub-journal file, then an IO error code is
// ** returned. Otherwise, SQLITE_OK.
// */
func _pagerOpenSavepoint(tls *libc.TLS, pPager uintptr, nSavepoint int32) (r int32) {
var aNew uintptr
var ii, nCurrent, rc int32
_, _, _, _ = aNew, ii, nCurrent, rc
rc = SQLITE_OK /* Return code */
nCurrent = (*TPager)(unsafe.Pointer(pPager)).FnSavepoint /* Current number of savepoints */
/* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM
** if the allocation fails. Otherwise, zero the new portion in case a
** malloc failure occurs while populating it in the for(...) loop below.
*/
aNew = _sqlite3Realloc(tls, (*TPager)(unsafe.Pointer(pPager)).FaSavepoint, uint64(56)*uint64(nSavepoint))
if !(aNew != 0) {
return int32(SQLITE_NOMEM)
}
libc.Xmemset(tls, aNew+uintptr(nCurrent)*56, 0, uint64(nSavepoint-nCurrent)*uint64(56))
(*TPager)(unsafe.Pointer(pPager)).FaSavepoint = aNew
/* Populate the PagerSavepoint structures just allocated. */
ii = nCurrent
for {
if !(ii < nSavepoint) {
break
}
(*(*TPagerSavepoint)(unsafe.Pointer(aNew + uintptr(ii)*56))).FnOrig = (*TPager)(unsafe.Pointer(pPager)).FdbSize
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) && (*TPager)(unsafe.Pointer(pPager)).FjournalOff > 0 {
(*(*TPagerSavepoint)(unsafe.Pointer(aNew + uintptr(ii)*56))).FiOffset = (*TPager)(unsafe.Pointer(pPager)).FjournalOff
} else {
(*(*TPagerSavepoint)(unsafe.Pointer(aNew + uintptr(ii)*56))).FiOffset = int64((*TPager)(unsafe.Pointer(pPager)).FsectorSize)
}
(*(*TPagerSavepoint)(unsafe.Pointer(aNew + uintptr(ii)*56))).FiSubRec = (*TPager)(unsafe.Pointer(pPager)).FnSubRec
(*(*TPagerSavepoint)(unsafe.Pointer(aNew + uintptr(ii)*56))).FpInSavepoint = _sqlite3BitvecCreate(tls, (*TPager)(unsafe.Pointer(pPager)).FdbSize)
(*(*TPagerSavepoint)(unsafe.Pointer(aNew + uintptr(ii)*56))).FbTruncateOnRelease = int32(1)
if !((*(*TPagerSavepoint)(unsafe.Pointer(aNew + uintptr(ii)*56))).FpInSavepoint != 0) {
return int32(SQLITE_NOMEM)
}
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) {
_sqlite3WalSavepoint(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, aNew+uintptr(ii)*56+36)
}
(*TPager)(unsafe.Pointer(pPager)).FnSavepoint = ii + int32(1)
goto _1
_1:
;
ii++
}
return rc
}
func _sqlite3PagerOpenSavepoint(tls *libc.TLS, pPager uintptr, nSavepoint int32) (r int32) {
if nSavepoint > (*TPager)(unsafe.Pointer(pPager)).FnSavepoint && (*TPager)(unsafe.Pointer(pPager)).FuseJournal != 0 {
return _pagerOpenSavepoint(tls, pPager, nSavepoint)
} else {
return SQLITE_OK
}
return r
}
// C documentation
//
// /*
// ** This function is called to rollback or release (commit) a savepoint.
// ** The savepoint to release or rollback need not be the most recently
// ** created savepoint.
// **
// ** Parameter op is always either SAVEPOINT_ROLLBACK or SAVEPOINT_RELEASE.
// ** If it is SAVEPOINT_RELEASE, then release and destroy the savepoint with
// ** index iSavepoint. If it is SAVEPOINT_ROLLBACK, then rollback all changes
// ** that have occurred since the specified savepoint was created.
// **
// ** The savepoint to rollback or release is identified by parameter
// ** iSavepoint. A value of 0 means to operate on the outermost savepoint
// ** (the first created). A value of (Pager.nSavepoint-1) means operate
// ** on the most recently created savepoint. If iSavepoint is greater than
// ** (Pager.nSavepoint-1), then this function is a no-op.
// **
// ** If a negative value is passed to this function, then the current
// ** transaction is rolled back. This is different to calling
// ** sqlite3PagerRollback() because this function does not terminate
// ** the transaction or unlock the database, it just restores the
// ** contents of the database to its original state.
// **
// ** In any case, all savepoints with an index greater than iSavepoint
// ** are destroyed. If this is a release operation (op==SAVEPOINT_RELEASE),
// ** then savepoint iSavepoint is also destroyed.
// **
// ** This function may return SQLITE_NOMEM if a memory allocation fails,
// ** or an IO error code if an IO error occurs while rolling back a
// ** savepoint. If no errors occur, SQLITE_OK is returned.
// */
func _sqlite3PagerSavepoint(tls *libc.TLS, pPager uintptr, op int32, iSavepoint int32) (r int32) {
var ii, nNew, rc, v1 int32
var pRel, pSavepoint, v3 uintptr
var sz Ti64
_, _, _, _, _, _, _, _ = ii, nNew, pRel, pSavepoint, rc, sz, v1, v3
rc = (*TPager)(unsafe.Pointer(pPager)).FerrCode
if rc == SQLITE_OK && iSavepoint < (*TPager)(unsafe.Pointer(pPager)).FnSavepoint { /* Number of remaining savepoints after this op. */
/* Figure out how many savepoints will still be active after this
** operation. Store this value in nNew. Then free resources associated
** with any savepoints that are destroyed by this operation.
*/
if op == int32(SAVEPOINT_RELEASE) {
v1 = 0
} else {
v1 = int32(1)
}
nNew = iSavepoint + v1
ii = nNew
for {
if !(ii < (*TPager)(unsafe.Pointer(pPager)).FnSavepoint) {
break
}
_sqlite3BitvecDestroy(tls, (*(*TPagerSavepoint)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(ii)*56))).FpInSavepoint)
goto _2
_2:
;
ii++
}
(*TPager)(unsafe.Pointer(pPager)).FnSavepoint = nNew
/* Truncate the sub-journal so that it only includes the parts
** that are still in use. */
if op == int32(SAVEPOINT_RELEASE) {
pRel = (*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(nNew)*56
if (*TPagerSavepoint)(unsafe.Pointer(pRel)).FbTruncateOnRelease != 0 && (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fsjfd)).FpMethods != uintptr(0) {
/* Only truncate if it is an in-memory sub-journal. */
if _sqlite3JournalIsInMemory(tls, (*TPager)(unsafe.Pointer(pPager)).Fsjfd) != 0 {
sz = ((*TPager)(unsafe.Pointer(pPager)).FpageSize + int64(4)) * int64((*TPagerSavepoint)(unsafe.Pointer(pRel)).FiSubRec)
rc = _sqlite3OsTruncate(tls, (*TPager)(unsafe.Pointer(pPager)).Fsjfd, sz)
}
(*TPager)(unsafe.Pointer(pPager)).FnSubRec = (*TPagerSavepoint)(unsafe.Pointer(pRel)).FiSubRec
}
} else {
if (*TPager)(unsafe.Pointer(pPager)).FpWal != uintptr(0) || (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) {
if nNew == 0 {
v3 = uintptr(0)
} else {
v3 = (*TPager)(unsafe.Pointer(pPager)).FaSavepoint + uintptr(nNew-int32(1))*56
}
pSavepoint = v3
rc = _pagerPlaybackSavepoint(tls, pPager, pSavepoint)
}
}
}
return rc
}
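// Illustrative usage sketch (added commentary, not part of the upstream
// SQLite sources): opening a savepoint and later releasing it or rolling it
// back. Index 0 is the outermost savepoint; the helper name is hypothetical.
func examplePagerSavepoint(tls *libc.TLS, pPager uintptr, commit int32) int32 {
	// Make sure at least one savepoint is open.
	rc := _sqlite3PagerOpenSavepoint(tls, pPager, int32(1))
	if rc != SQLITE_OK {
		return rc
	}
	if commit != 0 {
		// Release (commit) savepoint 0, which also destroys it.
		return _sqlite3PagerSavepoint(tls, pPager, int32(SAVEPOINT_RELEASE), 0)
	}
	// Undo everything done since savepoint 0 was opened, then destroy it.
	rc = _sqlite3PagerSavepoint(tls, pPager, int32(SAVEPOINT_ROLLBACK), 0)
	if rc == SQLITE_OK {
		rc = _sqlite3PagerSavepoint(tls, pPager, int32(SAVEPOINT_RELEASE), 0)
	}
	return rc
}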
// C documentation
//
// /*
// ** Return the full pathname of the database file.
// **
// ** Except, if the pager is in-memory only, then return an empty string if
// ** nullIfMemDb is true. This routine is called with nullIfMemDb==1 when
// ** used to report the filename to the user, for compatibility with legacy
// ** behavior. But when the Btree needs to know the filename for matching to
// ** shared cache, it uses nullIfMemDb==0 so that in-memory databases can
// ** participate in shared-cache.
// **
// ** The return value to this routine is always safe to use with
// ** sqlite3_uri_parameter() and sqlite3_filename_database() and friends.
// */
func _sqlite3PagerFilename(tls *libc.TLS, pPager uintptr, nullIfMemDb int32) (r uintptr) {
if nullIfMemDb != 0 && ((*TPager)(unsafe.Pointer(pPager)).FmemDb != 0 || _sqlite3IsMemdb(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs) != 0) {
return uintptr(unsafe.Pointer(&_zFake)) + 4
} else {
return (*TPager)(unsafe.Pointer(pPager)).FzFilename
}
return r
}
var _zFake = [8]int8{}
// C documentation
//
// /*
// ** Return the VFS structure for the pager.
// */
func _sqlite3PagerVfs(tls *libc.TLS, pPager uintptr) (r uintptr) {
return (*TPager)(unsafe.Pointer(pPager)).FpVfs
}
// C documentation
//
// /*
// ** Return the file handle for the database file associated
// ** with the pager. This might return NULL if the file has
// ** not yet been opened.
// */
func _sqlite3PagerFile(tls *libc.TLS, pPager uintptr) (r uintptr) {
return (*TPager)(unsafe.Pointer(pPager)).Ffd
}
// C documentation
//
// /*
// ** Return the file handle for the journal file (if it exists).
// ** This will be either the rollback journal or the WAL file.
// */
func _sqlite3PagerJrnlFile(tls *libc.TLS, pPager uintptr) (r uintptr) {
var v1 uintptr
_ = v1
if (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
v1 = _sqlite3WalFile(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
} else {
v1 = (*TPager)(unsafe.Pointer(pPager)).Fjfd
}
return v1
}
// C documentation
//
// /*
// ** Return the full pathname of the journal file.
// */
func _sqlite3PagerJournalname(tls *libc.TLS, pPager uintptr) (r uintptr) {
return (*TPager)(unsafe.Pointer(pPager)).FzJournal
}
// C documentation
//
// /*
// ** Move the page pPg to location pgno in the file.
// **
// ** There must be no references to the page previously located at
// ** pgno (which we call pPgOld) though that page is allowed to be
// ** in cache. If the page previously located at pgno is not already
// ** in the rollback journal, it is not put there by this routine.
// **
// ** References to the page pPg remain valid. Updating any
// ** meta-data associated with pPg (i.e. data stored in the nExtra bytes
// ** allocated along with the page) is the responsibility of the caller.
// **
// ** A transaction must be active when this routine is called. It used to be
// ** required that a statement transaction was not active, but this restriction
// ** has been removed (CREATE INDEX needs to move a page when a statement
// ** transaction is active).
// **
// ** If the fourth argument, isCommit, is non-zero, then this page is being
// ** moved as part of a database reorganization just before the transaction
// ** is being committed. In this case, it is guaranteed that the database page
// ** pPg refers to will not be written to again within this transaction.
// **
// ** This function may return SQLITE_NOMEM or an IO error code if an error
// ** occurs. Otherwise, it returns SQLITE_OK.
// */
func _sqlite3PagerMovepage(tls *libc.TLS, pPager uintptr, pPg uintptr, pgno TPgno, isCommit int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var needSyncPgno, origPgno TPgno
var pPgOld, p3, p4, p5 uintptr
var rc, v1 int32
var v2 bool
var _ /* pPgHdr at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _ = needSyncPgno, origPgno, pPgOld, rc, v1, v2, p3, p4, p5 /* The page being overwritten. */
needSyncPgno = uint32(0) /* The original page number */
/* In order to be able to rollback, an in-memory database must journal
** the page we are moving from.
*/
if (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 {
rc = _sqlite3PagerWrite(tls, pPg)
if rc != 0 {
return rc
}
}
/* If the page being moved is dirty and has not been saved by the latest
** savepoint, then save the current contents of the page into the
** sub-journal now. This is required to handle the following scenario:
**
**   BEGIN;
**     <journal page X, then modify the page in memory>
**   SAVEPOINT one;
**     <move page X to location Y>
**   ROLLBACK TO one;
**
** If page X were not written to the sub-journal here, it would not
** be possible to restore its contents when the "ROLLBACK TO one"
** statement is processed.
**
** subjournalPage() may need to allocate space to store pPg->pgno into
** one or more savepoint bitvecs. This is the reason this function
** may return SQLITE_NOMEM.
*/
if v2 = int32((*TDbPage)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_DIRTY) != 0; v2 {
v1 = _subjournalPageIfRequired(tls, pPg)
rc = v1
}
if v2 && SQLITE_OK != v1 {
return rc
}
/* If the journal needs to be sync()ed before page pPg->pgno can
** be written to, store pPg->pgno in local variable needSyncPgno.
**
** If the isCommit flag is set, there is no need to remember that
** the journal needs to be sync()ed before database page pPg->pgno
** can be written to. The caller has already promised not to write to it.
*/
if int32((*TDbPage)(unsafe.Pointer(pPg)).Fflags)&int32(PGHDR_NEED_SYNC) != 0 && !(isCommit != 0) {
needSyncPgno = (*TDbPage)(unsafe.Pointer(pPg)).Fpgno
}
/* If the cache contains a page with page-number pgno, remove it
** from its hash chain. Also, if the PGHDR_NEED_SYNC flag was set for
** page pgno before the 'move' operation, it needs to be retained
** for the page moved there.
*/
p3 = pPg + 52
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^libc.Int32FromInt32(PGHDR_NEED_SYNC))
pPgOld = _sqlite3PagerLookup(tls, pPager, pgno)
if pPgOld != 0 {
if (*TPgHdr)(unsafe.Pointer(pPgOld)).FnRef > int64(1) {
_sqlite3PagerUnrefNotNull(tls, pPgOld)
return _sqlite3CorruptError(tls, int32(63973))
}
p4 = pPg + 52
*(*Tu16)(unsafe.Pointer(p4)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p4))) | int32((*TPgHdr)(unsafe.Pointer(pPgOld)).Fflags)&libc.Int32FromInt32(PGHDR_NEED_SYNC))
if (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 {
/* Do not discard pages from an in-memory database since we might
** need to rollback later. Just move the page out of the way. */
_sqlite3PcacheMove(tls, pPgOld, (*TPager)(unsafe.Pointer(pPager)).FdbSize+uint32(1))
} else {
_sqlite3PcacheDrop(tls, pPgOld)
}
}
origPgno = (*TDbPage)(unsafe.Pointer(pPg)).Fpgno
_sqlite3PcacheMove(tls, pPg, pgno)
_sqlite3PcacheMakeDirty(tls, pPg)
/* For an in-memory database, make sure the original page continues
** to exist, in case the transaction needs to roll back. Use pPgOld
** as the original page since it has already been allocated.
*/
if (*TPager)(unsafe.Pointer(pPager)).FtempFile != 0 && pPgOld != 0 {
_sqlite3PcacheMove(tls, pPgOld, origPgno)
_sqlite3PagerUnrefNotNull(tls, pPgOld)
}
if needSyncPgno != 0 {
rc = _sqlite3PagerGet(tls, pPager, needSyncPgno, bp, 0)
if rc != SQLITE_OK {
if needSyncPgno <= (*TPager)(unsafe.Pointer(pPager)).FdbOrigSize {
_sqlite3BitvecClear(tls, (*TPager)(unsafe.Pointer(pPager)).FpInJournal, needSyncPgno, (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace)
}
return rc
}
p5 = *(*uintptr)(unsafe.Pointer(bp)) + 52
*(*Tu16)(unsafe.Pointer(p5)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p5))) | libc.Int32FromInt32(PGHDR_NEED_SYNC))
_sqlite3PcacheMakeDirty(tls, *(*uintptr)(unsafe.Pointer(bp)))
_sqlite3PagerUnrefNotNull(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** The page handle passed as the first argument refers to a dirty page
// ** with a page number other than iNew. This function changes the page's
// ** page number to iNew and sets the value of the PgHdr.flags field to
// ** the value passed as the third parameter.
// */
func _sqlite3PagerRekey(tls *libc.TLS, pPg uintptr, iNew TPgno, flags Tu16) {
(*TDbPage)(unsafe.Pointer(pPg)).Fflags = flags
_sqlite3PcacheMove(tls, pPg, iNew)
}
// C documentation
//
// /*
// ** Return a pointer to the data for the specified page.
// */
func _sqlite3PagerGetData(tls *libc.TLS, pPg uintptr) (r uintptr) {
return (*TDbPage)(unsafe.Pointer(pPg)).FpData
}
// C documentation
//
// /*
// ** Return a pointer to the Pager.nExtra bytes of "extra" space
// ** allocated along with the specified page.
// */
func _sqlite3PagerGetExtra(tls *libc.TLS, pPg uintptr) (r uintptr) {
return (*TDbPage)(unsafe.Pointer(pPg)).FpExtra
}
// C documentation
//
// /*
// ** Get/set the locking-mode for this pager. Parameter eMode must be one
// ** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or
// ** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then
// ** the locking-mode is set to the value specified.
// **
// ** The returned value is either PAGER_LOCKINGMODE_NORMAL or
// ** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated)
// ** locking-mode.
// */
func _sqlite3PagerLockingMode(tls *libc.TLS, pPager uintptr, eMode int32) (r int32) {
if eMode >= 0 && !((*TPager)(unsafe.Pointer(pPager)).FtempFile != 0) && !(_sqlite3WalHeapMemory(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal) != 0) {
(*TPager)(unsafe.Pointer(pPager)).FexclusiveMode = uint8(eMode)
}
return int32((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode)
}
// C documentation
//
// /*
// ** Set the journal-mode for this pager. Parameter eMode must be one of:
// **
// ** PAGER_JOURNALMODE_DELETE
// ** PAGER_JOURNALMODE_TRUNCATE
// ** PAGER_JOURNALMODE_PERSIST
// ** PAGER_JOURNALMODE_OFF
// ** PAGER_JOURNALMODE_MEMORY
// ** PAGER_JOURNALMODE_WAL
// **
// ** The journalmode is set to the value specified if the change is allowed.
// ** The change may be disallowed for the following reasons:
// **
// ** * An in-memory database can only have its journal_mode set to _OFF
// ** or _MEMORY.
// **
// ** * Temporary databases cannot have _WAL journalmode.
// **
// ** The returned value indicates the current (possibly updated) journal-mode.
// */
func _sqlite3PagerSetJournalMode(tls *libc.TLS, pPager uintptr, eMode int32) (r int32) {
var eOld Tu8
var rc, state int32
_, _, _ = eOld, rc, state
eOld = (*TPager)(unsafe.Pointer(pPager)).FjournalMode /* Prior journalmode */
/* The eMode parameter is always valid */
/* This routine is only called from the OP_JournalMode opcode, and
** the logic there will never allow a temporary file to be changed
** to WAL mode.
*/
/* Do not allow the journalmode of an in-memory database to be set to
** anything other than MEMORY or OFF
*/
if (*TPager)(unsafe.Pointer(pPager)).FmemDb != 0 {
if eMode != int32(PAGER_JOURNALMODE_MEMORY) && eMode != int32(PAGER_JOURNALMODE_OFF) {
eMode = int32(eOld)
}
}
if eMode != int32(eOld) {
/* Change the journal mode. */
(*TPager)(unsafe.Pointer(pPager)).FjournalMode = uint8(eMode)
/* When transitioning from TRUNCATE or PERSIST to any other journal
** mode except WAL, unless the pager is in locking_mode=exclusive mode,
** delete the journal file.
*/
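/* Added commentary: the bit tests below rely on the journal-mode constants
** having their usual values (DELETE=0, PERSIST=1, OFF=2, TRUNCATE=3,
** MEMORY=4, WAL=5): (eOld&5)==1 matches only PERSIST and TRUNCATE, while
** (eMode&1)==0 matches every mode except PERSIST, TRUNCATE and WAL. */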
if !((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0) && int32(eOld)&int32(5) == int32(1) && eMode&int32(1) == 0 {
/* In this case we would like to delete the journal file. If it is
** not possible, then that is not a problem. Deleting the journal file
** here is an optimization only.
**
** Before deleting the journal file, obtain a RESERVED lock on the
** database file. This ensures that the journal file is not deleted
** while it is in use by some other client.
*/
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
if int32((*TPager)(unsafe.Pointer(pPager)).FeLock) >= int32(RESERVED_LOCK) {
_sqlite3OsDelete(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, (*TPager)(unsafe.Pointer(pPager)).FzJournal, 0)
} else {
rc = SQLITE_OK
state = int32((*TPager)(unsafe.Pointer(pPager)).FeState)
if state == PAGER_OPEN {
rc = _sqlite3PagerSharedLock(tls, pPager)
}
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) == int32(PAGER_READER) {
rc = _pagerLockDb(tls, pPager, int32(RESERVED_LOCK))
}
if rc == SQLITE_OK {
_sqlite3OsDelete(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, (*TPager)(unsafe.Pointer(pPager)).FzJournal, 0)
}
if rc == SQLITE_OK && state == int32(PAGER_READER) {
_pagerUnlockDb(tls, pPager, int32(SHARED_LOCK))
} else {
if state == PAGER_OPEN {
_pager_unlock(tls, pPager)
}
}
}
} else {
if eMode == int32(PAGER_JOURNALMODE_OFF) || eMode == int32(PAGER_JOURNALMODE_MEMORY) {
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
}
}
}
/* Return the new journal mode */
return int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode)
}
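// Illustrative sketch, not part of the ccgo translation: the bit test
// eOld&5 == 1 && eMode&1 == 0 in _sqlite3PagerSetJournalMode above asks
// "was the old mode TRUNCATE or PERSIST (modes that keep a journal file),
// and is the new mode DELETE, OFF or MEMORY (modes that do not)?". The
// local constants assume the usual SQLite encoding (DELETE=0, PERSIST=1,
// OFF=2, TRUNCATE=3, MEMORY=4, WAL=5), matching the PAGER_JOURNALMODE_*
// values used elsewhere in this file. The real code additionally requires
// locking_mode!=EXCLUSIVE before it deletes the journal file.
func exampleJournalFileShouldBeDeleted(eOld, eMode int32) bool {
	const (
		exJrnlDelete   = int32(0)
		exJrnlPersist  = int32(1)
		exJrnlOff      = int32(2)
		exJrnlTruncate = int32(3)
		exJrnlMemory   = int32(4)
	)
	// eOld&5 == 1 selects exactly PERSIST (1) and TRUNCATE (3).
	oldKeptJournalFile := eOld == exJrnlPersist || eOld == exJrnlTruncate
	// eMode&1 == 0 selects exactly DELETE (0), OFF (2) and MEMORY (4).
	newHasNoJournalFile := eMode == exJrnlDelete || eMode == exJrnlOff || eMode == exJrnlMemory
	return oldKeptJournalFile && newHasNoJournalFile
}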
// C documentation
//
// /*
// ** Return the current journal mode.
// */
func _sqlite3PagerGetJournalMode(tls *libc.TLS, pPager uintptr) (r int32) {
return int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode)
}
// C documentation
//
// /*
// ** Return TRUE if the pager is in a state where it is OK to change the
// ** journalmode. Journalmode changes can only happen when the database
// ** is unmodified.
// */
func _sqlite3PagerOkToChangeJournalMode(tls *libc.TLS, pPager uintptr) (r int32) {
if int32((*TPager)(unsafe.Pointer(pPager)).FeState) >= int32(PAGER_WRITER_CACHEMOD) {
return 0
}
if (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Fjfd)).FpMethods != uintptr(0) && (*TPager)(unsafe.Pointer(pPager)).FjournalOff > 0 {
return 0
}
return int32(1)
}
// C documentation
//
// /*
// ** Get/set the size-limit used for persistent journal files.
// **
// ** Setting the size limit to -1 means no limit is enforced.
// ** An attempt to set a limit smaller than -1 is a no-op.
// */
func _sqlite3PagerJournalSizeLimit(tls *libc.TLS, pPager uintptr, iLimit Ti64) (r Ti64) {
if iLimit >= int64(-int32(1)) {
(*TPager)(unsafe.Pointer(pPager)).FjournalSizeLimit = iLimit
_sqlite3WalLimit(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, iLimit)
}
return (*TPager)(unsafe.Pointer(pPager)).FjournalSizeLimit
}
// C documentation
//
// /*
// ** Return a pointer to the pPager->pBackup variable. The backup module
// ** in backup.c maintains the content of this variable. This module
// ** uses it opaquely as an argument to sqlite3BackupRestart() and
// ** sqlite3BackupUpdate() only.
// */
func _sqlite3PagerBackupPtr(tls *libc.TLS, pPager uintptr) (r uintptr) {
return pPager + 112
}
// C documentation
//
// /*
// ** Unless this is an in-memory or temporary database, clear the pager cache.
// */
func _sqlite3PagerClearCache(tls *libc.TLS, pPager uintptr) {
if int32((*TPager)(unsafe.Pointer(pPager)).FtempFile) == 0 {
_pager_reset(tls, pPager)
}
}
// C documentation
//
// /*
// ** This function is called when the user invokes "PRAGMA wal_checkpoint",
// ** "PRAGMA wal_blocking_checkpoint" or calls the sqlite3_wal_checkpoint()
// ** or wal_blocking_checkpoint() API functions.
// **
// ** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART.
// */
func _sqlite3PagerCheckpoint(tls *libc.TLS, pPager uintptr, db uintptr, eMode int32, pnLog uintptr, pnCkpt uintptr) (r int32) {
var rc int32
var v1 uintptr
_, _ = rc, v1
rc = SQLITE_OK
if (*TPager)(unsafe.Pointer(pPager)).FpWal == uintptr(0) && int32((*TPager)(unsafe.Pointer(pPager)).FjournalMode) == int32(PAGER_JOURNALMODE_WAL) {
/* This only happens when a zero-byte database file is opened and
** then "PRAGMA journal_mode=WAL" is run and then sqlite3_wal_checkpoint()
** is invoked without any intervening transactions. We need to start
** a transaction to initialize pWal. The PRAGMA table_list statement is
** used for this since it starts transactions on every database file,
** including all ATTACHed databases. This seems expensive for a single
** sqlite3_wal_checkpoint() call, but it happens very rarely.
** https://sqlite.org/forum/forumpost/fd0f19d229156939
*/
Xsqlite3_exec(tls, db, __ccgo_ts+4111, uintptr(0), uintptr(0), uintptr(0))
}
if (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
if eMode == SQLITE_CHECKPOINT_PASSIVE {
v1 = uintptr(0)
} else {
v1 = (*TPager)(unsafe.Pointer(pPager)).FxBusyHandler
}
rc = _sqlite3WalCheckpoint(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, db, eMode, v1, (*TPager)(unsafe.Pointer(pPager)).FpBusyHandlerArg, int32((*TPager)(unsafe.Pointer(pPager)).FwalSyncFlags), int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace, pnLog, pnCkpt)
}
return rc
}
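// Usage sketch, not part of the ccgo translation: at the SQL level the
// checkpoint path above is normally reached through a PRAGMA or the
// sqlite3_wal_checkpoint() API. The helper below assumes a database handle
// db that was opened through this package, and that libc.CString/libc.Xfree
// from modernc.org/libc are available for building the NUL-terminated SQL
// text; these are assumptions about the surrounding program, not guarantees
// made by this generated file.
func exampleRequestWalCheckpoint(tls *libc.TLS, db uintptr) int32 {
	sql, err := libc.CString("PRAGMA wal_checkpoint(FULL);")
	if err != nil {
		return int32(SQLITE_NOMEM)
	}
	defer libc.Xfree(tls, sql)
	// Running the PRAGMA ends up in _sqlite3PagerCheckpoint for each
	// attached database that is in journal_mode=WAL.
	return Xsqlite3_exec(tls, db, sql, uintptr(0), uintptr(0), uintptr(0))
}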
func _sqlite3PagerWalCallback(tls *libc.TLS, pPager uintptr) (r int32) {
return _sqlite3WalCallback(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
}
// C documentation
//
// /*
// ** Return true if the underlying VFS for the given pager supports the
// ** primitives necessary for write-ahead logging.
// */
func _sqlite3PagerWalSupported(tls *libc.TLS, pPager uintptr) (r int32) {
var pMethods uintptr
_ = pMethods
pMethods = (*Tsqlite3_file)(unsafe.Pointer((*TPager)(unsafe.Pointer(pPager)).Ffd)).FpMethods
if (*TPager)(unsafe.Pointer(pPager)).FnoLock != 0 {
return 0
}
return libc.BoolInt32((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0 || (*Tsqlite3_io_methods)(unsafe.Pointer(pMethods)).FiVersion >= int32(2) && (*Tsqlite3_io_methods)(unsafe.Pointer(pMethods)).FxShmMap != 0)
}
// C documentation
//
// /*
// ** Attempt to take an exclusive lock on the database file. If a PENDING lock
// ** is obtained instead, immediately release it.
// */
func _pagerExclusiveLock(tls *libc.TLS, pPager uintptr) (r int32) {
var eOrigLock Tu8
var rc int32
_, _ = eOrigLock, rc /* Original lock */
eOrigLock = (*TPager)(unsafe.Pointer(pPager)).FeLock
rc = _pagerLockDb(tls, pPager, int32(EXCLUSIVE_LOCK))
if rc != SQLITE_OK {
/* If the attempt to grab the exclusive lock failed, release the
** pending lock that may have been obtained instead. */
_pagerUnlockDb(tls, pPager, int32(eOrigLock))
}
return rc
}
// C documentation
//
// /*
// ** Call sqlite3WalOpen() to open the WAL handle. If the pager is in
// ** exclusive-locking mode when this function is called, take an EXCLUSIVE
// ** lock on the database file and use heap-memory to store the wal-index
// ** in. Otherwise, use the normal shared-memory.
// */
func _pagerOpenWal(tls *libc.TLS, pPager uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
/* If the pager is already in exclusive-mode, the WAL module will use
** heap-memory for the wal-index instead of the VFS shared-memory
** implementation. Take the exclusive lock now, before opening the WAL
** file, to make sure this is safe.
*/
if (*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0 {
rc = _pagerExclusiveLock(tls, pPager)
}
/* Open the connection to the log file. If this operation fails,
** (e.g. due to malloc() failure), return an error code.
*/
if rc == SQLITE_OK {
rc = _sqlite3WalOpen(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, (*TPager)(unsafe.Pointer(pPager)).Ffd, (*TPager)(unsafe.Pointer(pPager)).FzWal, int32((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode), (*TPager)(unsafe.Pointer(pPager)).FjournalSizeLimit, pPager+296)
}
_pagerFixMaplimit(tls, pPager)
return rc
}
// C documentation
//
// /*
// ** The caller must be holding a SHARED lock on the database file to call
// ** this function.
// **
// ** If the pager passed as the first argument is open on a real database
// ** file (not a temp file or an in-memory database), and the WAL file
// ** is not already open, make an attempt to open it now. If successful,
// ** return SQLITE_OK. If an error occurs or the VFS used by the pager does
// ** not support the xShmXXX() methods, return an error code. *pbOpen is
// ** not modified in either case.
// **
// ** If the pager is open on a temp-file (or in-memory database), or if
// ** the WAL file is already open, set *pbOpen to 1 and return SQLITE_OK
// ** without doing anything.
// */
func _sqlite3PagerOpenWal(tls *libc.TLS, pPager uintptr, pbOpen uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK /* Return code */
if !((*TPager)(unsafe.Pointer(pPager)).FtempFile != 0) && !((*TPager)(unsafe.Pointer(pPager)).FpWal != 0) {
if !(_sqlite3PagerWalSupported(tls, pPager) != 0) {
return int32(SQLITE_CANTOPEN)
}
/* Close any rollback journal previously open */
_sqlite3OsClose(tls, (*TPager)(unsafe.Pointer(pPager)).Fjfd)
rc = _pagerOpenWal(tls, pPager)
if rc == SQLITE_OK {
(*TPager)(unsafe.Pointer(pPager)).FjournalMode = uint8(PAGER_JOURNALMODE_WAL)
(*TPager)(unsafe.Pointer(pPager)).FeState = uint8(PAGER_OPEN)
}
} else {
*(*int32)(unsafe.Pointer(pbOpen)) = int32(1)
}
return rc
}
// C documentation
//
// /*
// ** This function is called to close the connection to the log file prior
// ** to switching from WAL to rollback mode.
// **
// ** Before closing the log file, this function attempts to take an
// ** EXCLUSIVE lock on the database file. If this cannot be obtained, an
// ** error (SQLITE_BUSY) is returned and the log connection is not closed.
// ** If successful, the EXCLUSIVE lock is not released before returning.
// */
func _sqlite3PagerCloseWal(tls *libc.TLS, pPager uintptr, db uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* logexists at bp+0 */ int32
_ = rc
rc = SQLITE_OK
/* If the log file is not already open, but does exist in the file-system,
** it may need to be checkpointed before the connection can switch to
** rollback mode. Open it now so this can happen.
*/
if !((*TPager)(unsafe.Pointer(pPager)).FpWal != 0) {
*(*int32)(unsafe.Pointer(bp)) = 0
rc = _pagerLockDb(tls, pPager, int32(SHARED_LOCK))
if rc == SQLITE_OK {
rc = _sqlite3OsAccess(tls, (*TPager)(unsafe.Pointer(pPager)).FpVfs, (*TPager)(unsafe.Pointer(pPager)).FzWal, SQLITE_ACCESS_EXISTS, bp)
}
if rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp)) != 0 {
rc = _pagerOpenWal(tls, pPager)
}
}
/* Checkpoint and close the log. Because an EXCLUSIVE lock is held on
** the database file, the log and log-summary files will be deleted.
*/
if rc == SQLITE_OK && (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
rc = _pagerExclusiveLock(tls, pPager)
if rc == SQLITE_OK {
rc = _sqlite3WalClose(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, db, int32((*TPager)(unsafe.Pointer(pPager)).FwalSyncFlags), int32((*TPager)(unsafe.Pointer(pPager)).FpageSize), (*TPager)(unsafe.Pointer(pPager)).FpTmpSpace)
(*TPager)(unsafe.Pointer(pPager)).FpWal = uintptr(0)
_pagerFixMaplimit(tls, pPager)
if rc != 0 && !((*TPager)(unsafe.Pointer(pPager)).FexclusiveMode != 0) {
_pagerUnlockDb(tls, pPager, int32(SHARED_LOCK))
}
}
}
return rc
}
// C documentation
//
// /*
// ** If this is a WAL database, obtain a snapshot handle for the snapshot
// ** currently open. Otherwise, return an error.
// */
func _sqlite3PagerSnapshotGet(tls *libc.TLS, pPager uintptr, ppSnapshot uintptr) (r int32) {
var rc int32
_ = rc
rc = int32(SQLITE_ERROR)
if (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
rc = _sqlite3WalSnapshotGet(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, ppSnapshot)
}
return rc
}
// C documentation
//
// /*
// ** If this is a WAL database, store a pointer to pSnapshot. Next time a
// ** read transaction is opened, attempt to read from the snapshot it
// ** identifies. If this is not a WAL database, return an error.
// */
func _sqlite3PagerSnapshotOpen(tls *libc.TLS, pPager uintptr, pSnapshot uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
if (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
_sqlite3WalSnapshotOpen(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, pSnapshot)
} else {
rc = int32(SQLITE_ERROR)
}
return rc
}
// C documentation
//
// /*
// ** If this is a WAL database, call sqlite3WalSnapshotRecover(). If this
// ** is not a WAL database, return an error.
// */
func _sqlite3PagerSnapshotRecover(tls *libc.TLS, pPager uintptr) (r int32) {
var rc int32
_ = rc
if (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
rc = _sqlite3WalSnapshotRecover(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
} else {
rc = int32(SQLITE_ERROR)
}
return rc
}
// C documentation
//
// /*
// ** The caller currently has a read transaction open on the database.
// ** If this is not a WAL database, SQLITE_ERROR is returned. Otherwise,
// ** this function takes a SHARED lock on the CHECKPOINTER slot and then
// ** checks if the snapshot passed as the second argument is still
// ** available. If so, SQLITE_OK is returned.
// **
// ** If the snapshot is not available, SQLITE_ERROR is returned. Or, if
// ** the CHECKPOINTER lock cannot be obtained, SQLITE_BUSY. If any error
// ** occurs (any value other than SQLITE_OK is returned), the CHECKPOINTER
// ** lock is released before returning.
// */
func _sqlite3PagerSnapshotCheck(tls *libc.TLS, pPager uintptr, pSnapshot uintptr) (r int32) {
var rc int32
_ = rc
if (*TPager)(unsafe.Pointer(pPager)).FpWal != 0 {
rc = _sqlite3WalSnapshotCheck(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal, pSnapshot)
} else {
rc = int32(SQLITE_ERROR)
}
return rc
}
// C documentation
//
// /*
// ** Release a lock obtained by an earlier successful call to
// ** sqlite3PagerSnapshotCheck().
// */
func _sqlite3PagerSnapshotUnlock(tls *libc.TLS, pPager uintptr) {
_sqlite3WalSnapshotUnlock(tls, (*TPager)(unsafe.Pointer(pPager)).FpWal)
}
/************** End of pager.c ***********************************************/
/************** Begin file wal.c *********************************************/
/*
** 2010 February 1
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains the implementation of a write-ahead log (WAL) used in
** "journal_mode=WAL" mode.
**
** WRITE-AHEAD LOG (WAL) FILE FORMAT
**
** A WAL file consists of a header followed by zero or more "frames".
** Each frame records the revised content of a single page from the
** database file. All changes to the database are recorded by writing
** frames into the WAL. Transactions commit when a frame is written that
** contains a commit marker. A single WAL can and usually does record
** multiple transactions. Periodically, the content of the WAL is
** transferred back into the database file in an operation called a
** "checkpoint".
**
** A single WAL file can be used multiple times. In other words, the
** WAL can fill up with frames and then be checkpointed and then new
** frames can overwrite the old ones. A WAL always grows from beginning
** toward the end. Checksums and counters attached to each frame are
** used to determine which frames within the WAL are valid and which
** are leftovers from prior checkpoints.
**
** The WAL header is 32 bytes in size and consists of the following eight
** big-endian 32-bit unsigned integer values:
**
** 0: Magic number. 0x377f0682 or 0x377f0683
** 4: File format version. Currently 3007000
** 8: Database page size. Example: 1024
** 12: Checkpoint sequence number
** 16: Salt-1, random integer incremented with each checkpoint
** 20: Salt-2, a different random integer changing with each ckpt
** 24: Checksum-1 (first part of checksum for first 24 bytes of header).
** 28: Checksum-2 (second part of checksum for first 24 bytes of header).
**
** Immediately following the wal-header are zero or more frames. Each
** frame consists of a 24-byte frame-header followed by <page-size> bytes
** of page data. The frame-header is six big-endian 32-bit unsigned
** integer values, as follows:
**
** 0: Page number.
** 4: For commit records, the size of the database image in pages
** after the commit. For all other records, zero.
** 8: Salt-1 (copied from the header)
** 12: Salt-2 (copied from the header)
** 16: Checksum-1.
** 20: Checksum-2.
**
** A frame is considered valid if and only if the following conditions are
** true:
**
** (1) The salt-1 and salt-2 values in the frame-header match
** salt values in the wal-header
**
** (2) The checksum values in the final 8 bytes of the frame-header
** exactly match the checksum computed consecutively on the
** WAL header and the first 8 bytes and the content of all frames
** up to and including the current frame.
**
** The checksum is computed using 32-bit big-endian integers if the
** magic number in the first 4 bytes of the WAL is 0x377f0683 and it
** is computed using little-endian if the magic number is 0x377f0682.
** The checksum values are always stored in the frame header in a
** big-endian format regardless of which byte order is used to compute
** the checksum. The checksum is computed by interpreting the input as
** an even number of unsigned 32-bit integers: x[0] through x[N]. The
** algorithm used for the checksum is as follows:
**
** for i from 0 to n-1 step 2:
** s0 += x[i] + s1;
** s1 += x[i+1] + s0;
** endfor
**
** Note that s0 and s1 are both weighted checksums using fibonacci weights
** in reverse order (the largest fibonacci weight occurs on the first element
** of the sequence being summed.) The s1 value spans all 32-bit
** terms of the sequence whereas s0 omits the final term.
**
** On a checkpoint, the WAL is first VFS.xSync-ed, then valid content of the
** WAL is transferred into the database, then the database is VFS.xSync-ed.
** The VFS.xSync operations serve as write barriers - all writes launched
** before the xSync must complete before any write that launches after the
** xSync begins.
**
** After each checkpoint, the salt-1 value is incremented and the salt-2
** value is randomized. This prevents old and new frames in the WAL from
** being considered valid at the same time and being checkpointed together
** following a crash.
**
** READER ALGORITHM
**
** To read a page from the database (call it page number P), a reader
** first checks the WAL to see if it contains page P. If so, then the
** last valid instance of page P that is followed by a commit frame
** or is a commit frame itself becomes the value read. If the WAL
** contains no copies of page P that are valid and which are a commit
** frame or are followed by a commit frame, then page P is read from
** the database file.
**
** To start a read transaction, the reader records the index of the last
** valid frame in the WAL. The reader uses this recorded "mxFrame" value
** for all subsequent read operations. New transactions can be appended
** to the WAL, but as long as the reader uses its original mxFrame value
** and ignores the newly appended content, it will see a consistent snapshot
** of the database from a single point in time. This technique allows
** multiple concurrent readers to view different versions of the database
** content simultaneously.
**
** The reader algorithm in the previous paragraphs works correctly, but
** because frames for page P can appear anywhere within the WAL, the
** reader has to scan the entire WAL looking for page P frames. If the
** WAL is large (multiple megabytes is typical) that scan can be slow,
** and read performance suffers. To overcome this problem, a separate
** data structure called the wal-index is maintained to expedite the
** search for frames of a particular page.
**
** WAL-INDEX FORMAT
**
** Conceptually, the wal-index is shared memory, though VFS implementations
** might choose to implement the wal-index using a mmapped file. Because
** the wal-index is shared memory, SQLite does not support journal_mode=WAL
** on a network filesystem. All users of the database must be able to
** share memory.
**
** In the default unix and windows implementation, the wal-index is a mmapped
** file whose name is the database name with a "-shm" suffix added. For that
** reason, the wal-index is sometimes called the "shm" file.
**
** The wal-index is transient. After a crash, the wal-index can (and should
** be) reconstructed from the original WAL file. In fact, the VFS is required
** to either truncate or zero the header of the wal-index when the last
** connection to it closes. Because the wal-index is transient, it can
** use an architecture-specific format; it does not have to be cross-platform.
** Hence, unlike the database and WAL file formats which store all values
** as big endian, the wal-index can store multi-byte values in the native
** byte order of the host computer.
**
** The purpose of the wal-index is to answer this question quickly: Given
** a page number P and a maximum frame index M, return the index of the
** last frame in the wal before frame M for page P in the WAL, or return
** NULL if there are no frames for page P in the WAL prior to M.
**
** The wal-index consists of a header region, followed by one or
** more index blocks.
**
** The wal-index header contains the total number of frames within the WAL
** in the mxFrame field.
**
** Each index block except for the first contains information on
** HASHTABLE_NPAGE frames. The first index block contains information on
** HASHTABLE_NPAGE_ONE frames. The values of HASHTABLE_NPAGE_ONE and
** HASHTABLE_NPAGE are selected so that together the wal-index header and
** first index block are the same size as all other index blocks in the
** wal-index. The values are:
**
** HASHTABLE_NPAGE 4096
** HASHTABLE_NPAGE_ONE 4062
**
** Each index block contains two sections, a page-mapping that contains the
** database page number associated with each wal frame, and a hash-table
** that allows readers to query an index block for a specific page number.
** The page-mapping is an array of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE
** for the first index block) 32-bit page numbers. The first entry in the
** first index-block contains the database page number corresponding to the
** first frame in the WAL file. The first entry in the second index block
** in the WAL file corresponds to the (HASHTABLE_NPAGE_ONE+1)th frame in
** the log, and so on.
**
** The last index block in a wal-index usually contains less than the full
** complement of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE) page-numbers,
** depending on the contents of the WAL file. This does not change the
** allocated size of the page-mapping array - the page-mapping array merely
** contains unused entries.
**
** Even without using the hash table, the last frame for page P
** can be found by scanning the page-mapping sections of each index block
** starting with the last index block and moving toward the first, and
** within each index block, starting at the end and moving toward the
** beginning. The first entry that equals P corresponds to the frame
** holding the content for that page.
**
** The hash table consists of HASHTABLE_NSLOT 16-bit unsigned integers.
** HASHTABLE_NSLOT = 2*HASHTABLE_NPAGE, and there is one entry in the
** hash table for each page number in the mapping section, so the hash
** table is never more than half full. The expected number of collisions
** prior to finding a match is 1. Each entry of the hash table is a
** 1-based index of an entry in the mapping section of the same
** index block. Let K be the 1-based index of the largest entry in
** the mapping section. (For index blocks other than the last, K will
** always be exactly HASHTABLE_NPAGE (4096) and for the last index block
** K will be (mxFrame%HASHTABLE_NPAGE).) Unused slots of the hash table
** contain a value of 0.
**
** To look for page P in the hash table, first compute a hash iKey on
** P as follows:
**
** iKey = (P * 383) % HASHTABLE_NSLOT
**
** Then start scanning entries of the hash table, starting with iKey
** (wrapping around to the beginning when the end of the hash table is
** reached) until an unused hash slot is found. Let the first unused slot
** be at index iUnused. (iUnused might be less than iKey if there was
** wrap-around.) Because the hash table is never more than half full,
** the search is guaranteed to eventually hit an unused entry. Let
** iMax be the value between iKey and iUnused, closest to iUnused,
** where aHash[iMax]==P. If there is no iMax entry (if there exists
** no hash slot such that aHash[i]==p) then page P is not in the
** current index block. Otherwise the iMax-th mapping entry of the
** current index block corresponds to the last entry that references
** page P.
**
** A hash search begins with the last index block and moves toward the
** first index block, looking for entries corresponding to page P. On
** average, only two or three slots in each index block need to be
** examined in order to either find the last entry for page P, or to
** establish that no such entry exists in the block. Each index block
** holds over 4000 entries. So two or three index blocks are sufficient
** to cover a typical 10 megabyte WAL file, assuming 1K pages. 8 or 10
** comparisons (on average) suffice to either locate a frame in the
** WAL or to establish that the frame does not exist in the WAL. This
** is much faster than scanning the entire 10MB WAL.
**
** Note that entries are added in order of increasing K. Hence, one
** reader might be using some value K0 and a second reader that started
** at a later time (after additional transactions were added to the WAL
** and to the wal-index) might be using a different value K1, where K1>K0.
** Both readers can use the same hash table and mapping section to get
** the correct result. There may be entries in the hash table with
** K>K0 but to the first reader, those entries will appear to be unused
** slots in the hash table and so the first reader will get an answer as
** if no values greater than K0 had ever been inserted into the hash table
** in the first place - which is what reader one wants. Meanwhile, the
** second reader using K1 will see additional values that were inserted
** later, which is exactly what reader two wants.
**
** When a rollback occurs, the value of K is decreased. Hash table entries
** that correspond to frames greater than the new K value are removed
** from the hash table at this point.
*/
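// Illustrative sketch, not part of the ccgo translation: the cumulative
// frame checksum described in the comment above (the s0/s1 Fibonacci-weighted
// sums), written out in plain Go over data that has already been decoded
// into native-order 32-bit words. The generated _walChecksumBytes function
// later in this file implements the same recurrence directly on the mapped
// bytes, including the byte-swapped variant selected by the WAL magic number.
func exampleWalChecksum(words []uint32, s0, s1 uint32) (uint32, uint32) {
	// words must hold an even number of entries, mirroring the rule that
	// nByte is a positive multiple of 8.
	for i := 0; i+1 < len(words); i += 2 {
		s0 += words[i] + s1
		s1 += words[i+1] + s0
	}
	return s0, s1
}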
/* #include "wal.h" */
/*
** Trace output macros
*/
/*
** The maximum (and only) versions of the wal and wal-index formats
** that may be interpreted by this version of SQLite.
**
** If a client begins recovering a WAL file and finds that (a) the checksum
** values in the wal-header are correct and (b) the version field is not
** WAL_MAX_VERSION, recovery fails and SQLite returns SQLITE_CANTOPEN.
**
** Similarly, if a client successfully reads a wal-index header (i.e. the
** checksum test is successful) and finds that the version field is not
** WALINDEX_MAX_VERSION, then no read-transaction is opened and SQLite
** returns SQLITE_CANTOPEN.
*/
/*
** Index numbers for various locking bytes. WAL_NREADER is the number
** of available reader locks and should be at least 3. The default
** is SQLITE_SHM_NLOCK==8 and WAL_NREADER==5.
**
** Technically, the various VFSes are free to implement these locks however
** they see fit. However, compatibility is encouraged so that VFSes can
** interoperate. The standard implementation used on both unix and windows
** is for the index number to indicate a byte offset into the
** WalCkptInfo.aLock[] array in the wal-index header. In other words, all
** locks are on the shm file. The WALINDEX_LOCK_OFFSET constant (which
** should be 120) is the location in the shm file for the first locking
** byte.
*/
// C documentation
//
// /* Object declarations */
type TWalIndexHdr = struct {
FiVersion Tu32
Funused Tu32
FiChange Tu32
FisInit Tu8
FbigEndCksum Tu8
FszPage Tu16
FmxFrame Tu32
FnPage Tu32
FaFrameCksum [2]Tu32
FaSalt [2]Tu32
FaCksum [2]Tu32
}
type WalIndexHdr = TWalIndexHdr
type TWalIterator = struct {
FiPrior Tu32
FnSegment int32
FaSegment [1]TWalSegment
}
type WalIterator = TWalIterator
type TWalCkptInfo = struct {
FnBackfill Tu32
FaReadMark [5]Tu32
FaLock [8]Tu8
FnBackfillAttempted Tu32
FnotUsed0 Tu32
}
type WalCkptInfo = TWalCkptInfo
/*
** The following object holds a copy of the wal-index header content.
**
** The actual header in the wal-index consists of two copies of this
** object followed by one instance of the WalCkptInfo object.
** For all versions of SQLite through 3.10.0 and probably beyond,
** the locking bytes (WalCkptInfo.aLock) start at offset 120 and
** the total header size is 136 bytes.
**
** The szPage value can be any power of 2 between 512 and 32768, inclusive.
** Or it can be 1 to represent a 65536-byte page. The latter case was
** added in 3.7.1 when support for 64K pages was added.
*/
type TWalIndexHdr1 = struct {
FiVersion Tu32
Funused Tu32
FiChange Tu32
FisInit Tu8
FbigEndCksum Tu8
FszPage Tu16
FmxFrame Tu32
FnPage Tu32
FaFrameCksum [2]Tu32
FaSalt [2]Tu32
FaCksum [2]Tu32
}
type WalIndexHdr1 = TWalIndexHdr1
/*
** A copy of the following object occurs in the wal-index immediately
** following the second copy of the WalIndexHdr. This object stores
** information used by checkpoint.
**
** nBackfill is the number of frames in the WAL that have been written
** back into the database. (We call the act of moving content from WAL to
** database "backfilling".) The nBackfill number is never greater than
** WalIndexHdr.mxFrame. nBackfill can only be increased by threads
** holding the WAL_CKPT_LOCK lock (which includes a recovery thread).
** However, a WAL_WRITE_LOCK thread can move the value of nBackfill from
** mxFrame back to zero when the WAL is reset.
**
** nBackfillAttempted is the largest value of nBackfill that a checkpoint
** has attempted to achieve. Normally nBackfill==nBackfillAttempted, however
** the nBackfillAttempted is set before any backfilling is done and the
** nBackfill is only set after all backfilling completes. So if a checkpoint
** crashes, nBackfillAttempted might be larger than nBackfill. The
** WalIndexHdr.mxFrame must never be less than nBackfillAttempted.
**
** The aLock[] field is a set of bytes used for locking. These bytes should
** never be read or written.
**
** There is one entry in aReadMark[] for each reader lock. If a reader
** holds read-lock K, then the value in aReadMark[K] is no greater than
** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff)
** for any aReadMark[] means that entry is unused. aReadMark[0] is
** a special case; its value is never used and it exists as a place-holder
** to avoid having to offset aReadMark[] indexes by one. Readers holding
** WAL_READ_LOCK(0) always ignore the entire WAL and read all content
** directly from the database.
**
** The value of aReadMark[K] may only be changed by a thread that
** is holding an exclusive lock on WAL_READ_LOCK(K). Thus, the value of
** aReadMark[K] cannot be changed while a reader is using that mark
** since the reader will be holding a shared lock on WAL_READ_LOCK(K).
**
** The checkpointer may only transfer frames from WAL to database where
** the frame numbers are less than or equal to every aReadMark[] that is
** in use (that is, every aReadMark[j] for which there is a corresponding
** WAL_READ_LOCK(j)). New readers (usually) pick the aReadMark[] with the
** largest value and will increase an unused aReadMark[] to mxFrame if there
** is not already an aReadMark[] equal to mxFrame. The exception to the
** previous sentence is when nBackfill equals mxFrame (meaning that everything
** in the WAL has been backfilled into the database) then new readers
** will choose aReadMark[0] which has value 0 and hence such readers will
** get all their content directly from the database file and ignore
** the WAL.
**
** Writers normally append new frames to the end of the WAL. However,
** if nBackfill equals mxFrame (meaning that all WAL content has been
** written back into the database) and if no readers are using the WAL
** (in other words, if there are no WAL_READ_LOCK(i) where i>0) then
** the writer will first "reset" the WAL back to the beginning and start
** writing new content beginning at frame 1.
**
** We assume that 32-bit loads are atomic and so no locks are needed in
** order to read from any aReadMark[] entries.
*/
type TWalCkptInfo1 = struct {
FnBackfill Tu32
FaReadMark [5]Tu32
FaLock [8]Tu8
FnBackfillAttempted Tu32
FnotUsed0 Tu32
}
type WalCkptInfo1 = TWalCkptInfo1
/*
** This is a schematic view of the complete 136-byte header of the
** wal-index file (also known as the -shm file):
**
** +-----------------------------+
** 0: | iVersion | \
** +-----------------------------+ |
** 4: | (unused padding) | |
** +-----------------------------+ |
** 8: | iChange | |
** +-------+-------+-------------+ |
** 12: | bInit | bBig | szPage | |
** +-------+-------+-------------+ |
** 16: | mxFrame | | First copy of the
** +-----------------------------+ | WalIndexHdr object
** 20: | nPage | |
** +-----------------------------+ |
** 24: | aFrameCksum | |
** | | |
** +-----------------------------+ |
** 32: | aSalt | |
** | | |
** +-----------------------------+ |
** 40: | aCksum | |
** | | /
** +-----------------------------+
** 48: | iVersion | \
** +-----------------------------+ |
** 52: | (unused padding) | |
** +-----------------------------+ |
** 56: | iChange | |
** +-------+-------+-------------+ |
** 60: | bInit | bBig | szPage | |
** +-------+-------+-------------+ | Second copy of the
** 64: | mxFrame | | WalIndexHdr
** +-----------------------------+ |
** 68: | nPage | |
** +-----------------------------+ |
** 72: | aFrameCksum | |
** | | |
** +-----------------------------+ |
** 80: | aSalt | |
** | | |
** +-----------------------------+ |
** 88: | aCksum | |
** | | /
** +-----------------------------+
** 96: | nBackfill |
** +-----------------------------+
** 100: | 5 read marks |
** | |
** | |
** | |
** | |
** +-------+-------+------+------+
** 120: | Write | Ckpt | Rcvr | Rd0 | \
** +-------+-------+------+------+ ) 8 lock bytes
** | Read1 | Read2 | Rd3 | Rd4 | /
** +-------+-------+------+------+
** 128: | nBackfillAttempted |
** +-----------------------------+
** 132: | (unused padding) |
** +-----------------------------+
*/
/* A block of WALINDEX_LOCK_RESERVED bytes beginning at
** WALINDEX_LOCK_OFFSET is reserved for locks. Since some systems
** only support mandatory file-locks, we do not read or write data
** from the region of the file on which locks are applied.
*/
/* Size of header before each frame in wal */
/* Size of write ahead log header, including checksum. */
/* WAL magic value. Either this value, or the same value with the least
** significant bit also set (WAL_MAGIC | 0x00000001) is stored in 32-bit
** big-endian format in the first 4 bytes of a WAL file.
**
** If the LSB is set, then the checksums for each frame within the WAL
** file are calculated by treating all data as an array of 32-bit
** big-endian words. Otherwise, they are calculated by interpreting
** all data as 32-bit little-endian words.
*/
/*
** Return the offset of frame iFrame in the write-ahead log file,
** assuming a database page size of szPage bytes. The offset returned
** is to the start of the write-ahead log frame-header.
*/
/*
** An open write-ahead log file is represented by an instance of the
** following object.
*/
type TWal1 = struct {
FpVfs uintptr
FpDbFd uintptr
FpWalFd uintptr
FiCallback Tu32
FmxWalSize Ti64
FnWiData int32
FszFirstBlock int32
FapWiData uintptr
FszPage Tu32
FreadLock Ti16
FsyncFlags Tu8
FexclusiveMode Tu8
FwriteLock Tu8
FckptLock Tu8
FreadOnly Tu8
FtruncateOnCommit Tu8
FsyncHeader Tu8
FpadToSectorBoundary Tu8
FbShmUnreliable Tu8
Fhdr TWalIndexHdr
FminFrame Tu32
FiReCksum Tu32
FzWalName uintptr
FnCkpt Tu32
FpSnapshot uintptr
}
type Wal1 = TWal1
/*
** Candidate values for Wal.exclusiveMode.
*/
/*
** Possible values for WAL.readOnly
*/
// C documentation
//
// /*
// ** Each page of the wal-index mapping contains a hash-table made up of
// ** an array of HASHTABLE_NSLOT elements of the following type.
// */
type Tht_slot = uint16
type ht_slot = Tht_slot
/*
** This structure is used to implement an iterator that loops through
** all frames in the WAL in database page order. Where two or more frames
** correspond to the same database page, the iterator visits only the
** frame most recently written to the WAL (in other words, the frame with
** the largest index).
**
** The internals of this structure are only accessed by:
**
** walIteratorInit() - Create a new iterator,
** walIteratorNext() - Step an iterator,
** walIteratorFree() - Free an iterator.
**
** This functionality is used by the checkpoint code (see walCheckpoint()).
*/
type TWalIterator1 = struct {
FiPrior Tu32
FnSegment int32
FaSegment [1]TWalSegment
}
type WalIterator1 = TWalIterator1
/*
** Define the parameters of the hash tables in the wal-index file. There
** is a hash-table following every HASHTABLE_NPAGE page numbers in the
** wal-index.
**
** Changing any of these constants will alter the wal-index format and
** create incompatibilities.
*/
/*
** The block of page numbers associated with the first hash-table in a
** wal-index is smaller than usual. This is so that there is a complete
** hash-table on each aligned 32KB page of the wal-index.
*/
/* The wal-index is divided into pages of WALINDEX_PGSZ bytes each. */
/*
** Structured Exception Handling (SEH) is a Windows-specific technique
** for catching exceptions raised while accessing memory-mapped files.
**
** The -DSQLITE_USE_SEH compile-time option means to use SEH to catch and
** deal with system-level errors that arise during WAL -shm file processing.
** Without this compile-time option, any system-level faults that appear
** while accessing the memory-mapped -shm file will cause a process-wide
** signal to be delivered, which will more than likely cause the entire
** process to exit.
*/
// C documentation
//
// /*
// ** Obtain a pointer to the iPage'th page of the wal-index. The wal-index
// ** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are
// ** numbered from zero.
// **
// ** If the wal-index is currently smaller than iPage pages then the size
// ** of the wal-index might be increased, but only if it is safe to do
// ** so. It is safe to enlarge the wal-index if pWal->writeLock is true
// ** or pWal->exclusiveMode==WAL_HEAPMEMORY_MODE.
// **
// ** Three possible result scenarios:
// **
// ** (1) rc==SQLITE_OK and *ppPage==Requested-Wal-Index-Page
// ** (2) rc>=SQLITE_ERROR and *ppPage==NULL
// ** (3) rc==SQLITE_OK and *ppPage==NULL // only if iPage==0
// **
// ** Scenario (3) can only occur when pWal->writeLock is false and iPage==0
// */
func _walIndexPageRealloc(tls *libc.TLS, pWal uintptr, iPage int32, ppPage uintptr) (r int32) {
var apNew, p1 uintptr
var nByte Tsqlite3_int64
var rc int32
_, _, _, _ = apNew, nByte, rc, p1
rc = SQLITE_OK
/* Enlarge the pWal->apWiData[] array if required */
if (*TWal)(unsafe.Pointer(pWal)).FnWiData <= iPage {
nByte = int64(uint64(8) * uint64(iPage+libc.Int32FromInt32(1)))
apNew = _sqlite3Realloc(tls, (*TWal)(unsafe.Pointer(pWal)).FapWiData, uint64(nByte))
if !(apNew != 0) {
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return int32(SQLITE_NOMEM)
}
libc.Xmemset(tls, apNew+uintptr((*TWal)(unsafe.Pointer(pWal)).FnWiData)*8, 0, uint64(8)*uint64(iPage+libc.Int32FromInt32(1)-(*TWal)(unsafe.Pointer(pWal)).FnWiData))
(*TWal)(unsafe.Pointer(pWal)).FapWiData = apNew
(*TWal)(unsafe.Pointer(pWal)).FnWiData = iPage + int32(1)
}
/* Request a pointer to the required page from the VFS */
if int32((*TWal)(unsafe.Pointer(pWal)).FexclusiveMode) == int32(WAL_HEAPMEMORY_MODE) {
*(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(iPage)*8)) = _sqlite3MallocZero(tls, libc.Uint64FromInt64(2)*uint64(libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2))+libc.Uint64FromInt32(HASHTABLE_NPAGE)*libc.Uint64FromInt64(4))
if !(*(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(iPage)*8)) != 0) {
rc = int32(SQLITE_NOMEM)
}
} else {
rc = _sqlite3OsShmMap(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, iPage, int32(libc.Uint64FromInt64(2)*uint64(libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2))+libc.Uint64FromInt32(HASHTABLE_NPAGE)*libc.Uint64FromInt64(4)), int32((*TWal)(unsafe.Pointer(pWal)).FwriteLock), (*TWal)(unsafe.Pointer(pWal)).FapWiData+uintptr(iPage)*8)
if rc == SQLITE_OK {
if iPage > 0 && _sqlite3FaultSim(tls, int32(600)) != 0 {
rc = int32(SQLITE_NOMEM)
}
} else {
if rc&int32(0xff) == int32(SQLITE_READONLY) {
p1 = pWal + 66
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(WAL_SHM_RDONLY))
if rc == int32(SQLITE_READONLY) {
rc = SQLITE_OK
}
}
}
}
*(*uintptr)(unsafe.Pointer(ppPage)) = *(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(iPage)*8))
return rc
}
func _walIndexPage(tls *libc.TLS, pWal uintptr, iPage int32, ppPage uintptr) (r int32) {
var v1 uintptr
var v2 bool
_, _ = v1, v2
if v2 = (*TWal)(unsafe.Pointer(pWal)).FnWiData <= iPage; !v2 {
v1 = *(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(iPage)*8))
*(*uintptr)(unsafe.Pointer(ppPage)) = v1
}
if v2 || v1 == uintptr(0) {
return _walIndexPageRealloc(tls, pWal, iPage, ppPage)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Return a pointer to the WalCkptInfo structure in the wal-index.
// */
func _walCkptInfo(tls *libc.TLS, pWal uintptr) (r uintptr) {
return *(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData)) + uintptr(libc.Uint64FromInt64(48)/libc.Uint64FromInt32(2))*4
}
// C documentation
//
// /*
// ** Return a pointer to the WalIndexHdr structure in the wal-index.
// */
func _walIndexHdr(tls *libc.TLS, pWal uintptr) (r uintptr) {
return *(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData))
}
/*
** The argument to this macro must be of type u32. On a little-endian
** architecture, it returns the u32 value that results from interpreting
** the 4 bytes as a big-endian value. On a big-endian architecture, it
** returns the value that would be produced by interpreting the 4 bytes
** of the input value as a little-endian integer.
*/
// C documentation
//
// /*
// ** Generate or extend an 8 byte checksum based on the data in
// ** array aByte[] and the initial values of aIn[0] and aIn[1] (or
// ** initial values of 0 and 0 if aIn==NULL).
// **
// ** The checksum is written back into aOut[] before returning.
// **
// ** nByte must be a positive multiple of 8.
// */
func _walChecksumBytes(tls *libc.TLS, nativeCksum int32, a uintptr, nByte int32, aIn uintptr, aOut uintptr) {
var aData, aEnd, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v2, v3, v4, v5, v6, v7, v8, v9 uintptr
var s1, s2, v1 Tu32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = aData, aEnd, s1, s2, v1, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v2, v3, v4, v5, v6, v7, v8, v9
aData = a
aEnd = a + uintptr(nByte)
if aIn != 0 {
s1 = *(*Tu32)(unsafe.Pointer(aIn))
s2 = *(*Tu32)(unsafe.Pointer(aIn + 1*4))
} else {
v1 = libc.Uint32FromInt32(0)
s2 = v1
s1 = v1
}
if !(nativeCksum != 0) {
for cond := true; cond; cond = aData < aEnd {
s1 += *(*Tu32)(unsafe.Pointer(aData))&uint32(0x000000FF)<<int32(24) + *(*Tu32)(unsafe.Pointer(aData))&uint32(0x0000FF00)<<int32(8) + *(*Tu32)(unsafe.Pointer(aData))&uint32(0x00FF0000)>>int32(8) + *(*Tu32)(unsafe.Pointer(aData))&uint32(0xFF000000)>>int32(24) + s2
s2 += *(*Tu32)(unsafe.Pointer(aData + 1*4))&uint32(0x000000FF)<<int32(24) + *(*Tu32)(unsafe.Pointer(aData + 1*4))&uint32(0x0000FF00)<<int32(8) + *(*Tu32)(unsafe.Pointer(aData + 1*4))&uint32(0x00FF0000)>>int32(8) + *(*Tu32)(unsafe.Pointer(aData + 1*4))&uint32(0xFF000000)>>int32(24) + s1
aData += uintptr(2) * 4
}
} else {
if nByte%int32(64) == 0 {
for cond := true; cond; cond = aData < aEnd {
v2 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v2)) + s2
v3 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v3)) + s1
v4 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v4)) + s2
v5 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v5)) + s1
v6 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v6)) + s2
v7 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v7)) + s1
v8 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v8)) + s2
v9 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v9)) + s1
v10 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v10)) + s2
v11 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v11)) + s1
v12 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v12)) + s2
v13 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v13)) + s1
v14 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v14)) + s2
v15 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v15)) + s1
v16 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v16)) + s2
v17 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v17)) + s1
}
} else {
for cond := true; cond; cond = aData < aEnd {
v18 = aData
aData += 4
s1 += *(*Tu32)(unsafe.Pointer(v18)) + s2
v19 = aData
aData += 4
s2 += *(*Tu32)(unsafe.Pointer(v19)) + s1
}
}
}
*(*Tu32)(unsafe.Pointer(aOut)) = s1
*(*Tu32)(unsafe.Pointer(aOut + 1*4)) = s2
}
// C documentation
//
// /*
// ** If there is the possibility of concurrent access to the SHM file
// ** from multiple threads and/or processes, then do a memory barrier.
// */
func _walShmBarrier(tls *libc.TLS, pWal uintptr) {
if int32((*TWal)(unsafe.Pointer(pWal)).FexclusiveMode) != int32(WAL_HEAPMEMORY_MODE) {
_sqlite3OsShmBarrier(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd)
}
}
/*
** Add the SQLITE_NO_TSAN as part of the return-type of a function
** definition as a hint that the function contains constructs that
** might give false-positive TSAN warnings.
**
** See tag-20200519-1.
*/
// C documentation
//
// /*
// ** Write the header information in pWal->hdr into the wal-index.
// **
// ** The checksum on pWal->hdr is updated before it is written.
// */
func _walIndexWriteHdr(tls *libc.TLS, pWal uintptr) {
var aHdr uintptr
var nCksum int32
_, _ = aHdr, nCksum
aHdr = _walIndexHdr(tls, pWal)
nCksum = int32(uint64(libc.UintptrFromInt32(0) + 40))
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FisInit = uint8(1)
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FiVersion = uint32(WALINDEX_MAX_VERSION)
_walChecksumBytes(tls, int32(1), pWal+72, nCksum, uintptr(0), pWal+72+40)
/* Possible TSAN false-positive. See tag-20200519-1 */
libc.Xmemcpy(tls, aHdr+1*48, pWal+72, uint64(48))
_walShmBarrier(tls, pWal)
libc.Xmemcpy(tls, aHdr, pWal+72, uint64(48))
}
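// Illustrative sketch, not part of the ccgo translation: _walIndexWriteHdr
// above publishes the header by writing the second copy first, issuing a
// memory barrier, and then writing the first copy. A reader does the mirror
// image: it copies the first header, issues a barrier, copies the second,
// and only trusts the snapshot if the two copies match (this is what
// walIndexTryHdr does in the original C source). The helper below shows that
// reader-side consistency check on two already-captured snapshots.
func exampleWalHdrCopiesConsistent(copy1, copy2 []byte) bool {
	// Each slice is expected to hold one 48-byte WalIndexHdr snapshot.
	if len(copy1) != len(copy2) {
		return false
	}
	for i := range copy1 {
		if copy1[i] != copy2[i] {
			return false // a writer was mid-update; the caller retries under a lock
		}
	}
	return true
}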
// C documentation
//
// /*
// ** This function encodes a single frame header and writes it to a buffer
// ** supplied by the caller. A frame-header is made up of a series of
// ** 4-byte big-endian integers, as follows:
// **
// ** 0: Page number.
// ** 4: For commit records, the size of the database image in pages
// ** after the commit. For all other records, zero.
// ** 8: Salt-1 (copied from the wal-header)
// ** 12: Salt-2 (copied from the wal-header)
// ** 16: Checksum-1.
// ** 20: Checksum-2.
// */
func _walEncodeFrame(tls *libc.TLS, pWal uintptr, iPage Tu32, nTruncate Tu32, aData uintptr, aFrame uintptr) {
var aCksum uintptr
var nativeCksum int32
_, _ = aCksum, nativeCksum /* True for native byte-order checksums */
aCksum = pWal + 72 + 24
_sqlite3Put4byte(tls, aFrame, iPage)
_sqlite3Put4byte(tls, aFrame+4, nTruncate)
if (*TWal)(unsafe.Pointer(pWal)).FiReCksum == uint32(0) {
libc.Xmemcpy(tls, aFrame+8, pWal+72+32, uint64(8))
nativeCksum = libc.BoolInt32(int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FbigEndCksum) == SQLITE_BIGENDIAN)
_walChecksumBytes(tls, nativeCksum, aFrame, int32(8), aCksum, aCksum)
_walChecksumBytes(tls, nativeCksum, aData, int32((*TWal)(unsafe.Pointer(pWal)).FszPage), aCksum, aCksum)
_sqlite3Put4byte(tls, aFrame+16, *(*Tu32)(unsafe.Pointer(aCksum)))
_sqlite3Put4byte(tls, aFrame+20, *(*Tu32)(unsafe.Pointer(aCksum + 1*4)))
} else {
libc.Xmemset(tls, aFrame+8, 0, uint64(16))
}
}
// C documentation
//
// /*
// ** Check to see if the frame with header in aFrame[] and content
// ** in aData[] is valid. If it is a valid frame, fill *piPage and
// ** *pnTruncate and return true. Return false if the frame is not valid.
// */
func _walDecodeFrame(tls *libc.TLS, pWal uintptr, piPage uintptr, pnTruncate uintptr, aData uintptr, aFrame uintptr) (r int32) {
var aCksum uintptr
var nativeCksum int32
var pgno Tu32
_, _, _ = aCksum, nativeCksum, pgno /* True for native byte-order checksums */
aCksum = pWal + 72 + 24 /* Page number of the frame */
/* A frame is only valid if the salt values in the frame-header
** match the salt values in the wal-header.
*/
if libc.Xmemcmp(tls, pWal+72+32, aFrame+8, uint64(8)) != 0 {
return 0
}
/* A frame is only valid if the page number is greater than zero.
*/
pgno = _sqlite3Get4byte(tls, aFrame)
if pgno == uint32(0) {
return 0
}
/* A frame is only valid if a checksum of the WAL header,
** all prior frames, the first 16 bytes of this frame-header,
** and the frame-data matches the checksum in the last 8
** bytes of this frame-header.
*/
nativeCksum = libc.BoolInt32(int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FbigEndCksum) == SQLITE_BIGENDIAN)
_walChecksumBytes(tls, nativeCksum, aFrame, int32(8), aCksum, aCksum)
_walChecksumBytes(tls, nativeCksum, aData, int32((*TWal)(unsafe.Pointer(pWal)).FszPage), aCksum, aCksum)
if *(*Tu32)(unsafe.Pointer(aCksum)) != _sqlite3Get4byte(tls, aFrame+16) || *(*Tu32)(unsafe.Pointer(aCksum + 1*4)) != _sqlite3Get4byte(tls, aFrame+20) {
/* Checksum failed. */
return 0
}
/* If we reach this point, the frame is valid. Return the page number
** and the new database size.
*/
*(*Tu32)(unsafe.Pointer(piPage)) = pgno
*(*Tu32)(unsafe.Pointer(pnTruncate)) = _sqlite3Get4byte(tls, aFrame+4)
return int32(1)
}
// C documentation
//
// /*
// ** Set or release locks on the WAL. Locks are either shared or exclusive.
// ** A lock cannot be moved directly between shared and exclusive - it must go
// ** through the unlocked state first.
// **
// ** In locking_mode=EXCLUSIVE, all of these routines become no-ops.
// */
func _walLockShared(tls *libc.TLS, pWal uintptr, lockIdx int32) (r int32) {
var rc int32
_ = rc
if (*TWal)(unsafe.Pointer(pWal)).FexclusiveMode != 0 {
return SQLITE_OK
}
rc = _sqlite3OsShmLock(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, lockIdx, int32(1), libc.Int32FromInt32(SQLITE_SHM_LOCK)|libc.Int32FromInt32(SQLITE_SHM_SHARED))
return rc
}
func _walUnlockShared(tls *libc.TLS, pWal uintptr, lockIdx int32) {
if (*TWal)(unsafe.Pointer(pWal)).FexclusiveMode != 0 {
return
}
_sqlite3OsShmLock(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, lockIdx, int32(1), libc.Int32FromInt32(SQLITE_SHM_UNLOCK)|libc.Int32FromInt32(SQLITE_SHM_SHARED))
}
func _walLockExclusive(tls *libc.TLS, pWal uintptr, lockIdx int32, n int32) (r int32) {
var rc int32
_ = rc
if (*TWal)(unsafe.Pointer(pWal)).FexclusiveMode != 0 {
return SQLITE_OK
}
rc = _sqlite3OsShmLock(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, lockIdx, n, libc.Int32FromInt32(SQLITE_SHM_LOCK)|libc.Int32FromInt32(SQLITE_SHM_EXCLUSIVE))
return rc
}
func _walUnlockExclusive(tls *libc.TLS, pWal uintptr, lockIdx int32, n int32) {
if (*TWal)(unsafe.Pointer(pWal)).FexclusiveMode != 0 {
return
}
_sqlite3OsShmLock(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, lockIdx, n, libc.Int32FromInt32(SQLITE_SHM_UNLOCK)|libc.Int32FromInt32(SQLITE_SHM_EXCLUSIVE))
}
// C documentation
//
// /*
// ** Compute a hash on a page number. The resulting hash value must land
// ** between 0 and (HASHTABLE_NSLOT-1). The walHashNext() function advances
// ** the hash to the next value in the event of a collision.
// */
func _walHash(tls *libc.TLS, iPage Tu32) (r int32) {
return int32(iPage * uint32(HASHTABLE_HASH_1) & uint32(libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2)-libc.Int32FromInt32(1)))
}
func _walNextHash(tls *libc.TLS, iPriorHash int32) (r int32) {
return (iPriorHash + int32(1)) & (libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2) - libc.Int32FromInt32(1))
}
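// Illustrative sketch, not part of the ccgo translation: the probe sequence
// described in the wal-index comments, mirroring _walHash and _walNextHash
// above with the literal constants 383 (HASHTABLE_HASH_1) and 4096
// (HASHTABLE_NPAGE). aHash is one index block's hash table, aPgno the
// corresponding page-mapping array, and the result is the largest 1-based
// mapping index whose entry holds iPage, or 0 if the block has no entry for
// that page. The real lookup (walIndexLookup in the C source) additionally
// filters candidates against the reader's mxFrame.
func exampleWalHashLookup(aHash []uint16, aPgno []uint32, iPage uint32) int {
	const nSlot = 2 * 4096 // HASHTABLE_NSLOT == 2*HASHTABLE_NPAGE
	iMax := 0
	iKey := int(iPage * 383 & (nSlot - 1)) // same starting slot as _walHash
	for aHash[iKey] != 0 {
		idx := int(aHash[iKey])
		// Hash slots store 1-based indexes into the mapping section.
		if aPgno[idx-1] == iPage && idx > iMax {
			iMax = idx
		}
		iKey = (iKey + 1) & (nSlot - 1) // same wrap-around step as _walNextHash
	}
	return iMax
}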
// C documentation
//
// /*
// ** An instance of the WalHashLoc object is used to describe the location
// ** of a page hash table in the wal-index. This becomes the return value
// ** from walHashGet().
// */
type TWalHashLoc = struct {
FaHash uintptr
FaPgno uintptr
FiZero Tu32
}
type WalHashLoc = TWalHashLoc
type TWalHashLoc1 = struct {
FaHash uintptr
FaPgno uintptr
FiZero Tu32
}
type WalHashLoc1 = TWalHashLoc1
// C documentation
//
// /*
// ** Return pointers to the hash table and page number array stored on
// ** page iHash of the wal-index. The wal-index is broken into 32KB pages
// ** numbered starting from 0.
// **
// ** Set output variable pLoc->aHash to point to the start of the hash table
// ** in the wal-index file. Set pLoc->iZero to one less than the frame
// ** number of the first frame indexed by this hash table. If a
// ** slot in the hash table is set to N, it refers to frame number
// ** (pLoc->iZero+N) in the log.
// **
// ** Finally, set pLoc->aPgno so that pLoc->aPgno[0] is the page number of the
// ** first frame indexed by the hash table, frame (pLoc->iZero).
// */
func _walHashGet(tls *libc.TLS, pWal uintptr, iHash int32, pLoc uintptr) (r int32) {
var rc int32
_ = rc /* Return code */
rc = _walIndexPage(tls, pWal, iHash, pLoc+8)
if (*TWalHashLoc)(unsafe.Pointer(pLoc)).FaPgno != 0 {
(*TWalHashLoc)(unsafe.Pointer(pLoc)).FaHash = (*TWalHashLoc)(unsafe.Pointer(pLoc)).FaPgno + 4096*4
if iHash == 0 {
(*TWalHashLoc)(unsafe.Pointer(pLoc)).FaPgno = (*TWalHashLoc)(unsafe.Pointer(pLoc)).FaPgno + uintptr((libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4))*4
(*TWalHashLoc)(unsafe.Pointer(pLoc)).FiZero = uint32(0)
} else {
(*TWalHashLoc)(unsafe.Pointer(pLoc)).FiZero = uint32(libc.Uint64FromInt32(HASHTABLE_NPAGE) - (libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4) + uint64((iHash-int32(1))*int32(HASHTABLE_NPAGE)))
}
} else {
if rc == SQLITE_OK {
rc = int32(SQLITE_ERROR)
}
}
return rc
}
// C documentation
//
// /*
// ** Return the number of the wal-index page that contains the hash-table
// ** and page-number array that contain entries corresponding to WAL frame
// ** iFrame. The wal-index is broken up into 32KB pages. Wal-index pages
// ** are numbered starting from 0.
// */
func _walFramePage(tls *libc.TLS, iFrame Tu32) (r int32) {
var iHash int32
_ = iHash
iHash = int32((uint64(iFrame+uint32(HASHTABLE_NPAGE)) - (libc.Uint64FromInt32(HASHTABLE_NPAGE) - (libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4)) - uint64(1)) / uint64(HASHTABLE_NPAGE))
return iHash
}
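// Illustrative sketch, not part of the ccgo translation: the arithmetic in
// _walFramePage above, written out with the concrete sizes given in the
// wal-index comments (HASHTABLE_NPAGE == 4096 frames per index block, and
// HASHTABLE_NPAGE_ONE == 4062 frames on the first block, because the
// 136-byte wal-index header occupies 34 of its 32-bit mapping slots).
// WAL frames are numbered starting from 1.
func exampleWalFramePage(iFrame uint32) int {
	const (
		framesPerBlock = 4096 // HASHTABLE_NPAGE
		framesOnFirst  = 4062 // HASHTABLE_NPAGE_ONE
	)
	if iFrame <= framesOnFirst {
		return 0 // frames 1..4062 are indexed by wal-index page 0
	}
	// Frames 4063..8158 land on wal-index page 1, 8159..12254 on page 2, etc.
	return 1 + int(iFrame-framesOnFirst-1)/framesPerBlock
}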
// C documentation
//
// /*
// ** Return the page number associated with frame iFrame in this WAL.
// */
func _walFramePgno(tls *libc.TLS, pWal uintptr, iFrame Tu32) (r Tu32) {
var iHash int32
_ = iHash
iHash = _walFramePage(tls, iFrame)
if iHash == 0 {
return *(*Tu32)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData)) + uintptr((libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4)+uint64(iFrame)-uint64(1))*4))
}
return *(*Tu32)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(iHash)*8)) + uintptr((uint64(iFrame-uint32(1))-(libc.Uint64FromInt32(HASHTABLE_NPAGE)-(libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4)))%uint64(HASHTABLE_NPAGE))*4))
}
// C documentation
//
// /*
// ** Remove entries from the hash table that point to WAL slots greater
// ** than pWal->hdr.mxFrame.
// **
// ** This function is called whenever pWal->hdr.mxFrame is decreased due
// ** to a rollback or savepoint.
// **
// ** At most only the hash table containing pWal->hdr.mxFrame needs to be
// ** updated. Any later hash tables will be automatically cleared when
// ** pWal->hdr.mxFrame advances to the point where those hash tables are
// ** actually needed.
// */
func _walCleanupHash(tls *libc.TLS, pWal uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var i, iLimit, nByte int32
var _ /* sLoc at bp+0 */ TWalHashLoc
_, _, _ = i, iLimit, nByte /* Hash table location */
iLimit = 0 /* Used to iterate through aHash[] */
if (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame == uint32(0) {
return
}
/* Obtain pointers to the hash-table and page-number array containing
** the entry that corresponds to frame pWal->hdr.mxFrame. It is guaranteed
** that the page on which said hash-table and array reside is already mapped.(1)
*/
i = _walHashGet(tls, pWal, _walFramePage(tls, (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame), bp)
if i != 0 {
return
} /* Defense-in-depth, in case (1) above is wrong */
/* Zero all hash-table entries that correspond to frame numbers greater
** than pWal->hdr.mxFrame.
*/
iLimit = int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame - (*(*TWalHashLoc)(unsafe.Pointer(bp))).FiZero)
i = 0
for {
if !(i < libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2)) {
break
}
if int32(*(*Tht_slot)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash + uintptr(i)*2))) > iLimit {
*(*Tht_slot)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash + uintptr(i)*2)) = uint16(0)
}
goto _1
_1:
;
i++
}
/* Zero the entries in the aPgno array that correspond to frames with
** frame numbers greater than pWal->hdr.mxFrame.
*/
nByte = int32(int64((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash) - int64((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno+uintptr(iLimit)*4))
libc.Xmemset(tls, (*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno+uintptr(iLimit)*4, 0, uint64(nByte))
}
// C documentation
//
// /*
// ** Set an entry in the wal-index that will map database page number
// ** pPage into WAL frame iFrame.
// */
func _walIndexAppend(tls *libc.TLS, pWal uintptr, iFrame Tu32, iPage Tu32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var iKey, idx, nByte, nCollide, rc, v2 int32
var _ /* sLoc at bp+0 */ TWalHashLoc
_, _, _, _, _, _ = iKey, idx, nByte, nCollide, rc, v2 /* Wal-index hash table location */
rc = _walHashGet(tls, pWal, _walFramePage(tls, iFrame), bp)
/* Assuming the wal-index file was successfully mapped, populate the
** page number array and hash table entry.
*/
if rc == SQLITE_OK { /* Number of hash collisions */
idx = int32(iFrame - (*(*TWalHashLoc)(unsafe.Pointer(bp))).FiZero)
/* If this is the first entry to be added to this hash-table, zero the
** entire hash table and aPgno[] array before proceeding.
*/
if idx == int32(1) {
nByte = int32(int64((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash+uintptr(libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2))*2) - int64((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno))
libc.Xmemset(tls, (*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno, 0, uint64(nByte))
}
/* If the entry in aPgno[] is already set, then the previous writer
** must have exited unexpectedly in the middle of a transaction (after
** writing one or more dirty pages to the WAL to free up memory).
** Remove the remnants of that writer's uncommitted transaction from
** the hash-table before writing any new entries.
*/
if *(*Tu32)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno + uintptr(idx-int32(1))*4)) != 0 {
_walCleanupHash(tls, pWal)
}
/* Write the aPgno[] array entry and the hash-table slot. */
nCollide = idx
iKey = _walHash(tls, iPage)
for {
if !(*(*Tht_slot)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash + uintptr(iKey)*2)) != 0) {
break
}
v2 = nCollide
nCollide--
if v2 == 0 {
return _sqlite3CorruptError(tls, int32(65899))
}
goto _1
_1:
;
iKey = _walNextHash(tls, iKey)
}
*(*Tu32)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno + uintptr(idx-int32(1))*4)) = iPage
*(*Tht_slot)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash + uintptr(iKey)*2)) = uint16(idx)
}
return rc
}
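/* Illustrative note (not part of the generated translation): idx is the
** 1-based offset of iFrame within its hash table, so the first frame mapped
** by a table has idx == 1 and triggers the memset above, zeroing the whole
** table before it is used. The probe loop then walks aHash[] starting at
** _walHash(iPage); since only the idx-1 earlier frames of this table can
** have filled slots, exhausting the nCollide budget (initially idx) can only
** mean the hash table is corrupt, hence the SQLITE_CORRUPT return. */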
// C documentation
//
// /*
// ** Recover the wal-index by reading the write-ahead log file.
// **
// ** This routine first tries to establish an exclusive lock on the
// ** wal-index to prevent other threads/processes from doing anything
// ** with the WAL or wal-index while recovery is running. The
// ** WAL_RECOVER_LOCK is also held so that other threads will know
// ** that this thread is running recovery. If unable to establish
// ** the necessary locks, this routine returns SQLITE_BUSY.
// */
func _walIndexRecover(tls *libc.TLS, pWal uintptr) (r int32) {
bp := tls.Alloc(80)
defer tls.Free(80)
var aData, aFrame, aPrivate, pInfo uintptr
var aFrameCksum [2]Tu32
var i, iLock, isValid, rc, szFrame, szPage int32
var iFirst, iFrame, iLast, iLastFrame, iPg, magic, nHdr, nHdr32, version Tu32
var iOffset Ti64
var v2, v3, v5 uint64
var _ /* aBuf at bp+8 */ [32]Tu8
var _ /* aShare at bp+40 */ uintptr
var _ /* nSize at bp+0 */ Ti64
var _ /* nTruncate at bp+52 */ Tu32
var _ /* pgno at bp+48 */ Tu32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = aData, aFrame, aFrameCksum, aPrivate, i, iFirst, iFrame, iLast, iLastFrame, iLock, iOffset, iPg, isValid, magic, nHdr, nHdr32, pInfo, rc, szFrame, szPage, version, v2, v3, v5 /* Size of log file */
aFrameCksum = [2]Tu32{} /* Lock offset to lock for checkpoint */
/* Obtain an exclusive lock on all bytes in the locking range not already
** locked by the caller. The caller is guaranteed to have locked the
** WAL_WRITE_LOCK byte, and may have also locked the WAL_CKPT_LOCK byte.
** If successful, the same bytes that are locked here are unlocked before
** this function returns.
*/
iLock = int32(WAL_ALL_BUT_WRITE) + int32((*TWal)(unsafe.Pointer(pWal)).FckptLock)
rc = _walLockExclusive(tls, pWal, iLock, libc.Int32FromInt32(3)+libc.Int32FromInt32(0)-iLock)
if rc != 0 {
return rc
}
libc.Xmemset(tls, pWal+72, 0, uint64(48))
rc = _sqlite3OsFileSize(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, bp)
if rc != SQLITE_OK {
goto recovery_error
}
if *(*Ti64)(unsafe.Pointer(bp)) > int64(WAL_HDRSIZE) { /* Buffer to load WAL header into */
aPrivate = uintptr(0) /* Heap copy of *-shm hash being populated */
aFrame = uintptr(0) /* Last frame in wal, based on nSize alone */
/* Read in the WAL header. */
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, bp+8, int32(WAL_HDRSIZE), 0)
if rc != SQLITE_OK {
goto recovery_error
}
/* If the database page size is not a power of two, or is greater than
** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid
** data. Similarly, if the 'magic' value is invalid, ignore the whole
** WAL file.
*/
magic = _sqlite3Get4byte(tls, bp+8)
szPage = int32(_sqlite3Get4byte(tls, bp+8+8))
if magic&uint32(0xFFFFFFFE) != uint32(WAL_MAGIC) || szPage&(szPage-int32(1)) != 0 || szPage > int32(SQLITE_MAX_PAGE_SIZE) || szPage < int32(512) {
goto finished
}
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FbigEndCksum = uint8(magic & libc.Uint32FromInt32(0x00000001))
(*TWal)(unsafe.Pointer(pWal)).FszPage = uint32(szPage)
(*TWal)(unsafe.Pointer(pWal)).FnCkpt = _sqlite3Get4byte(tls, bp+8+12)
libc.Xmemcpy(tls, pWal+72+32, bp+8+16, uint64(8))
/* Verify that the WAL header checksum is correct */
_walChecksumBytes(tls, libc.BoolInt32(int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FbigEndCksum) == SQLITE_BIGENDIAN), bp+8, libc.Int32FromInt32(WAL_HDRSIZE)-libc.Int32FromInt32(2)*libc.Int32FromInt32(4), uintptr(0), pWal+72+24)
if *(*Tu32)(unsafe.Pointer(pWal + 72 + 24)) != _sqlite3Get4byte(tls, bp+8+24) || *(*Tu32)(unsafe.Pointer(pWal + 72 + 24 + 1*4)) != _sqlite3Get4byte(tls, bp+8+28) {
goto finished
}
/* Verify that the version number on the WAL format is one that
** we are able to understand */
version = _sqlite3Get4byte(tls, bp+8+4)
if version != uint32(WAL_MAX_VERSION) {
rc = _sqlite3CantopenError(tls, int32(66031))
goto finished
}
/* Malloc a buffer to read frames into. */
szFrame = szPage + int32(WAL_FRAME_HDRSIZE)
aFrame = Xsqlite3_malloc64(tls, uint64(szFrame)+(libc.Uint64FromInt64(2)*uint64(libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2))+libc.Uint64FromInt32(HASHTABLE_NPAGE)*libc.Uint64FromInt64(4)))
if !(aFrame != 0) {
rc = int32(SQLITE_NOMEM)
goto recovery_error
}
aData = aFrame + 24
aPrivate = aData + uintptr(szPage)
/* Read all frames from the log file. */
iLastFrame = uint32((*(*Ti64)(unsafe.Pointer(bp)) - int64(WAL_HDRSIZE)) / int64(szFrame))
iPg = uint32(0)
for {
if !(iPg <= uint32(_walFramePage(tls, iLastFrame))) {
break
}
if uint64(iLastFrame) < libc.Uint64FromInt32(HASHTABLE_NPAGE)-(libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4)+uint64(iPg*uint32(HASHTABLE_NPAGE)) {
v2 = uint64(iLastFrame)
} else {
v2 = libc.Uint64FromInt32(HASHTABLE_NPAGE) - (libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4) + uint64(iPg*uint32(HASHTABLE_NPAGE))
} /* Index of last frame read */
iLast = uint32(v2)
if iPg == uint32(0) {
v3 = uint64(0)
} else {
v3 = libc.Uint64FromInt32(HASHTABLE_NPAGE) - (libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2)+libc.Uint64FromInt64(40))/libc.Uint64FromInt64(4) + uint64((iPg-uint32(1))*uint32(HASHTABLE_NPAGE))
}
iFirst = uint32(uint64(1) + v3)
rc = _walIndexPage(tls, pWal, int32(iPg), bp+40)
if *(*uintptr)(unsafe.Pointer(bp + 40)) == uintptr(0) {
break
}
*(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(iPg)*8)) = aPrivate
iFrame = iFirst
for {
if !(iFrame <= iLast) {
break
}
iOffset = libc.Int64FromInt32(WAL_HDRSIZE) + int64(iFrame-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE)) /* dbsize field from frame header */
/* Read and decode the next log frame. */
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, aFrame, szFrame, iOffset)
if rc != SQLITE_OK {
break
}
isValid = _walDecodeFrame(tls, pWal, bp+48, bp+52, aData, aFrame)
if !(isValid != 0) {
break
}
rc = _walIndexAppend(tls, pWal, iFrame, *(*Tu32)(unsafe.Pointer(bp + 48)))
if rc != SQLITE_OK {
break
}
/* If nTruncate is non-zero, this is a commit record. */
if *(*Tu32)(unsafe.Pointer(bp + 52)) != 0 {
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame = iFrame
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FnPage = *(*Tu32)(unsafe.Pointer(bp + 52))
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FszPage = uint16(szPage&libc.Int32FromInt32(0xff00) | szPage>>libc.Int32FromInt32(16))
aFrameCksum[0] = *(*Tu32)(unsafe.Pointer(pWal + 72 + 24))
aFrameCksum[int32(1)] = *(*Tu32)(unsafe.Pointer(pWal + 72 + 24 + 1*4))
}
goto _4
_4:
;
iFrame++
}
*(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(iPg)*8)) = *(*uintptr)(unsafe.Pointer(bp + 40))
if iPg == uint32(0) {
v5 = libc.Uint64FromInt64(48)*libc.Uint64FromInt32(2) + libc.Uint64FromInt64(40)
} else {
v5 = uint64(0)
}
nHdr = uint32(v5)
nHdr32 = uint32(uint64(nHdr) / uint64(4))
/* Memcpy() should work fine here, on all reasonable implementations.
** Technically, memcpy() might change the destination to some
** intermediate value before setting to the final value, and that might
** cause a concurrent reader to malfunction. Memcpy() is allowed to
** do that, according to the spec, but no memcpy() implementation that
** we know of actually does that, which is why we say that memcpy()
** is safe for this. Memcpy() is certainly a lot faster.
*/
libc.Xmemcpy(tls, *(*uintptr)(unsafe.Pointer(bp + 40))+uintptr(nHdr32)*4, aPrivate+uintptr(nHdr32)*4, libc.Uint64FromInt64(2)*uint64(libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2))+libc.Uint64FromInt32(HASHTABLE_NPAGE)*libc.Uint64FromInt64(4)-uint64(nHdr))
if iFrame <= iLast {
break
}
goto _1
_1:
;
iPg++
}
Xsqlite3_free(tls, aFrame)
}
goto finished
finished:
;
if rc == SQLITE_OK {
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24)) = aFrameCksum[0]
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24 + 1*4)) = aFrameCksum[int32(1)]
_walIndexWriteHdr(tls, pWal)
/* Reset the checkpoint-header. This is safe because this thread is
** currently holding locks that exclude all other writers and
** checkpointers. Then set the values of read-mark slots 1 through N.
*/
pInfo = _walCkptInfo(tls, pWal)
(*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfill = uint32(0)
(*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfillAttempted = (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame
*(*Tu32)(unsafe.Pointer(pInfo + 4)) = uint32(0)
i = int32(1)
for {
if !(i < libc.Int32FromInt32(SQLITE_SHM_NLOCK)-libc.Int32FromInt32(3)) {
break
}
rc = _walLockExclusive(tls, pWal, int32(3)+i, int32(1))
if rc == SQLITE_OK {
if i == int32(1) && (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 {
*(*Tu32)(unsafe.Pointer(pInfo + 4 + uintptr(i)*4)) = (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame
} else {
*(*Tu32)(unsafe.Pointer(pInfo + 4 + uintptr(i)*4)) = uint32(READMARK_NOT_USED)
}
_walUnlockExclusive(tls, pWal, int32(3)+i, int32(1))
} else {
if rc != int32(SQLITE_BUSY) {
goto recovery_error
}
}
goto _6
_6:
;
i++
}
/* If more than one frame was recovered from the log file, report an
** event via sqlite3_log(). This is to help with identifying performance
** problems caused by applications routinely shutting down without
** checkpointing the log file.
*/
if (*TWal)(unsafe.Pointer(pWal)).Fhdr.FnPage != 0 {
Xsqlite3_log(tls, libc.Int32FromInt32(SQLITE_NOTICE)|libc.Int32FromInt32(1)<= 0) {
break
}
pSegment = p + 8 + uintptr(i)*32
for (*TWalSegment)(unsafe.Pointer(pSegment)).FiNext < (*TWalSegment)(unsafe.Pointer(pSegment)).FnEntry {
iPg = *(*Tu32)(unsafe.Pointer((*TWalSegment)(unsafe.Pointer(pSegment)).FaPgno + uintptr(*(*Tht_slot)(unsafe.Pointer((*TWalSegment)(unsafe.Pointer(pSegment)).FaIndex + uintptr((*TWalSegment)(unsafe.Pointer(pSegment)).FiNext)*2)))*4))
if iPg > iMin {
if iPg < iRet {
iRet = iPg
*(*Tu32)(unsafe.Pointer(piFrame)) = uint32((*TWalSegment)(unsafe.Pointer(pSegment)).FiZero + int32(*(*Tht_slot)(unsafe.Pointer((*TWalSegment)(unsafe.Pointer(pSegment)).FaIndex + uintptr((*TWalSegment)(unsafe.Pointer(pSegment)).FiNext)*2))))
}
break
}
(*TWalSegment)(unsafe.Pointer(pSegment)).FiNext++
}
goto _1
_1:
;
i--
}
v2 = iRet
(*TWalIterator)(unsafe.Pointer(p)).FiPrior = v2
*(*Tu32)(unsafe.Pointer(piPage)) = v2
return libc.BoolInt32(iRet == uint32(0xFFFFFFFF))
}
// C documentation
//
// /*
// ** This function merges two sorted lists into a single sorted list.
// **
// ** aLeft[] and aRight[] are arrays of indices. The sort key is
// ** aContent[aLeft[]] and aContent[aRight[]]. Upon entry, the following
// ** is guaranteed for all J= nRight || *(*Tu32)(unsafe.Pointer(aContent + uintptr(*(*Tht_slot)(unsafe.Pointer(aLeft + uintptr(iLeft)*2)))*4)) < *(*Tu32)(unsafe.Pointer(aContent + uintptr(*(*Tht_slot)(unsafe.Pointer(aRight + uintptr(iRight)*2)))*4))) {
v1 = iLeft
iLeft++
logpage = *(*Tht_slot)(unsafe.Pointer(aLeft + uintptr(v1)*2))
} else {
v2 = iRight
iRight++
logpage = *(*Tht_slot)(unsafe.Pointer(aRight + uintptr(v2)*2))
}
dbpage = *(*Tu32)(unsafe.Pointer(aContent + uintptr(logpage)*4))
v3 = iOut
iOut++
*(*Tht_slot)(unsafe.Pointer(aTmp + uintptr(v3)*2)) = logpage
if iLeft < nLeft && *(*Tu32)(unsafe.Pointer(aContent + uintptr(*(*Tht_slot)(unsafe.Pointer(aLeft + uintptr(iLeft)*2)))*4)) == dbpage {
iLeft++
}
}
*(*uintptr)(unsafe.Pointer(paRight)) = aLeft
*(*int32)(unsafe.Pointer(pnRight)) = iOut
libc.Xmemcpy(tls, aLeft, aTmp, uint64(2)*uint64(iOut))
}
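/* Worked example (illustrative): with aContent = {10, 30, 20}, aLeft = {0}
** (key 10) and aRight = {2, 1} (keys 20, 30), the loop above emits index 0,
** then 2, then 1, so on return *paRight points at a buffer holding {0, 2, 1}
** and *pnRight is 3 - still sorted by aContent[]. If the two inputs contain
** indices with equal keys, the right-hand (newer) index is emitted and the
** left-hand duplicate is skipped by the final if-statement in the loop. */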
// C documentation
//
// /*
// ** Sort the elements in list aList using aContent[] as the sort key.
// ** Remove elements with duplicate keys, preferring to keep the
// ** larger aList[] values.
// **
// ** The aList[] entries are indices into aContent[]. The values in
// ** aList[] are to be sorted so that for all J0).
*/
iLast = (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame
/* Allocate space for the WalIterator object. */
nSegment = _walFramePage(tls, iLast) + int32(1)
nByte = int64(uint64(40) + uint64(nSegment-libc.Int32FromInt32(1))*uint64(32) + uint64(iLast)*uint64(2))
if iLast > uint32(HASHTABLE_NPAGE) {
v1 = uint32(HASHTABLE_NPAGE)
} else {
v1 = iLast
}
p = Xsqlite3_malloc64(tls, uint64(nByte)+uint64(2)*uint64(v1))
if !(p != 0) {
return int32(SQLITE_NOMEM)
}
libc.Xmemset(tls, p, 0, uint64(nByte))
(*TWalIterator)(unsafe.Pointer(p)).FnSegment = nSegment
aTmp = p + uintptr(nByte)
i = _walFramePage(tls, nBackfill+uint32(1))
for {
if !(rc == SQLITE_OK && i < nSegment) {
break
}
rc = _walHashGet(tls, pWal, i, bp)
if rc == SQLITE_OK { /* Sorted index for this segment */
if i+int32(1) == nSegment {
*(*int32)(unsafe.Pointer(bp + 24)) = int32(iLast - (*(*TWalHashLoc)(unsafe.Pointer(bp))).FiZero)
} else {
*(*int32)(unsafe.Pointer(bp + 24)) = int32((int64((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash) - int64((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno)) / 4)
}
aIndex = p + 8 + uintptr((*TWalIterator)(unsafe.Pointer(p)).FnSegment)*32 + uintptr((*(*TWalHashLoc)(unsafe.Pointer(bp))).FiZero)*2
(*(*TWalHashLoc)(unsafe.Pointer(bp))).FiZero++
j = 0
for {
if !(j < *(*int32)(unsafe.Pointer(bp + 24))) {
break
}
*(*Tht_slot)(unsafe.Pointer(aIndex + uintptr(j)*2)) = uint16(j)
goto _3
_3:
;
j++
}
_walMergesort(tls, (*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno, aTmp, aIndex, bp+24)
(*(*TWalSegment)(unsafe.Pointer(p + 8 + uintptr(i)*32))).FiZero = int32((*(*TWalHashLoc)(unsafe.Pointer(bp))).FiZero)
(*(*TWalSegment)(unsafe.Pointer(p + 8 + uintptr(i)*32))).FnEntry = *(*int32)(unsafe.Pointer(bp + 24))
(*(*TWalSegment)(unsafe.Pointer(p + 8 + uintptr(i)*32))).FaIndex = aIndex
(*(*TWalSegment)(unsafe.Pointer(p + 8 + uintptr(i)*32))).FaPgno = (*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno
}
goto _2
_2:
;
i++
}
if rc != SQLITE_OK {
_walIteratorFree(tls, p)
p = uintptr(0)
}
*(*uintptr)(unsafe.Pointer(pp)) = p
return rc
}
// C documentation
//
// /*
// ** Attempt to obtain the exclusive WAL lock defined by parameters lockIdx and
// ** n. If the attempt fails and parameter xBusy is not NULL, then it is a
// ** busy-handler function. Invoke it and retry the lock until either the
// ** lock is successfully obtained or the busy-handler returns 0.
// */
func _walBusyLock(tls *libc.TLS, pWal uintptr, xBusy uintptr, pBusyArg uintptr, lockIdx int32, n int32) (r int32) {
var rc int32
_ = rc
for cond := true; cond; cond = xBusy != 0 && rc == int32(SQLITE_BUSY) && (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{xBusy})))(tls, pBusyArg) != 0 {
rc = _walLockExclusive(tls, pWal, lockIdx, n)
}
return rc
}
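/* Translation note (illustrative): the "for cond := true; cond; cond = ..."
** form above is how this translation renders a C do/while loop - the lock
** attempt always runs once and is repeated for as long as a busy-handler is
** supplied, the previous attempt returned SQLITE_BUSY, and the busy-handler
** itself returns non-zero. */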
// C documentation
//
// /*
// ** The cache of the wal-index header must be valid to call this function.
// ** Return the page-size in bytes used by the database.
// */
func _walPagesize(tls *libc.TLS, pWal uintptr) (r int32) {
return int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FszPage)&int32(0xfe00) + int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FszPage)&int32(0x0001)<<int32(16)
}
if mxSafeFrame > y {
rc = _walBusyLock(tls, pWal, xBusy, pBusyArg, int32(3)+i, int32(1))
if rc == SQLITE_OK {
if i == int32(1) {
v2 = mxSafeFrame
} else {
v2 = uint32(READMARK_NOT_USED)
}
iMark = v2
*(*Tu32)(unsafe.Pointer(pInfo + 4 + uintptr(i)*4)) = iMark
_walUnlockExclusive(tls, pWal, int32(3)+i, int32(1))
} else {
if rc == int32(SQLITE_BUSY) {
mxSafeFrame = y
xBusy = uintptr(0)
} else {
goto walcheckpoint_out
}
}
}
goto _1
_1:
;
i++
}
/* Allocate the iterator */
if (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfill < mxSafeFrame {
rc = _walIteratorInit(tls, pWal, (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfill, bp)
}
if v4 = *(*uintptr)(unsafe.Pointer(bp)) != 0; v4 {
v3 = _walBusyLock(tls, pWal, xBusy, pBusyArg, libc.Int32FromInt32(3)+libc.Int32FromInt32(0), int32(1))
rc = v3
}
if v4 && v3 == SQLITE_OK {
nBackfill = (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfill
(*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfillAttempted = mxSafeFrame
/* Sync the WAL to disk */
rc = _sqlite3OsSync(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, sync_flags>>int32(2)&int32(0x03))
/* If the database may grow as a result of this checkpoint, hint
** about the eventual size of the db file to the VFS layer.
*/
if rc == SQLITE_OK {
*(*Ti64)(unsafe.Pointer(bp + 16)) = int64(mxPage) * int64(szPage) /* Current size of database file */
_sqlite3OsFileControl(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, int32(SQLITE_FCNTL_CKPT_START), uintptr(0))
rc = _sqlite3OsFileSize(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, bp+24)
if rc == SQLITE_OK && *(*Ti64)(unsafe.Pointer(bp + 24)) < *(*Ti64)(unsafe.Pointer(bp + 16)) {
if *(*Ti64)(unsafe.Pointer(bp + 24))+int64(65536)+int64((*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame)*int64(szPage) < *(*Ti64)(unsafe.Pointer(bp + 16)) {
/* If the size of the final database is larger than the current
** database plus the amount of data in the wal file, plus the
** maximum size of the pending-byte page (65536 bytes), then
** there must be corruption somewhere. */
rc = _sqlite3CorruptError(tls, int32(66841))
} else {
_sqlite3OsFileControlHint(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, int32(SQLITE_FCNTL_SIZE_HINT), bp+16)
}
}
}
/* Iterate through the contents of the WAL, copying data to the db file */
for rc == SQLITE_OK && 0 == _walIteratorNext(tls, *(*uintptr)(unsafe.Pointer(bp)), bp+8, bp+12) {
if libc.AtomicLoadPInt32(db+432) != 0 {
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
v5 = int32(SQLITE_NOMEM)
} else {
v5 = int32(SQLITE_INTERRUPT)
}
rc = v5
break
}
if *(*Tu32)(unsafe.Pointer(bp + 12)) <= nBackfill || *(*Tu32)(unsafe.Pointer(bp + 12)) > mxSafeFrame || *(*Tu32)(unsafe.Pointer(bp + 8)) > mxPage {
continue
}
iOffset = int64(WAL_HDRSIZE) + int64(*(*Tu32)(unsafe.Pointer(bp + 12))-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE)) + int64(WAL_FRAME_HDRSIZE)
/* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, zBuf, szPage, iOffset)
if rc != SQLITE_OK {
break
}
iOffset = int64(*(*Tu32)(unsafe.Pointer(bp + 8))-libc.Uint32FromInt32(1)) * int64(szPage)
rc = _sqlite3OsWrite(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, zBuf, szPage, iOffset)
if rc != SQLITE_OK {
break
}
}
_sqlite3OsFileControl(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, int32(SQLITE_FCNTL_CKPT_DONE), uintptr(0))
/* If work was actually accomplished... */
if rc == SQLITE_OK {
if mxSafeFrame == (*TWalIndexHdr)(unsafe.Pointer(_walIndexHdr(tls, pWal))).FmxFrame {
szDb = int64((*TWal)(unsafe.Pointer(pWal)).Fhdr.FnPage) * int64(szPage)
rc = _sqlite3OsTruncate(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, szDb)
if rc == SQLITE_OK {
rc = _sqlite3OsSync(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, sync_flags>>int32(2)&int32(0x03))
}
}
if rc == SQLITE_OK {
*(*Tu32)(unsafe.Pointer(pInfo)) = mxSafeFrame
}
}
/* Release the reader lock held while backfilling */
_walUnlockExclusive(tls, pWal, libc.Int32FromInt32(3)+libc.Int32FromInt32(0), int32(1))
}
if rc == int32(SQLITE_BUSY) {
/* Reset the return code so as not to report a checkpoint failure
** just because there are active readers. */
rc = SQLITE_OK
}
}
/* If this is an SQLITE_CHECKPOINT_RESTART or TRUNCATE operation, and the
** entire wal file has been copied into the database file, then block
** until all readers have finished using the wal file. This ensures that
** the next process to write to the database restarts the wal file.
*/
if rc == SQLITE_OK && eMode != SQLITE_CHECKPOINT_PASSIVE {
if (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfill < (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame {
rc = int32(SQLITE_BUSY)
} else {
if eMode >= int32(SQLITE_CHECKPOINT_RESTART) {
Xsqlite3_randomness(tls, int32(4), bp+32)
rc = _walBusyLock(tls, pWal, xBusy, pBusyArg, libc.Int32FromInt32(3)+libc.Int32FromInt32(1), libc.Int32FromInt32(SQLITE_SHM_NLOCK)-libc.Int32FromInt32(3)-libc.Int32FromInt32(1))
if rc == SQLITE_OK {
if eMode == int32(SQLITE_CHECKPOINT_TRUNCATE) {
/* IMPLEMENTATION-OF: R-44699-57140 This mode works the same way as
** SQLITE_CHECKPOINT_RESTART with the addition that it also
** truncates the log file to zero bytes just prior to a
** successful return.
**
** In theory, it might be safe to do this without updating the
** wal-index header in shared memory, as all subsequent reader or
** writer clients should see that the entire log file has been
** checkpointed and behave accordingly. This seems unsafe though,
** as it would leave the system in a state where the contents of
** the wal-index header do not match the contents of the
** file-system. To avoid this, update the wal-index header to
** indicate that the log file contains zero valid frames. */
_walRestartHdr(tls, pWal, *(*Tu32)(unsafe.Pointer(bp + 32)))
rc = _sqlite3OsTruncate(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, 0)
}
_walUnlockExclusive(tls, pWal, libc.Int32FromInt32(3)+libc.Int32FromInt32(1), libc.Int32FromInt32(SQLITE_SHM_NLOCK)-libc.Int32FromInt32(3)-libc.Int32FromInt32(1))
}
}
}
}
goto walcheckpoint_out
walcheckpoint_out:
;
_walIteratorFree(tls, *(*uintptr)(unsafe.Pointer(bp)))
return rc
}
// C documentation
//
// /*
// ** If the WAL file is currently larger than nMax bytes in size, truncate
// ** it to exactly nMax bytes. If an error occurs while doing so, ignore it.
// */
func _walLimitSize(tls *libc.TLS, pWal uintptr, nMax Ti64) {
bp := tls.Alloc(32)
defer tls.Free(32)
var rx int32
var _ /* sz at bp+0 */ Ti64
_ = rx
_sqlite3BeginBenignMalloc(tls)
rx = _sqlite3OsFileSize(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, bp)
if rx == SQLITE_OK && *(*Ti64)(unsafe.Pointer(bp)) > nMax {
rx = _sqlite3OsTruncate(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, nMax)
}
_sqlite3EndBenignMalloc(tls)
if rx != 0 {
Xsqlite3_log(tls, rx, __ccgo_ts+4166, libc.VaList(bp+16, (*TWal)(unsafe.Pointer(pWal)).FzWalName))
}
}
// C documentation
//
// /*
// ** Close a connection to a log file.
// */
func _sqlite3WalClose(tls *libc.TLS, pWal uintptr, db uintptr, sync_flags int32, nBuf int32, zBuf uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var isDelete, rc, v1 int32
var v2 bool
var _ /* bPersist at bp+0 */ int32
_, _, _, _ = isDelete, rc, v1, v2
rc = SQLITE_OK
if pWal != 0 {
isDelete = 0 /* True to unlink wal and wal-index files */
/* If an EXCLUSIVE lock can be obtained on the database file (using the
** ordinary, rollback-mode locking methods), this guarantees that the
** connection associated with this log file is the only connection to
** the database. In this case checkpoint the database and unlink both
** the wal and wal-index files.
**
** The EXCLUSIVE lock is not released before returning.
*/
if v2 = zBuf != uintptr(0); v2 {
v1 = _sqlite3OsLock(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, int32(SQLITE_LOCK_EXCLUSIVE))
rc = v1
}
if v2 && SQLITE_OK == v1 {
if int32((*TWal)(unsafe.Pointer(pWal)).FexclusiveMode) == WAL_NORMAL_MODE {
(*TWal)(unsafe.Pointer(pWal)).FexclusiveMode = uint8(WAL_EXCLUSIVE_MODE)
}
rc = _sqlite3WalCheckpoint(tls, pWal, db, SQLITE_CHECKPOINT_PASSIVE, uintptr(0), uintptr(0), sync_flags, nBuf, zBuf, uintptr(0), uintptr(0))
if rc == SQLITE_OK {
*(*int32)(unsafe.Pointer(bp)) = -int32(1)
_sqlite3OsFileControlHint(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, int32(SQLITE_FCNTL_PERSIST_WAL), bp)
if *(*int32)(unsafe.Pointer(bp)) != int32(1) {
/* Try to delete the WAL file if the checkpoint completed and
** fsynced (rc==SQLITE_OK) and if we are not in persistent-wal
** mode (!bPersist) */
isDelete = int32(1)
} else {
if (*TWal)(unsafe.Pointer(pWal)).FmxWalSize >= 0 {
/* Try to truncate the WAL file to zero bytes if the checkpoint
** completed and fsynced (rc==SQLITE_OK) and we are in persistent
** WAL mode (bPersist) and if the PRAGMA journal_size_limit is a
** non-negative value (pWal->mxWalSize>=0). Note that we truncate
** to zero bytes as truncating to the journal_size_limit might
** leave a corrupt WAL file on disk. */
_walLimitSize(tls, pWal, 0)
}
}
}
}
_walIndexClose(tls, pWal, isDelete)
_sqlite3OsClose(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd)
if isDelete != 0 {
_sqlite3BeginBenignMalloc(tls)
_sqlite3OsDelete(tls, (*TWal)(unsafe.Pointer(pWal)).FpVfs, (*TWal)(unsafe.Pointer(pWal)).FzWalName, 0)
_sqlite3EndBenignMalloc(tls)
}
Xsqlite3_free(tls, (*TWal)(unsafe.Pointer(pWal)).FapWiData)
Xsqlite3_free(tls, pWal)
}
return rc
}
// C documentation
//
// /*
// ** Try to read the wal-index header. Return 0 on success and 1 if
// ** there is a problem.
// **
// ** The wal-index is in shared memory. Another thread or process might
// ** be writing the header at the same time this procedure is trying to
// ** read it, which might result in inconsistency. A dirty read is detected
// ** by verifying that both copies of the header are the same and also by
// ** a checksum on the header.
// **
// ** If and only if the read is consistent and the header is different from
// ** pWal->hdr, then pWal->hdr is updated to the content of the new header
// ** and *pChanged is set to 1.
// **
// ** If the checksum cannot be verified return non-zero. If the header
// ** is read successfully and the checksum verified, return zero.
// */
func _walIndexTryHdr(tls *libc.TLS, pWal uintptr, pChanged uintptr) (r int32) {
bp := tls.Alloc(112)
defer tls.Free(112)
var aHdr uintptr
var _ /* aCksum at bp+0 */ [2]Tu32
var _ /* h1 at bp+8 */ TWalIndexHdr
var _ /* h2 at bp+56 */ TWalIndexHdr
_ = aHdr /* Header in shared memory */
/* The first page of the wal-index must be mapped at this point. */
/* Read the header. This might happen concurrently with a write to the
** same area of shared memory on a different CPU in an SMP system,
** meaning it is possible that an inconsistent snapshot is read
** from the file. If this happens, return non-zero.
**
** tag-20200519-1:
** There are two copies of the header at the beginning of the wal-index.
** When reading, read [0] first then [1]. Writes are in the reverse order.
** Memory barriers are used to prevent the compiler or the hardware from
** reordering the reads and writes. TSAN and similar tools can sometimes
** give false-positive warnings about these accesses because the tools do not
** account for the double-read and the memory barrier. The use of mutexes
** here would be problematic as the memory being accessed is potentially
** shared among multiple processes and not all mutex implementations work
** reliably in that environment.
*/
aHdr = _walIndexHdr(tls, pWal)
libc.Xmemcpy(tls, bp+8, aHdr, uint64(48)) /* Possible TSAN false-positive */
_walShmBarrier(tls, pWal)
libc.Xmemcpy(tls, bp+56, aHdr+1*48, uint64(48))
if libc.Xmemcmp(tls, bp+8, bp+56, uint64(48)) != 0 {
return int32(1) /* Dirty read */
}
if int32((*(*TWalIndexHdr)(unsafe.Pointer(bp + 8))).FisInit) == 0 {
return int32(1) /* Malformed header - probably all zeros */
}
_walChecksumBytes(tls, int32(1), bp+8, int32(libc.Uint64FromInt64(48)-libc.Uint64FromInt64(8)), uintptr(0), bp)
if (*(*[2]Tu32)(unsafe.Pointer(bp)))[0] != *(*Tu32)(unsafe.Pointer(bp + 8 + 40)) || (*(*[2]Tu32)(unsafe.Pointer(bp)))[int32(1)] != *(*Tu32)(unsafe.Pointer(bp + 8 + 40 + 1*4)) {
return int32(1) /* Checksum does not match */
}
if libc.Xmemcmp(tls, pWal+72, bp+8, uint64(48)) != 0 {
*(*int32)(unsafe.Pointer(pChanged)) = int32(1)
libc.Xmemcpy(tls, pWal+72, bp+8, uint64(48))
(*TWal)(unsafe.Pointer(pWal)).FszPage = uint32(int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FszPage)&int32(0xfe00) + int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FszPage)&int32(0x0001)<<int32(16))
}
/* The header is successfully read. Return zero. */
return 0
}
// C documentation
//
// /*
// ** Read the wal-index header from the wal-index and into pWal->hdr.
// ** If the wal-header appears to be corrupt, try to reconstruct the
// ** wal-index from the WAL before returning.
// **
// ** Set *pChanged to 1 if the wal-index header value in pWal->hdr is
// ** changed by this operation. If pWal->hdr is unchanged, set *pChanged
// ** to 0.
// **
// ** If the wal-index header is successfully read, return SQLITE_OK.
// ** Otherwise an SQLite error code.
// */
func _walIndexReadHdr(tls *libc.TLS, pWal uintptr, pChanged uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var bWriteLock, badHdr, rc, v1, v2, v3, v5 int32
var v4 bool
var _ /* page0 at bp+0 */ uintptr
_, _, _, _, _, _, _, _ = bWriteLock, badHdr, rc, v1, v2, v3, v4, v5 /* Chunk of wal-index containing header */
/* Ensure that page 0 of the wal-index (the page that contains the
** wal-index header) is mapped. Return early if an error occurs here.
*/
rc = _walIndexPage(tls, pWal, 0, bp)
if rc != SQLITE_OK {
/* READONLY changed to OK in walIndexPage */
if rc == libc.Int32FromInt32(SQLITE_READONLY)|libc.Int32FromInt32(5)<<libc.Int32FromInt32(8) {
/* The SQLITE_READONLY_CANTINIT return means that the shared-memory
** was openable but is not writable, and this thread is unable to
** confirm that another write-capable connection has the shared-memory
** open, and hence the content of the shared-memory is unreliable,
** since the shared-memory might be inconsistent with the WAL file
** and there is no writer on hand to fix it. */
(*TWal)(unsafe.Pointer(pWal)).FbShmUnreliable = uint8(1)
(*TWal)(unsafe.Pointer(pWal)).FexclusiveMode = uint8(WAL_HEAPMEMORY_MODE)
*(*int32)(unsafe.Pointer(pChanged)) = int32(1)
} else {
return rc
}
} else {
/* page0 can be NULL if the SHM is zero bytes in size and pWal->writeLock
** is zero, which prevents the SHM from growing */
}
/* If the first page of the wal-index has been mapped, try to read the
** wal-index header immediately, without holding any lock. This usually
** works, but may fail if the wal-index header is corrupt or currently
** being modified by another thread or process.
*/
if *(*uintptr)(unsafe.Pointer(bp)) != 0 {
v1 = _walIndexTryHdr(tls, pWal, pChanged)
} else {
v1 = int32(1)
}
badHdr = v1
/* If the first attempt failed, it might have been due to a race
** with a writer. So get a WRITE lock and try again.
*/
if badHdr != 0 {
if int32((*TWal)(unsafe.Pointer(pWal)).FbShmUnreliable) == 0 && int32((*TWal)(unsafe.Pointer(pWal)).FreadOnly)&int32(WAL_SHM_RDONLY) != 0 {
v2 = _walLockShared(tls, pWal, WAL_WRITE_LOCK)
rc = v2
if SQLITE_OK == v2 {
_walUnlockShared(tls, pWal, WAL_WRITE_LOCK)
rc = libc.Int32FromInt32(SQLITE_READONLY) | libc.Int32FromInt32(1)<apWiData[] using heap memory instead of shared
// ** memory.
// **
// ** If this function returns SQLITE_OK, then the read transaction has
// ** been successfully opened. In this case output variable (*pChanged)
// ** is set to true before returning if the caller should discard the
// ** contents of the page cache before proceeding. Or, if it returns
// ** WAL_RETRY, then the heap memory wal-index has been discarded and
// ** the caller should retry opening the read transaction from the
// ** beginning (including attempting to map the *-shm file).
// **
// ** If an error occurs, an SQLite error code is returned.
// */
func _walBeginShmUnreliable(tls *libc.TLS, pWal uintptr, pChanged uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var aData, aFrame uintptr
var aSaveCksum [2]Tu32
var i, rc, szFrame, v1, v2 int32
var iOffset Ti64
var _ /* aBuf at bp+8 */ [32]Tu8
var _ /* nTruncate at bp+52 */ Tu32
var _ /* pDummy at bp+40 */ uintptr
var _ /* pgno at bp+48 */ Tu32
var _ /* szWal at bp+0 */ Ti64
_, _, _, _, _, _, _, _, _ = aData, aFrame, aSaveCksum, i, iOffset, rc, szFrame, v1, v2 /* Buffer to load WAL header into */
aFrame = uintptr(0) /* Saved copy of pWal->hdr.aFrameCksum */
/* Take WAL_READ_LOCK(0). This has the effect of preventing any
** writers from running a checkpoint, but does not stop them
** from running recovery. */
rc = _walLockShared(tls, pWal, libc.Int32FromInt32(3)+libc.Int32FromInt32(0))
if rc != SQLITE_OK {
if rc == int32(SQLITE_BUSY) {
rc = -int32(1)
}
goto begin_unreliable_shm_out
}
(*TWal)(unsafe.Pointer(pWal)).FreadLock = 0
/* Check to see if a separate writer has attached to the shared-memory area,
** thus making the shared-memory "reliable" again. Do this by invoking
** the xShmMap() routine of the VFS and looking to see if the return
** is SQLITE_READONLY instead of SQLITE_READONLY_CANTINIT.
**
** If the shared-memory is now "reliable" return WAL_RETRY, which will
** cause the heap-memory WAL-index to be discarded and the actual
** shared memory to be used in its place.
**
** This step is important because, even though this connection is holding
** the WAL_READ_LOCK(0) which prevents a checkpoint, a writer might
** have already checkpointed the WAL file and, while the current read
** transaction is active, wrap the WAL and start overwriting frames that this
** process wants to use.
**
** Once sqlite3OsShmMap() has been called for an sqlite3_file and has
** returned any SQLITE_READONLY value, it must return only SQLITE_READONLY
** or SQLITE_READONLY_CANTINIT or some error for all subsequent invocations,
** even if some external agent does a "chmod" to make the shared-memory
** writable by us, until sqlite3OsShmUnmap() has been called.
** This is a requirement on the VFS implementation.
*/
rc = _sqlite3OsShmMap(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, 0, int32(libc.Uint64FromInt64(2)*uint64(libc.Int32FromInt32(HASHTABLE_NPAGE)*libc.Int32FromInt32(2))+libc.Uint64FromInt32(HASHTABLE_NPAGE)*libc.Uint64FromInt64(4)), 0, bp+40)
/* SQLITE_OK not possible for read-only connection */
if rc != libc.Int32FromInt32(SQLITE_READONLY)|libc.Int32FromInt32(5)<<libc.Int32FromInt32(8) {
if rc == int32(SQLITE_READONLY) {
v1 = -int32(1)
} else {
v1 = rc
}
rc = v1
goto begin_unreliable_shm_out
}
/* We reach this point only if the real shared-memory is still unreliable.
** Assume the in-memory WAL-index substitute is correct and load it
** into pWal->hdr.
*/
libc.Xmemcpy(tls, pWal+72, _walIndexHdr(tls, pWal), uint64(48))
/* Make sure some writer hasn't come in and changed the WAL file out
** from under us, then disconnected, while we were not looking.
*/
rc = _sqlite3OsFileSize(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, bp)
if rc != SQLITE_OK {
goto begin_unreliable_shm_out
}
if *(*Ti64)(unsafe.Pointer(bp)) < int64(WAL_HDRSIZE) {
/* If the wal file is too small to contain a wal-header and the
** wal-index header has mxFrame==0, then it must be safe to proceed
** reading the database file only. However, the page cache cannot
** be trusted, as a read/write connection may have connected, written
** the db, run a checkpoint, truncated the wal file and disconnected
** since this client's last read transaction. */
*(*int32)(unsafe.Pointer(pChanged)) = int32(1)
if (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame == uint32(0) {
v2 = SQLITE_OK
} else {
v2 = -int32(1)
}
rc = v2
goto begin_unreliable_shm_out
}
/* Check the salt keys at the start of the wal file still match. */
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, bp+8, int32(WAL_HDRSIZE), 0)
if rc != SQLITE_OK {
goto begin_unreliable_shm_out
}
if libc.Xmemcmp(tls, pWal+72+32, bp+8+16, uint64(8)) != 0 {
/* Some writer has wrapped the WAL file while we were not looking.
** Return WAL_RETRY which will cause the in-memory WAL-index to be
** rebuilt. */
rc = -int32(1)
goto begin_unreliable_shm_out
}
/* Allocate a buffer to read frames into */
szFrame = int32((*TWal)(unsafe.Pointer(pWal)).FszPage + uint32(WAL_FRAME_HDRSIZE))
aFrame = Xsqlite3_malloc64(tls, uint64(szFrame))
if aFrame == uintptr(0) {
rc = int32(SQLITE_NOMEM)
goto begin_unreliable_shm_out
}
aData = aFrame + 24
/* Check to see if a complete transaction has been appended to the
** wal file since the heap-memory wal-index was created. If so, the
** heap-memory wal-index is discarded and WAL_RETRY returned to
** the caller. */
aSaveCksum[0] = *(*Tu32)(unsafe.Pointer(pWal + 72 + 24))
aSaveCksum[int32(1)] = *(*Tu32)(unsafe.Pointer(pWal + 72 + 24 + 1*4))
iOffset = libc.Int64FromInt32(WAL_HDRSIZE) + int64((*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame+libc.Uint32FromInt32(1)-libc.Uint32FromInt32(1))*int64((*TWal)(unsafe.Pointer(pWal)).FszPage+libc.Uint32FromInt32(WAL_FRAME_HDRSIZE))
for {
if !(iOffset+int64(szFrame) <= *(*Ti64)(unsafe.Pointer(bp))) {
break
} /* dbsize field from frame header */
/* Read and decode the next log frame. */
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, aFrame, szFrame, iOffset)
if rc != SQLITE_OK {
break
}
if !(_walDecodeFrame(tls, pWal, bp+48, bp+52, aData, aFrame) != 0) {
break
}
/* If nTruncate is non-zero, then a complete transaction has been
** appended to this wal file. Set rc to WAL_RETRY and break out of
** the loop. */
if *(*Tu32)(unsafe.Pointer(bp + 52)) != 0 {
rc = -int32(1)
break
}
goto _3
_3:
;
iOffset += int64(szFrame)
}
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24)) = aSaveCksum[0]
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24 + 1*4)) = aSaveCksum[int32(1)]
goto begin_unreliable_shm_out
begin_unreliable_shm_out:
;
Xsqlite3_free(tls, aFrame)
if rc != SQLITE_OK {
i = 0
for {
if !(i < (*TWal)(unsafe.Pointer(pWal)).FnWiData) {
break
}
Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(i)*8)))
*(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData + uintptr(i)*8)) = uintptr(0)
goto _4
_4:
;
i++
}
(*TWal)(unsafe.Pointer(pWal)).FbShmUnreliable = uint8(0)
_sqlite3WalEndReadTransaction(tls, pWal)
*(*int32)(unsafe.Pointer(pChanged)) = int32(1)
}
return rc
}
/*
** The final argument passed to walTryBeginRead() is of type (int*). The
** caller should invoke walTryBeginRead as follows:
**
** int cnt = 0;
** do {
** rc = walTryBeginRead(..., &cnt);
** }while( rc==WAL_RETRY );
**
** The final value of "cnt" is of no use to the caller. It is used by
** the implementation of walTryBeginRead() as follows:
**
** + Each time walTryBeginRead() is called, it is incremented. Once
** it reaches WAL_RETRY_PROTOCOL_LIMIT - indicating that walTryBeginRead()
** has been invoked and failed with WAL_RETRY many times - walTryBeginRead()
** returns SQLITE_PROTOCOL.
**
** + If SQLITE_ENABLE_SETLK_TIMEOUT is defined and walTryBeginRead() failed
** because a blocking lock timed out (SQLITE_BUSY_TIMEOUT from the OS
** layer), the WAL_RETRY_BLOCKED_MASK bit is set in "cnt". In this case
** the next invocation of walTryBeginRead() may omit an expected call to
** sqlite3OsSleep(). There has already been a delay when the previous call
** waited on a lock.
*/
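// Illustrative sketch (not part of the generated translation): the Go
// equivalent of the C calling pattern shown above, mirroring how
// _walBeginReadTransaction below drives _walTryBeginRead. The helper name is
// hypothetical; WAL_RETRY is rendered as -1 in this translation and the
// counter lives in tls-allocated memory, as elsewhere in this file.
func _exampleTryBeginReadLoop(tls *libc.TLS, pWal uintptr, pChanged uintptr) (r int32) {
	bp := tls.Alloc(16)
	defer tls.Free(16)
	*(*int32)(unsafe.Pointer(bp)) = 0 /* cnt */
	for cond := true; cond; cond = r == -int32(1) /* WAL_RETRY */ {
		r = _walTryBeginRead(tls, pWal, pChanged, 0, bp)
	}
	return r
}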
// C documentation
//
// /*
// ** Attempt to start a read transaction. This might fail due to a race or
// ** other transient condition. When that happens, it returns WAL_RETRY to
// ** indicate to the caller that it is safe to retry immediately.
// **
// ** On success return SQLITE_OK. On a permanent failure (such an
// ** I/O error or an SQLITE_BUSY because another process is running
// ** recovery) return a positive error code.
// **
// ** The useWal parameter is true to force the use of the WAL and disable
// ** the case where the WAL is bypassed because it has been completely
// ** checkpointed. If useWal==0 then this routine calls walIndexReadHdr()
// ** to make a copy of the wal-index header into pWal->hdr. If the
// ** wal-index header has changed, *pChanged is set to 1 (as an indication
// ** to the caller that the local page cache is obsolete and needs to be
// ** flushed.) When useWal==1, the wal-index header is assumed to already
// ** be loaded and the pChanged parameter is unused.
// **
// ** The caller must set the cnt parameter to the number of prior calls to
// ** this routine during the current read attempt that returned WAL_RETRY.
// ** This routine will start taking more aggressive measures to clear the
// ** race conditions after multiple WAL_RETRY returns, and after an excessive
// ** number of errors will ultimately return SQLITE_PROTOCOL. The
// ** SQLITE_PROTOCOL return indicates that some other process has gone rogue
// ** and is not honoring the locking protocol. There is a vanishingly small
// ** chance that SQLITE_PROTOCOL could be returned because of a run of really
// ** bad luck when there is lots of contention for the wal-index, but that
// ** possibility is so small that it can be safely neglected, we believe.
// **
// ** On success, this routine obtains a read lock on
// ** WAL_READ_LOCK(pWal->readLock). The pWal->readLock integer is
// ** in the range 0 <= pWal->readLock < WAL_NREADER. If pWal->readLock==(-1)
// ** that means the Wal does not hold any read lock. The reader must not
// ** access any database page that is modified by a WAL frame up to and
// ** including frame number aReadMark[pWal->readLock]. The reader will
// ** use WAL frames up to and including pWal->hdr.mxFrame if pWal->readLock>0
// ** Or if pWal->readLock==0, then the reader will ignore the WAL
// ** completely and get all content directly from the database file.
// ** If the useWal parameter is 1 then the WAL will never be ignored and
// ** this routine will always set pWal->readLock>0 on success.
// ** When the read transaction is completed, the caller must release the
// ** lock on WAL_READ_LOCK(pWal->readLock) and set pWal->readLock to -1.
// **
// ** This routine uses the nBackfill and aReadMark[] fields of the header
// ** to select a particular WAL_READ_LOCK() that strives to let the
// ** checkpoint process do as much work as possible. This routine might
// ** update values of the aReadMark[] array in the header, but if it does
// ** so it takes care to hold an exclusive lock on the corresponding
// ** WAL_READ_LOCK() while changing values.
// */
func _walTryBeginRead(tls *libc.TLS, pWal uintptr, pChanged uintptr, useWal int32, pCnt uintptr) (r int32) {
var cnt, i, mxI, nDelay, rc, v1, v4, v5 int32
var mxFrame, mxReadMark, thisMark Tu32
var pInfo uintptr
_, _, _, _, _, _, _, _, _, _, _, _ = cnt, i, mxFrame, mxI, mxReadMark, nDelay, pInfo, rc, thisMark, v1, v4, v5 /* Loop counter */
rc = SQLITE_OK /* Wal frame to lock to */
/* Not currently locked */
/* useWal may only be set for read/write connections */
/* Take steps to avoid spinning forever if there is a protocol error.
**
** Circumstances that cause a RETRY should only last for the briefest
** instances of time. No I/O or other system calls are done while the
** locks are held, so the locks should not be held for very long. But
** if we are unlucky, another process that is holding a lock might get
** paged out or take a page-fault that is time-consuming to resolve,
** during the few nanoseconds that it is holding the lock. In that case,
** it might take longer than normal for the lock to free.
**
** After 5 RETRYs, we begin calling sqlite3OsSleep(). The first few
** calls to sqlite3OsSleep() have a delay of 1 microsecond. Really this
** is more of a scheduler yield than an actual delay. But on the 10th
** and subsequent retries, the delays start becoming longer and longer,
** so that on the 100th (and last) RETRY we delay for 323 milliseconds.
** The total delay time before giving up is less than 10 seconds.
*/
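/* Worked example (illustrative): with nDelay = (cnt-9)*(cnt-9)*39
** microseconds as computed below, the 100th retry sleeps for
** (100-9)*(100-9)*39 = 322959 microseconds, i.e. roughly the 323
** milliseconds quoted above, and the delays for retries 10..100 sum to just
** under the stated 10 seconds. */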
*(*int32)(unsafe.Pointer(pCnt))++
if *(*int32)(unsafe.Pointer(pCnt)) > int32(5) {
nDelay = int32(1) /* Pause time in microseconds */
cnt = *(*int32)(unsafe.Pointer(pCnt)) & ^libc.Int32FromInt32(WAL_RETRY_BLOCKED_MASK)
if cnt > int32(WAL_RETRY_PROTOCOL_LIMIT) {
return int32(SQLITE_PROTOCOL)
}
if *(*int32)(unsafe.Pointer(pCnt)) >= int32(10) {
nDelay = (cnt - int32(9)) * (cnt - int32(9)) * int32(39)
}
_sqlite3OsSleep(tls, (*TWal)(unsafe.Pointer(pWal)).FpVfs, nDelay)
*(*int32)(unsafe.Pointer(pCnt)) &= ^libc.Int32FromInt32(WAL_RETRY_BLOCKED_MASK)
}
if !(useWal != 0) {
if int32((*TWal)(unsafe.Pointer(pWal)).FbShmUnreliable) == 0 {
rc = _walIndexReadHdr(tls, pWal, pChanged)
}
if rc == int32(SQLITE_BUSY) {
/* If there is not a recovery running in another thread or process
** then convert BUSY errors to WAL_RETRY. If recovery is known to
** be running, convert BUSY to BUSY_RECOVERY. There is a race here
** which might cause WAL_RETRY to be returned even if BUSY_RECOVERY
** would be technically correct. But the race is benign since with
** WAL_RETRY this routine will be called again and will probably be
** right on the second iteration.
*/
if *(*uintptr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FapWiData)) == uintptr(0) {
/* This branch is taken when the xShmMap() method returns SQLITE_BUSY.
** We assume this is a transient condition, so return WAL_RETRY. The
** xShmMap() implementation used by the default unix and win32 VFS
** modules may return SQLITE_BUSY due to a race condition in the
** code that determines whether or not the shared-memory region
** must be zeroed before the requested page is returned.
*/
rc = -int32(1)
} else {
v1 = _walLockShared(tls, pWal, int32(WAL_RECOVER_LOCK))
rc = v1
if SQLITE_OK == v1 {
_walUnlockShared(tls, pWal, int32(WAL_RECOVER_LOCK))
rc = -int32(1)
} else {
if rc == int32(SQLITE_BUSY) {
rc = libc.Int32FromInt32(SQLITE_BUSY) | libc.Int32FromInt32(1)<hdr.mxFrame and lock that entry.
*/
mxReadMark = uint32(0)
mxI = 0
mxFrame = (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame
if (*TWal)(unsafe.Pointer(pWal)).FpSnapshot != 0 && (*TWalIndexHdr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FpSnapshot)).FmxFrame < mxFrame {
mxFrame = (*TWalIndexHdr)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FpSnapshot)).FmxFrame
}
i = int32(1)
for {
if !(i < libc.Int32FromInt32(SQLITE_SHM_NLOCK)-libc.Int32FromInt32(3)) {
break
}
thisMark = *(*Tu32)(unsafe.Pointer(pInfo + 4 + uintptr(i)*4))
if mxReadMark <= thisMark && thisMark <= mxFrame {
mxReadMark = thisMark
mxI = i
}
goto _2
_2:
;
i++
}
if int32((*TWal)(unsafe.Pointer(pWal)).FreadOnly)&int32(WAL_SHM_RDONLY) == 0 && (mxReadMark < mxFrame || mxI == 0) {
i = int32(1)
for {
if !(i < libc.Int32FromInt32(SQLITE_SHM_NLOCK)-libc.Int32FromInt32(3)) {
break
}
rc = _walLockExclusive(tls, pWal, int32(3)+i, int32(1))
if rc == SQLITE_OK {
*(*Tu32)(unsafe.Pointer(pInfo + 4 + uintptr(i)*4)) = mxFrame
mxReadMark = mxFrame
mxI = i
_walUnlockExclusive(tls, pWal, int32(3)+i, int32(1))
break
} else {
if rc != int32(SQLITE_BUSY) {
return rc
}
}
goto _3
_3:
;
i++
}
}
if mxI == 0 {
if rc == int32(SQLITE_BUSY) {
v4 = -int32(1)
} else {
v4 = libc.Int32FromInt32(SQLITE_READONLY) | libc.Int32FromInt32(5)<hdr.mxFrame may have been
** copied into the database by a checkpointer. If either of these things
** happened, then reading the database with the current value of
** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry
** instead.
**
** Before checking that the live wal-index header has not changed
** since it was read, set Wal.minFrame to the first frame in the wal
** file that has not yet been checkpointed. This client will not need
** to read any frames earlier than minFrame from the wal file - they
** can be safely read directly from the database file.
**
** Because a ShmBarrier() call is made between taking the copy of
** nBackfill and checking that the wal-header in shared-memory still
** matches the one cached in pWal->hdr, it is guaranteed that the
** checkpointer that set nBackfill was not working with a wal-index
** header newer than that cached in pWal->hdr. If it were, that could
** cause a problem. The checkpointer could omit to checkpoint
** a version of page X that lies before pWal->minFrame (call that version
** A) on the basis that there is a newer version (version B) of the same
** page later in the wal file. But if version B happens to lie past
** frame pWal->hdr.mxFrame - then the client would incorrectly assume
** that it can read version A from the database file. However, since
** we can guarantee that the checkpointer that set nBackfill could not
** see any pages past pWal->hdr.mxFrame, this problem does not come up.
*/
(*TWal)(unsafe.Pointer(pWal)).FminFrame = *(*Tu32)(unsafe.Pointer(pInfo)) + uint32(1)
_walShmBarrier(tls, pWal)
if *(*Tu32)(unsafe.Pointer(pInfo + 4 + uintptr(mxI)*4)) != mxReadMark || libc.Xmemcmp(tls, _walIndexHdr(tls, pWal), pWal+72, uint64(48)) != 0 {
_walUnlockShared(tls, pWal, int32(3)+mxI)
return -int32(1)
} else {
(*TWal)(unsafe.Pointer(pWal)).FreadLock = int16(mxI)
}
return rc
}
// C documentation
//
// /*
// ** This function does the work of sqlite3WalSnapshotRecover().
// */
func _walSnapshotRecover(tls *libc.TLS, pWal uintptr, pBuf1 uintptr, pBuf2 uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var i, pgno Tu32
var iDbOff, iWalOff Ti64
var pInfo uintptr
var rc, szPage int32
var _ /* sLoc at bp+8 */ TWalHashLoc
var _ /* szDb at bp+0 */ Ti64
_, _, _, _, _, _, _ = i, iDbOff, iWalOff, pInfo, pgno, rc, szPage
szPage = int32((*TWal)(unsafe.Pointer(pWal)).FszPage) /* Size of db file in bytes */
rc = _sqlite3OsFileSize(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, bp)
if rc == SQLITE_OK {
pInfo = _walCkptInfo(tls, pWal)
i = (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfillAttempted
i = (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfillAttempted
for {
if !(i > *(*Tu32)(unsafe.Pointer(pInfo))) {
break
} /* Offset of wal file entry */
rc = _walHashGet(tls, pWal, _walFramePage(tls, i), bp+8)
if rc != SQLITE_OK {
break
}
pgno = *(*Tu32)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp + 8))).FaPgno + uintptr(i-(*(*TWalHashLoc)(unsafe.Pointer(bp + 8))).FiZero-uint32(1))*4))
iDbOff = int64(pgno-libc.Uint32FromInt32(1)) * int64(szPage)
if iDbOff+int64(szPage) <= *(*Ti64)(unsafe.Pointer(bp)) {
iWalOff = int64(WAL_HDRSIZE) + int64(i-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE)) + int64(WAL_FRAME_HDRSIZE)
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, pBuf1, szPage, iWalOff)
if rc == SQLITE_OK {
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, pBuf2, szPage, iDbOff)
}
if rc != SQLITE_OK || 0 == libc.Xmemcmp(tls, pBuf1, pBuf2, uint64(szPage)) {
break
}
}
(*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfillAttempted = i - uint32(1)
goto _1
_1:
;
i--
}
}
return rc
}
// C documentation
//
// /*
// ** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted
// ** variable so that older snapshots can be accessed. To do this, loop
// ** through all wal frames from nBackfillAttempted to (nBackfill+1),
// ** comparing their content to the corresponding page with the database
// ** file, if any. Set nBackfillAttempted to the frame number of the
// ** first frame for which the wal file content matches the db file.
// **
// ** This is only really safe if the file-system is such that any page
// ** writes made by earlier checkpointers were atomic operations, which
// ** is not always true. It is also possible that nBackfillAttempted
// ** may be left set to a value larger than expected, if a wal frame
// ** contains content that is a duplicate of an earlier version of the same
// ** page.
// **
// ** SQLITE_OK is returned if successful, or an SQLite error code if an
// ** error occurs. It is not an error if nBackfillAttempted cannot be
// ** decreased at all.
// */
func _sqlite3WalSnapshotRecover(tls *libc.TLS, pWal uintptr) (r int32) {
var pBuf1, pBuf2 uintptr
var rc int32
_, _, _ = pBuf1, pBuf2, rc
rc = _walLockExclusive(tls, pWal, int32(WAL_CKPT_LOCK), int32(1))
if rc == SQLITE_OK {
pBuf1 = Xsqlite3_malloc(tls, int32((*TWal)(unsafe.Pointer(pWal)).FszPage))
pBuf2 = Xsqlite3_malloc(tls, int32((*TWal)(unsafe.Pointer(pWal)).FszPage))
if pBuf1 == uintptr(0) || pBuf2 == uintptr(0) {
rc = int32(SQLITE_NOMEM)
} else {
(*TWal)(unsafe.Pointer(pWal)).FckptLock = uint8(1)
rc = _walSnapshotRecover(tls, pWal, pBuf1, pBuf2)
(*TWal)(unsafe.Pointer(pWal)).FckptLock = uint8(0)
}
Xsqlite3_free(tls, pBuf1)
Xsqlite3_free(tls, pBuf2)
_walUnlockExclusive(tls, pWal, int32(WAL_CKPT_LOCK), int32(1))
}
return rc
}
// C documentation
//
// /*
// ** This function does the work of sqlite3WalBeginReadTransaction() (see
// ** below). That function simply calls this one inside an SEH_TRY{...} block.
// */
func _walBeginReadTransaction(tls *libc.TLS, pWal uintptr, pChanged uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var bChanged, ckptLock, rc int32
var pInfo, pSnapshot uintptr
var _ /* cnt at bp+0 */ int32
_, _, _, _, _ = bChanged, ckptLock, pInfo, pSnapshot, rc /* Return code */
*(*int32)(unsafe.Pointer(bp)) = 0 /* Number of TryBeginRead attempts */
ckptLock = 0
bChanged = 0
pSnapshot = (*TWal)(unsafe.Pointer(pWal)).FpSnapshot
if pSnapshot != 0 {
if libc.Xmemcmp(tls, pSnapshot, pWal+72, uint64(48)) != 0 {
bChanged = int32(1)
}
/* It is possible that there is a checkpointer thread running
** concurrent with this code. If this is the case, it may be that the
** checkpointer has already determined that it will checkpoint
** snapshot X, where X is later in the wal file than pSnapshot, but
** has not yet set the pInfo->nBackfillAttempted variable to indicate
** its intent. To avoid the race condition this leads to, ensure that
** there is no checkpointer process by taking a shared CKPT lock
** before checking pInfo->nBackfillAttempted. */
rc = _walLockShared(tls, pWal, int32(WAL_CKPT_LOCK))
if rc != SQLITE_OK {
return rc
}
ckptLock = int32(1)
}
for cond := true; cond; cond = rc == -int32(1) {
rc = _walTryBeginRead(tls, pWal, pChanged, 0, bp)
}
if rc == SQLITE_OK {
if pSnapshot != 0 && libc.Xmemcmp(tls, pSnapshot, pWal+72, uint64(48)) != 0 {
/* At this point the client has a lock on an aReadMark[] slot holding
** a value equal to or smaller than pSnapshot->mxFrame, but pWal->hdr
** is populated with the wal-index header corresponding to the head
** of the wal file. Verify that pSnapshot is still valid before
** continuing. Reasons why pSnapshot might no longer be valid:
**
** (1) The WAL file has been reset since the snapshot was taken.
** In this case, the salt will have changed.
**
** (2) A checkpoint has been attempted that wrote frames past
** pSnapshot->mxFrame into the database file. Note that the
** checkpoint need not have completed for this to cause problems.
*/
pInfo = _walCkptInfo(tls, pWal)
/* Check that the wal file has not been wrapped. Assuming that it has
** not, also check that no checkpointer has attempted to checkpoint any
** frames beyond pSnapshot->mxFrame. If either of these conditions are
** true, return SQLITE_ERROR_SNAPSHOT. Otherwise, overwrite pWal->hdr
** with *pSnapshot and set *pChanged as appropriate for opening the
** snapshot. */
if !(libc.Xmemcmp(tls, pSnapshot+32, pWal+72+32, uint64(8)) != 0) && (*TWalIndexHdr)(unsafe.Pointer(pSnapshot)).FmxFrame >= (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfillAttempted {
libc.Xmemcpy(tls, pWal+72, pSnapshot, uint64(48))
*(*int32)(unsafe.Pointer(pChanged)) = bChanged
} else {
rc = libc.Int32FromInt32(SQLITE_ERROR) | libc.Int32FromInt32(3)<= 0 {
_walUnlockShared(tls, pWal, int32(3)+int32((*TWal)(unsafe.Pointer(pWal)).FreadLock))
(*TWal)(unsafe.Pointer(pWal)).FreadLock = int16(-int32(1))
}
}
// C documentation
//
// /*
// ** Search the wal file for page pgno. If found, set *piRead to the frame that
// ** contains the page. Otherwise, if pgno is not in the wal file, set *piRead
// ** to zero.
// **
// ** Return SQLITE_OK if successful, or an error code if an error occurs. If an
// ** error does occur, the final value of *piRead is undefined.
// */
func _walFindFrame(tls *libc.TLS, pWal uintptr, pgno TPgno, piRead uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var iFrame, iH, iLast, iRead, v2 Tu32
var iHash, iKey, iMinHash, nCollide, rc, v3 int32
var _ /* sLoc at bp+0 */ TWalHashLoc
_, _, _, _, _, _, _, _, _, _, _ = iFrame, iH, iHash, iKey, iLast, iMinHash, iRead, nCollide, rc, v2, v3
iRead = uint32(0) /* If !=0, WAL frame to return data from */
iLast = (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame
/* This routine is only called from within a read transaction. */
/* If the "last page" field of the wal-index header snapshot is 0, then
** no data will be read from the wal under any circumstances. Return early
** in this case as an optimization. Likewise, if pWal->readLock==0,
** then the WAL is ignored by the reader so return early, as if the
** WAL were empty.
*/
if iLast == uint32(0) || int32((*TWal)(unsafe.Pointer(pWal)).FreadLock) == 0 && int32((*TWal)(unsafe.Pointer(pWal)).FbShmUnreliable) == 0 {
*(*Tu32)(unsafe.Pointer(piRead)) = uint32(0)
return SQLITE_OK
}
/* Search the hash table or tables for an entry matching page number
** pgno. Each iteration of the following for() loop searches one
** hash table (each hash table indexes up to HASHTABLE_NPAGE frames).
**
** This code might run concurrently to the code in walIndexAppend()
** that adds entries to the wal-index (and possibly to this hash
** table). This means the value just read from the hash
** slot (aHash[iKey]) may have been added before or after the
** current read transaction was opened. Values added after the
** read transaction was opened may have been written incorrectly -
** i.e. these slots may contain garbage data. However, we assume
** that any slots written before the current read transaction was
** opened remain unmodified.
**
** For the reasons above, the if(...) condition featured in the inner
** loop of the following block is more stringent than would be required
** if we had exclusive access to the hash-table:
**
** (aPgno[iFrame]==pgno):
** This condition filters out normal hash-table collisions.
**
** (iFrame<=iLast):
** This condition filters out entries that were added to the hash
** table after the current read-transaction had started.
*/
iMinHash = _walFramePage(tls, (*TWal)(unsafe.Pointer(pWal)).FminFrame)
iHash = _walFramePage(tls, iLast)
for {
if !(iHash >= iMinHash) {
break
}
rc = _walHashGet(tls, pWal, iHash, bp)
if rc != SQLITE_OK {
return rc
}
nCollide = libc.Int32FromInt32(HASHTABLE_NPAGE) * libc.Int32FromInt32(2)
iKey = _walHash(tls, pgno)
for {
v2 = uint32(*(*Tht_slot)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaHash + uintptr(iKey)*2)))
iH = v2
if !(v2 != uint32(0)) {
break
}
iFrame = iH + (*(*TWalHashLoc)(unsafe.Pointer(bp))).FiZero
if iFrame <= iLast && iFrame >= (*TWal)(unsafe.Pointer(pWal)).FminFrame && *(*Tu32)(unsafe.Pointer((*(*TWalHashLoc)(unsafe.Pointer(bp))).FaPgno + uintptr(iH-uint32(1))*4)) == pgno {
iRead = iFrame
}
v3 = nCollide
nCollide--
if v3 == 0 {
*(*Tu32)(unsafe.Pointer(piRead)) = uint32(0)
return _sqlite3CorruptError(tls, int32(68129))
}
iKey = _walNextHash(tls, iKey)
}
if iRead != 0 {
break
}
goto _1
_1:
;
iHash--
}
*(*Tu32)(unsafe.Pointer(piRead)) = iRead
return SQLITE_OK
}
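// Illustrative sketch, not part of the generated translation: the probe loop in
// _walFindFrame above is an open-addressed hash search over one wal-index hash
// table. A simplified, self-contained version over plain Go slices follows; the
// helper name and parameters are hypothetical. aHash holds 1-based frame numbers
// relative to iZero, aPgno[i-1] holds the page number written by frame iZero+i,
// and probing steps linearly with wraparound, as _walNextHash does. The collision
// limit enforced by the real code is omitted for brevity.
func _exampleHashProbe(aHash []uint16, aPgno []uint32, iZero, pgno, iLast uint32, iKey int) uint32 {
iRead := uint32(0)
for aHash[iKey] != 0 {
iFrame := uint32(aHash[iKey]) + iZero
if iFrame <= iLast && aPgno[aHash[iKey]-1] == pgno {
// Later frames for the same page sit later in the probe chain, so the
// last match found is the newest frame that contains the page.
iRead = iFrame
}
iKey = (iKey + 1) % len(aHash)
}
return iRead
}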
// C documentation
//
// /*
// ** Search the wal file for page pgno. If found, set *piRead to the frame that
// ** contains the page. Otherwise, if pgno is not in the wal file, set *piRead
// ** to zero.
// **
// ** Return SQLITE_OK if successful, or an error code if an error occurs. If an
// ** error does occur, the final value of *piRead is undefined.
// **
// ** The difference between this function and walFindFrame() is that this
// ** function wraps walFindFrame() in an SEH_TRY{...} block.
// */
func _sqlite3WalFindFrame(tls *libc.TLS, pWal uintptr, pgno TPgno, piRead uintptr) (r int32) {
var rc int32
_ = rc
rc = _walFindFrame(tls, pWal, pgno, piRead)
return rc
}
// C documentation
//
// /*
// ** Read the contents of frame iRead from the wal file into buffer pOut
// ** (which is nOut bytes in size). Return SQLITE_OK if successful, or an
// ** error code otherwise.
// */
func _sqlite3WalReadFrame(tls *libc.TLS, pWal uintptr, iRead Tu32, nOut int32, pOut uintptr) (r int32) {
var iOffset Ti64
var sz, v1 int32
_, _, _ = iOffset, sz, v1
sz = int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FszPage)
sz = sz&int32(0xfe00) + sz&int32(0x0001)<<libc.Int32FromInt32(16)
iOffset = int64(WAL_HDRSIZE) + int64(iRead-libc.Uint32FromInt32(1))*int64(sz+libc.Int32FromInt32(WAL_FRAME_HDRSIZE)) + int64(WAL_FRAME_HDRSIZE)
if nOut > sz {
v1 = sz
} else {
v1 = nOut
}
return _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, pOut, v1, iOffset)
}
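// Illustrative sketch, not part of the generated translation: two hypothetical
// helpers showing the arithmetic used by _sqlite3WalReadFrame above. The
// wal-index header stores the page size in a 16-bit field in which the value
// 0x0001 stands for 65536, and each frame sits at a fixed offset computed from
// its 1-based frame number. WAL_HDRSIZE and WAL_FRAME_HDRSIZE are the package
// constants already used by the surrounding code.
func _exampleDecodeWalPageSize(szField int32) int32 {
// Page sizes 512..32768 are stored literally; 65536 is stored as 0x0001.
return (szField & 0xfe00) + ((szField & 0x0001) << 16)
}
func _exampleWalFrameOffset(iFrame uint32, szPage int32) int64 {
// The WAL file header is followed by (iFrame-1) complete frames, each a
// frame header of WAL_FRAME_HDRSIZE bytes plus szPage bytes of page data
// (valid for iFrame >= 1).
return int64(WAL_HDRSIZE) + int64(iFrame-1)*int64(szPage+WAL_FRAME_HDRSIZE)
}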
// C documentation
//
// /*
// ** Return the size of the database in pages (or zero, if unknown).
// */
func _sqlite3WalDbsize(tls *libc.TLS, pWal uintptr) (r TPgno) {
if pWal != 0 && int32((*TWal)(unsafe.Pointer(pWal)).FreadLock) >= 0 {
return (*TWal)(unsafe.Pointer(pWal)).Fhdr.FnPage
}
return uint32(0)
}
// C documentation
//
// /*
// ** This function starts a write transaction on the WAL.
// **
// ** A read transaction must have already been started by a prior call
// ** to sqlite3WalBeginReadTransaction().
// **
// ** If another thread or process has written into the database since
// ** the read transaction was started, then it is not possible for this
// ** thread to write as doing so would cause a fork. So this routine
// ** returns SQLITE_BUSY in that case and no write transaction is started.
// **
// ** There can only be a single writer active at a time.
// */
func _sqlite3WalBeginWriteTransaction(tls *libc.TLS, pWal uintptr) (r int32) {
var rc int32
_ = rc
/* Cannot start a write transaction without first holding a read
** transaction. */
if (*TWal)(unsafe.Pointer(pWal)).FreadOnly != 0 {
return int32(SQLITE_READONLY)
}
/* Only one writer allowed at a time. Get the write lock. Return
** SQLITE_BUSY if unable.
*/
rc = _walLockExclusive(tls, pWal, WAL_WRITE_LOCK, int32(1))
if rc != 0 {
return rc
}
(*TWal)(unsafe.Pointer(pWal)).FwriteLock = uint8(1)
/* If another connection has written to the database file since the
** time the read transaction on this connection was started, then
** the write is disallowed.
*/
if libc.Xmemcmp(tls, pWal+72, _walIndexHdr(tls, pWal), uint64(48)) != 0 {
rc = libc.Int32FromInt32(SQLITE_BUSY) | libc.Int32FromInt32(2)<<libc.Int32FromInt32(8)
_walUnlockExclusive(tls, pWal, WAL_WRITE_LOCK, int32(1))
(*TWal)(unsafe.Pointer(pWal)).FwriteLock = uint8(0)
}
return rc
}
// C documentation
//
// /*
// ** This function is called just before writing a set of frames to the log
// ** file (see sqlite3WalFrames()). It checks to see if, instead of appending
// ** to the current log file, it is possible to overwrite the start of the
// ** existing log file with the new frames (i.e. "reset" the log). If so,
// ** it sets pWal->hdr.mxFrame to 0. Otherwise, pWal->hdr.mxFrame is left
// ** unchanged.
// **
// ** SQLITE_OK is returned if no error is encountered (regardless of whether
// ** or not pWal->hdr.mxFrame is modified). An SQLite error code is returned
// ** if an error occurs.
// */
func _walRestartLog(tls *libc.TLS, pWal uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pInfo uintptr
var rc int32
var _ /* cnt at bp+0 */ int32
var _ /* notUsed at bp+8 */ int32
var _ /* salt1 at bp+4 */ Tu32
_, _ = pInfo, rc
rc = SQLITE_OK
if int32((*TWal)(unsafe.Pointer(pWal)).FreadLock) == 0 {
pInfo = _walCkptInfo(tls, pWal)
if (*TWalCkptInfo)(unsafe.Pointer(pInfo)).FnBackfill > uint32(0) {
Xsqlite3_randomness(tls, int32(4), bp+4)
rc = _walLockExclusive(tls, pWal, libc.Int32FromInt32(3)+libc.Int32FromInt32(1), libc.Int32FromInt32(SQLITE_SHM_NLOCK)-libc.Int32FromInt32(3)-libc.Int32FromInt32(1))
if rc == SQLITE_OK {
/* If all readers are using WAL_READ_LOCK(0) (in other words if no
** readers are currently using the WAL), then the transactions
** frames will overwrite the start of the existing log. Update the
** wal-index header to reflect this.
**
** In theory it would be Ok to update the cache of the header only
** at this point. But updating the actual wal-index header is also
** safe and means there is no special case for sqlite3WalUndo()
** to handle if this transaction is rolled back. */
_walRestartHdr(tls, pWal, *(*Tu32)(unsafe.Pointer(bp + 4)))
_walUnlockExclusive(tls, pWal, libc.Int32FromInt32(3)+libc.Int32FromInt32(1), libc.Int32FromInt32(SQLITE_SHM_NLOCK)-libc.Int32FromInt32(3)-libc.Int32FromInt32(1))
} else {
if rc != int32(SQLITE_BUSY) {
return rc
}
}
}
_walUnlockShared(tls, pWal, libc.Int32FromInt32(3)+libc.Int32FromInt32(0))
(*TWal)(unsafe.Pointer(pWal)).FreadLock = int16(-int32(1))
*(*int32)(unsafe.Pointer(bp)) = 0
for cond := true; cond; cond = rc == -int32(1) {
rc = _walTryBeginRead(tls, pWal, bp+8, int32(1), bp)
}
/* BUSY not possible when useWal==1 */
}
return rc
}
// C documentation
//
// /*
// ** Information about the current state of the WAL file and where
// ** the next fsync should occur - passed from sqlite3WalFrames() into
// ** walWriteToLog().
// */
type TWalWriter = struct {
FpWal uintptr
FpFd uintptr
FiSyncPoint Tsqlite3_int64
FsyncFlags int32
FszPage int32
}
type WalWriter = TWalWriter
// C documentation
//
// /*
// ** Write iAmt bytes of content into the WAL file beginning at iOffset.
// ** Do a sync when crossing the p->iSyncPoint boundary.
// **
// ** In other words, if iSyncPoint is in between iOffset and iOffset+iAmt,
// ** first write the part before iSyncPoint, then sync, then write the
// ** rest.
// */
func _walWriteToLog(tls *libc.TLS, p uintptr, pContent uintptr, iAmt int32, iOffset Tsqlite3_int64) (r int32) {
var iFirstAmt, rc int32
_, _ = iFirstAmt, rc
if iOffset < (*TWalWriter)(unsafe.Pointer(p)).FiSyncPoint && iOffset+int64(iAmt) >= (*TWalWriter)(unsafe.Pointer(p)).FiSyncPoint {
iFirstAmt = int32((*TWalWriter)(unsafe.Pointer(p)).FiSyncPoint - iOffset)
rc = _sqlite3OsWrite(tls, (*TWalWriter)(unsafe.Pointer(p)).FpFd, pContent, iFirstAmt, iOffset)
if rc != 0 {
return rc
}
iOffset += int64(iFirstAmt)
iAmt -= iFirstAmt
pContent = uintptr(iFirstAmt) + pContent
rc = _sqlite3OsSync(tls, (*TWalWriter)(unsafe.Pointer(p)).FpFd, (*TWalWriter)(unsafe.Pointer(p)).FsyncFlags&int32(0x03))
if iAmt == 0 || rc != 0 {
return rc
}
}
rc = _sqlite3OsWrite(tls, (*TWalWriter)(unsafe.Pointer(p)).FpFd, pContent, iAmt, iOffset)
return rc
}
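// Illustrative sketch, not part of the generated translation: _walWriteToLog
// above splits any write that straddles p.FiSyncPoint so that everything before
// the sync point is written and synced first. This hypothetical helper computes
// the size of that first partial write; 0 means no split is needed.
func _exampleSyncSplit(iOffset, iSyncPoint int64, iAmt int32) int32 {
if iOffset < iSyncPoint && iOffset+int64(iAmt) >= iSyncPoint {
return int32(iSyncPoint - iOffset)
}
return 0
}
// For example, with iSyncPoint=4096 a 24-byte write at offset 4088 is split into
// 8 bytes, a sync, and then the remaining 16 bytes written at offset 4096.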
// C documentation
//
// /*
// ** Write out a single frame of the WAL
// */
func _walWriteOneFrame(tls *libc.TLS, p uintptr, pPage uintptr, nTruncate int32, iOffset Tsqlite3_int64) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var pData uintptr
var rc int32
var _ /* aFrame at bp+0 */ [24]Tu8
_, _ = pData, rc /* Buffer to assemble frame-header in */
pData = (*TPgHdr)(unsafe.Pointer(pPage)).FpData
_walEncodeFrame(tls, (*TWalWriter)(unsafe.Pointer(p)).FpWal, (*TPgHdr)(unsafe.Pointer(pPage)).Fpgno, uint32(nTruncate), pData, bp)
rc = _walWriteToLog(tls, p, bp, int32(24), iOffset)
if rc != 0 {
return rc
}
/* Write the page data */
rc = _walWriteToLog(tls, p, pData, (*TWalWriter)(unsafe.Pointer(p)).FszPage, int64(uint64(iOffset)+uint64(24)))
return rc
}
// C documentation
//
// /*
// ** This function is called as part of committing a transaction within which
// ** one or more frames have been overwritten. It updates the checksums for
// ** all frames written to the wal file by the current transaction starting
// ** with the earliest to have been overwritten.
// **
// ** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
// */
func _walRewriteChecksums(tls *libc.TLS, pWal uintptr, iLast Tu32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var aBuf uintptr
var iCksumOff, iOff Ti64
var iPgno, iRead, nDbSize Tu32
var rc, szPage int32
var _ /* aFrame at bp+0 */ [24]Tu8
_, _, _, _, _, _, _, _ = aBuf, iCksumOff, iOff, iPgno, iRead, nDbSize, rc, szPage
szPage = int32((*TWal)(unsafe.Pointer(pWal)).FszPage) /* Database page size */
rc = SQLITE_OK
aBuf = Xsqlite3_malloc(tls, szPage+int32(WAL_FRAME_HDRSIZE))
if aBuf == uintptr(0) {
return int32(SQLITE_NOMEM)
}
/* Find the checksum values to use as input for recalculating the
** first checksum. If the first frame is frame 1 (implying that the current
** transaction restarted the wal file), these values must be read from the
** wal-file header. Otherwise, read them from the frame header of the
** previous frame. */
if (*TWal)(unsafe.Pointer(pWal)).FiReCksum == uint32(1) {
iCksumOff = int64(24)
} else {
iCksumOff = int64(WAL_HDRSIZE) + int64((*TWal)(unsafe.Pointer(pWal)).FiReCksum-libc.Uint32FromInt32(1)-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE)) + int64(16)
}
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, aBuf, int32(libc.Uint64FromInt64(4)*libc.Uint64FromInt32(2)), iCksumOff)
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24)) = _sqlite3Get4byte(tls, aBuf)
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24 + 1*4)) = _sqlite3Get4byte(tls, aBuf+uintptr(4))
iRead = (*TWal)(unsafe.Pointer(pWal)).FiReCksum
(*TWal)(unsafe.Pointer(pWal)).FiReCksum = uint32(0)
for {
if !(rc == SQLITE_OK && iRead <= iLast) {
break
}
iOff = libc.Int64FromInt32(WAL_HDRSIZE) + int64(iRead-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE))
rc = _sqlite3OsRead(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, aBuf, szPage+int32(WAL_FRAME_HDRSIZE), iOff)
if rc == SQLITE_OK {
iPgno = _sqlite3Get4byte(tls, aBuf)
nDbSize = _sqlite3Get4byte(tls, aBuf+4)
_walEncodeFrame(tls, pWal, iPgno, nDbSize, aBuf+24, bp)
rc = _sqlite3OsWrite(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, bp, int32(24), iOff)
}
goto _1
_1:
;
iRead++
}
Xsqlite3_free(tls, aBuf)
return rc
}
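// Illustrative sketch, not part of the generated translation: the checksum chain
// rewritten by _walRewriteChecksums above is seeded from the checksum stored just
// before the first rewritten frame - bytes 24..31 of the WAL file header when
// that frame is frame 1, otherwise bytes 16..23 of the previous frame's header.
// This hypothetical helper mirrors the iCksumOff computation (frame numbers are
// 1-based).
func _exampleChecksumSeedOffset(iFrame uint32, szPage int32) int64 {
if iFrame == uint32(1) {
return 24
}
// Offset of frame iFrame-1 within the WAL, plus 16 bytes into its header.
return int64(WAL_HDRSIZE) + int64(iFrame-2)*int64(szPage+WAL_FRAME_HDRSIZE) + 16
}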
// C documentation
//
// /*
// ** Write a set of frames to the log. The caller must hold the write-lock
// ** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
// */
func _walFrames(tls *libc.TLS, pWal uintptr, szPage int32, pList uintptr, nTruncate TPgno, isCommit int32, sync_flags int32) (r int32) {
bp := tls.Alloc(80)
defer tls.Free(80)
var bSync, nDbSize, nExtra, rc, sectorSize, szFrame, v1 int32
var iFirst, iFrame Tu32
var iOff, iOffset, sz Ti64
var p, pData, pLast, pLive, p3, p5 uintptr
var v4 uint32
var _ /* aCksum at bp+64 */ [2]Tu32
var _ /* aWalHdr at bp+32 */ [32]Tu8
var _ /* iWrite at bp+72 */ Tu32
var _ /* w at bp+0 */ TWalWriter
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = bSync, iFirst, iFrame, iOff, iOffset, nDbSize, nExtra, p, pData, pLast, pLive, rc, sectorSize, sz, szFrame, v1, v4, p3, p5 /* Iterator to run through pList with. */
pLast = uintptr(0) /* Last frame in list */
nExtra = 0 /* The writer */
iFirst = uint32(0) /* Pointer to shared header */
/* If this frame set completes a transaction, then nTruncate>0. If
** nTruncate==0 then this frame set does not complete the transaction. */
pLive = _walIndexHdr(tls, pWal)
if libc.Xmemcmp(tls, pWal+72, pLive, uint64(48)) != 0 {
iFirst = (*TWalIndexHdr)(unsafe.Pointer(pLive)).FmxFrame + uint32(1)
}
/* See if it is possible to write these frames into the start of the
** log file, instead of appending to it at pWal->hdr.mxFrame.
*/
v1 = _walRestartLog(tls, pWal)
rc = v1
if SQLITE_OK != v1 {
return rc
}
/* If this is the first frame written into the log, write the WAL
** header to the start of the WAL file. See comments at the top of
** this source file for a description of the WAL header format.
*/
iFrame = (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame
if iFrame == uint32(0) { /* Checksum for wal-header */
_sqlite3Put4byte(tls, bp+32, uint32(libc.Int32FromInt32(WAL_MAGIC)|libc.Int32FromInt32(SQLITE_BIGENDIAN)))
_sqlite3Put4byte(tls, bp+32+4, uint32(WAL_MAX_VERSION))
_sqlite3Put4byte(tls, bp+32+8, uint32(szPage))
_sqlite3Put4byte(tls, bp+32+12, (*TWal)(unsafe.Pointer(pWal)).FnCkpt)
if (*TWal)(unsafe.Pointer(pWal)).FnCkpt == uint32(0) {
Xsqlite3_randomness(tls, int32(8), pWal+72+32)
}
libc.Xmemcpy(tls, bp+32+16, pWal+72+32, uint64(8))
_walChecksumBytes(tls, int32(1), bp+32, libc.Int32FromInt32(WAL_HDRSIZE)-libc.Int32FromInt32(2)*libc.Int32FromInt32(4), uintptr(0), bp+64)
_sqlite3Put4byte(tls, bp+32+24, (*(*[2]Tu32)(unsafe.Pointer(bp + 64)))[0])
_sqlite3Put4byte(tls, bp+32+28, (*(*[2]Tu32)(unsafe.Pointer(bp + 64)))[int32(1)])
(*TWal)(unsafe.Pointer(pWal)).FszPage = uint32(szPage)
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FbigEndCksum = uint8(SQLITE_BIGENDIAN)
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24)) = (*(*[2]Tu32)(unsafe.Pointer(bp + 64)))[0]
*(*Tu32)(unsafe.Pointer(pWal + 72 + 24 + 1*4)) = (*(*[2]Tu32)(unsafe.Pointer(bp + 64)))[int32(1)]
(*TWal)(unsafe.Pointer(pWal)).FtruncateOnCommit = uint8(1)
rc = _sqlite3OsWrite(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, bp+32, int32(32), 0)
if rc != SQLITE_OK {
return rc
}
/* Sync the header (unless SQLITE_IOCAP_SEQUENTIAL is true or unless
** all syncing is turned off by PRAGMA synchronous=OFF). Otherwise
** an out-of-order write following a WAL restart could result in
** database corruption. See the ticket:
**
** https://sqlite.org/src/info/ff5be73dee
*/
if (*TWal)(unsafe.Pointer(pWal)).FsyncHeader != 0 {
rc = _sqlite3OsSync(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, sync_flags>>int32(2)&int32(0x03))
if rc != 0 {
return rc
}
}
}
if int32((*TWal)(unsafe.Pointer(pWal)).FszPage) != szPage {
return _sqlite3CorruptError(tls, int32(68652)) /* TH3 test case: cov1/corrupt155.test */
}
/* Setup information needed to write frames into the WAL */
(*(*TWalWriter)(unsafe.Pointer(bp))).FpWal = pWal
(*(*TWalWriter)(unsafe.Pointer(bp))).FpFd = (*TWal)(unsafe.Pointer(pWal)).FpWalFd
(*(*TWalWriter)(unsafe.Pointer(bp))).FiSyncPoint = 0
(*(*TWalWriter)(unsafe.Pointer(bp))).FsyncFlags = sync_flags
(*(*TWalWriter)(unsafe.Pointer(bp))).FszPage = szPage
iOffset = libc.Int64FromInt32(WAL_HDRSIZE) + int64(iFrame+libc.Uint32FromInt32(1)-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE))
szFrame = szPage + int32(WAL_FRAME_HDRSIZE)
/* Write all frames into the log file exactly once */
p = pList
for {
if !(p != 0) {
break
} /* 0 normally. Positive == commit flag */
/* Check if this page has already been written into the wal file by
** the current transaction. If so, overwrite the existing frame and
** set Wal.writeLock to WAL_WRITELOCK_RECKSUM - indicating that
** checksums must be recomputed when the transaction is committed. */
if iFirst != 0 && ((*TPgHdr)(unsafe.Pointer(p)).FpDirty != 0 || isCommit == 0) {
*(*Tu32)(unsafe.Pointer(bp + 72)) = uint32(0)
_walFindFrame(tls, pWal, (*TPgHdr)(unsafe.Pointer(p)).Fpgno, bp+72)
if *(*Tu32)(unsafe.Pointer(bp + 72)) >= iFirst {
iOff = int64(WAL_HDRSIZE) + int64(*(*Tu32)(unsafe.Pointer(bp + 72))-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE)) + int64(WAL_FRAME_HDRSIZE)
if (*TWal)(unsafe.Pointer(pWal)).FiReCksum == uint32(0) || *(*Tu32)(unsafe.Pointer(bp + 72)) < (*TWal)(unsafe.Pointer(pWal)).FiReCksum {
(*TWal)(unsafe.Pointer(pWal)).FiReCksum = *(*Tu32)(unsafe.Pointer(bp + 72))
}
pData = (*TPgHdr)(unsafe.Pointer(p)).FpData
rc = _sqlite3OsWrite(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd, pData, szPage, iOff)
if rc != 0 {
return rc
}
p3 = p + 52
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^libc.Int32FromInt32(PGHDR_WAL_APPEND))
goto _2
}
}
iFrame++
if isCommit != 0 && (*TPgHdr)(unsafe.Pointer(p)).FpDirty == uintptr(0) {
v4 = nTruncate
} else {
v4 = uint32(0)
}
nDbSize = int32(v4)
rc = _walWriteOneFrame(tls, bp, p, nDbSize, iOffset)
if rc != 0 {
return rc
}
pLast = p
iOffset += int64(szFrame)
p5 = p + 52
*(*Tu16)(unsafe.Pointer(p5)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p5))) | libc.Int32FromInt32(PGHDR_WAL_APPEND))
goto _2
_2:
;
p = (*TPgHdr)(unsafe.Pointer(p)).FpDirty
}
/* Recalculate checksums within the wal file if required. */
if isCommit != 0 && (*TWal)(unsafe.Pointer(pWal)).FiReCksum != 0 {
rc = _walRewriteChecksums(tls, pWal, iFrame)
if rc != 0 {
return rc
}
}
/* If this is the end of a transaction, then we might need to pad
** the transaction and/or sync the WAL file.
**
** Padding and syncing only occur if this set of frames completes a
** transaction and if PRAGMA synchronous=FULL. If synchronous==NORMAL
** or synchronous==OFF, then no padding or syncing are needed.
**
** If SQLITE_IOCAP_POWERSAFE_OVERWRITE is defined, then padding is not
** needed and only the sync is done. If padding is needed, then the
** final frame is repeated (with its commit mark) until the next sector
** boundary is crossed. Only the part of the WAL prior to the last
** sector boundary is synced; the part of the last frame that extends
** past the sector boundary is written after the sync.
*/
if isCommit != 0 && sync_flags&int32(0x03) != 0 {
bSync = int32(1)
if (*TWal)(unsafe.Pointer(pWal)).FpadToSectorBoundary != 0 {
sectorSize = _sqlite3SectorSize(tls, (*TWal)(unsafe.Pointer(pWal)).FpWalFd)
(*(*TWalWriter)(unsafe.Pointer(bp))).FiSyncPoint = (iOffset + int64(sectorSize) - int64(1)) / int64(sectorSize) * int64(sectorSize)
bSync = libc.BoolInt32((*(*TWalWriter)(unsafe.Pointer(bp))).FiSyncPoint == iOffset)
for iOffset < (*(*TWalWriter)(unsafe.Pointer(bp))).FiSyncPoint {
rc = _walWriteOneFrame(tls, bp, pLast, int32(nTruncate), iOffset)
if rc != 0 {
return rc
}
iOffset += int64(szFrame)
nExtra++
}
}
if bSync != 0 {
rc = _sqlite3OsSync(tls, (*(*TWalWriter)(unsafe.Pointer(bp))).FpFd, sync_flags&int32(0x03))
}
}
/* If this frame set completes the first transaction in the WAL and
** if PRAGMA journal_size_limit is set, then truncate the WAL to the
** journal size limit, if possible.
*/
if isCommit != 0 && (*TWal)(unsafe.Pointer(pWal)).FtruncateOnCommit != 0 && (*TWal)(unsafe.Pointer(pWal)).FmxWalSize >= 0 {
sz = (*TWal)(unsafe.Pointer(pWal)).FmxWalSize
if int64(WAL_HDRSIZE)+int64(iFrame+uint32(nExtra)+libc.Uint32FromInt32(1)-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE)) > (*TWal)(unsafe.Pointer(pWal)).FmxWalSize {
sz = libc.Int64FromInt32(WAL_HDRSIZE) + int64(iFrame+uint32(nExtra)+libc.Uint32FromInt32(1)-libc.Uint32FromInt32(1))*int64(szPage+libc.Int32FromInt32(WAL_FRAME_HDRSIZE))
}
_walLimitSize(tls, pWal, sz)
(*TWal)(unsafe.Pointer(pWal)).FtruncateOnCommit = uint8(0)
}
/* Append data to the wal-index. It is not necessary to lock the
** wal-index to do this as the SQLITE_SHM_WRITE lock held on the wal-index
** guarantees that there are no other writers, and no data that may
** be in use by existing readers is being overwritten.
*/
iFrame = (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame
p = pList
for {
if !(p != 0 && rc == SQLITE_OK) {
break
}
if int32((*TPgHdr)(unsafe.Pointer(p)).Fflags)&int32(PGHDR_WAL_APPEND) == 0 {
goto _6
}
iFrame++
rc = _walIndexAppend(tls, pWal, iFrame, (*TPgHdr)(unsafe.Pointer(p)).Fpgno)
goto _6
_6:
;
p = (*TPgHdr)(unsafe.Pointer(p)).FpDirty
}
for rc == SQLITE_OK && nExtra > 0 {
iFrame++
nExtra--
rc = _walIndexAppend(tls, pWal, iFrame, (*TPgHdr)(unsafe.Pointer(pLast)).Fpgno)
}
if rc == SQLITE_OK {
/* Update the private copy of the header. */
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FszPage = uint16(szPage&libc.Int32FromInt32(0xff00) | szPage>>libc.Int32FromInt32(16))
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame = iFrame
if isCommit != 0 {
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FiChange++
(*TWal)(unsafe.Pointer(pWal)).Fhdr.FnPage = nTruncate
}
/* If this is a commit, update the wal-index header too. */
if isCommit != 0 {
_walIndexWriteHdr(tls, pWal)
(*TWal)(unsafe.Pointer(pWal)).FiCallback = iFrame
}
}
return rc
}
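// Illustrative sketch, not part of the generated translation: when a commit must
// be padded to a sector boundary, _walFrames above rounds the current WAL offset
// up to the next multiple of the sector size and repeats the final frame until
// that point is reached. The rounding is the usual integer round-up shown here.
func _exampleRoundUpToSector(iOffset int64, sectorSize int32) int64 {
return (iOffset + int64(sectorSize) - 1) / int64(sectorSize) * int64(sectorSize)
}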
// C documentation
//
// /*
// ** Write a set of frames to the log. The caller must hold the write-lock
// ** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
// **
// ** The difference between this function and walFrames() is that this
// ** function wraps walFrames() in an SEH_TRY{...} block.
// */
func _sqlite3WalFrames(tls *libc.TLS, pWal uintptr, szPage int32, pList uintptr, nTruncate TPgno, isCommit int32, sync_flags int32) (r int32) {
var rc int32
_ = rc
rc = _walFrames(tls, pWal, szPage, pList, nTruncate, isCommit, sync_flags)
return rc
}
// C documentation
//
// /*
// ** This routine is called to implement sqlite3_wal_checkpoint() and
// ** related interfaces.
// **
// ** Obtain a CHECKPOINT lock and then backfill as much information as
// ** we can from WAL into the database.
// **
// ** If parameter xBusy is not NULL, it is a pointer to a busy-handler
// ** callback. In this case this function runs a blocking checkpoint.
// */
func _sqlite3WalCheckpoint(tls *libc.TLS, pWal uintptr, db uintptr, eMode int32, xBusy uintptr, pBusyArg uintptr, sync_flags int32, nBuf int32, zBuf uintptr, pnLog uintptr, pnCkpt uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var eMode2, rc, v1 int32
var xBusy2 uintptr
var _ /* isChanged at bp+0 */ int32
_, _, _, _ = eMode2, rc, xBusy2, v1 /* Return code */
*(*int32)(unsafe.Pointer(bp)) = 0 /* True if a new wal-index header is loaded */
eMode2 = eMode /* Mode to pass to walCheckpoint() */
xBusy2 = xBusy /* Busy handler for eMode2 */
/* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked
** in the SQLITE_CHECKPOINT_PASSIVE mode. */
if (*TWal)(unsafe.Pointer(pWal)).FreadOnly != 0 {
return int32(SQLITE_READONLY)
}
/* Enable blocking locks, if possible. */
if xBusy2 != 0 {
}
/* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive
** "checkpoint" lock on the database file.
** EVIDENCE-OF: R-10421-19736 If any other process is running a
** checkpoint operation at the same time, the lock cannot be obtained and
** SQLITE_BUSY is returned.
** EVIDENCE-OF: R-53820-33897 Even if there is a busy-handler configured,
** it will not be invoked in this case.
*/
rc = _walLockExclusive(tls, pWal, int32(WAL_CKPT_LOCK), int32(1))
if rc == SQLITE_OK {
(*TWal)(unsafe.Pointer(pWal)).FckptLock = uint8(1)
/* IMPLEMENTATION-OF: R-59782-36818 The SQLITE_CHECKPOINT_FULL, RESTART and
** TRUNCATE modes also obtain the exclusive "writer" lock on the database
** file.
**
** EVIDENCE-OF: R-60642-04082 If the writer lock cannot be obtained
** immediately, and a busy-handler is configured, it is invoked and the
** writer lock retried until either the busy-handler returns 0 or the
** lock is successfully obtained.
*/
if eMode != SQLITE_CHECKPOINT_PASSIVE {
rc = _walBusyLock(tls, pWal, xBusy2, pBusyArg, WAL_WRITE_LOCK, int32(1))
if rc == SQLITE_OK {
(*TWal)(unsafe.Pointer(pWal)).FwriteLock = uint8(1)
} else {
if rc == int32(SQLITE_BUSY) {
eMode2 = SQLITE_CHECKPOINT_PASSIVE
xBusy2 = uintptr(0)
rc = SQLITE_OK
}
}
}
}
/* Read the wal-index header. */
if rc == SQLITE_OK {
/* For a passive checkpoint, do not re-enable blocking locks after
** reading the wal-index header. A passive checkpoint should not block
** or invoke the busy handler. The only lock such a checkpoint may
** attempt to obtain is a lock on a read-slot, and it should give up
** immediately and do a partial checkpoint if it cannot obtain it. */
rc = _walIndexReadHdr(tls, pWal, bp)
if eMode2 != SQLITE_CHECKPOINT_PASSIVE {
}
if *(*int32)(unsafe.Pointer(bp)) != 0 && (*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer((*TWal)(unsafe.Pointer(pWal)).FpDbFd)).FpMethods)).FiVersion >= int32(3) {
_sqlite3OsUnfetch(tls, (*TWal)(unsafe.Pointer(pWal)).FpDbFd, 0, uintptr(0))
}
}
/* Copy data from the log to the database file. */
if rc == SQLITE_OK {
if (*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame != 0 && _walPagesize(tls, pWal) != nBuf {
rc = _sqlite3CorruptError(tls, int32(68912))
} else {
rc = _walCheckpoint(tls, pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf)
}
/* If no error occurred, set the output variables. */
if rc == SQLITE_OK || rc == int32(SQLITE_BUSY) {
if pnLog != 0 {
*(*int32)(unsafe.Pointer(pnLog)) = int32((*TWal)(unsafe.Pointer(pWal)).Fhdr.FmxFrame)
}
if pnCkpt != 0 {
*(*int32)(unsafe.Pointer(pnCkpt)) = int32((*TWalCkptInfo)(unsafe.Pointer(_walCkptInfo(tls, pWal))).FnBackfill)
}
}
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
/* If a new wal-index header was loaded before the checkpoint was
** performed, then the pager-cache associated with pWal is now
** out of date. So zero the cached wal-index header to ensure that
** next time the pager opens a snapshot on this database it knows that
** the cache needs to be reset.
*/
libc.Xmemset(tls, pWal+72, 0, uint64(48))
}
/* Release the locks. */
_sqlite3WalEndWriteTransaction(tls, pWal)
if (*TWal)(unsafe.Pointer(pWal)).FckptLock != 0 {
_walUnlockExclusive(tls, pWal, int32(WAL_CKPT_LOCK), int32(1))
(*TWal)(unsafe.Pointer(pWal)).FckptLock = uint8(0)
}
if rc == SQLITE_OK && eMode != eMode2 {
v1 = int32(SQLITE_BUSY)
} else {
v1 = rc
}
return v1
}
// C documentation
//
// /* Return the value to pass to a sqlite3_wal_hook callback, the
// ** number of frames in the WAL at the point of the last commit since
// ** sqlite3WalCallback() was called. If no commits have occurred since
// ** the last call, then return 0.
// */
func _sqlite3WalCallback(tls *libc.TLS, pWal uintptr) (r int32) {
var ret Tu32
_ = ret
ret = uint32(0)
if pWal != 0 {
ret = (*TWal)(unsafe.Pointer(pWal)).FiCallback
(*TWal)(unsafe.Pointer(pWal)).FiCallback = uint32(0)
}
return int32(ret)
}
// C documentation
//
// /*
// ** This function is called to change the WAL subsystem into or out
// ** of locking_mode=EXCLUSIVE.
// **
// ** If op is zero, then attempt to change from locking_mode=EXCLUSIVE
// ** into locking_mode=NORMAL. This means that we must acquire a lock
// ** on the pWal->readLock byte. If the WAL is already in locking_mode=NORMAL
// ** or if the acquisition of the lock fails, then return 0. If the
// ** transition out of exclusive-mode is successful, return 1. This
// ** operation must occur while the pager is still holding the exclusive
// ** lock on the main database file.
// **
// ** If op is one, then change from locking_mode=NORMAL into
// ** locking_mode=EXCLUSIVE. This means that the pWal->readLock must
// ** be released. Return 1 if the transition is made and 0 if the
// ** WAL is already in exclusive-locking mode - meaning that this
// ** routine is a no-op. The pager must already hold the exclusive lock
// ** on the main database file before invoking this operation.
// **
// ** If op is negative, then do a dry-run of the op==1 case but do
// ** not actually change anything. The pager uses this to see if it
// ** should acquire the database exclusive lock prior to invoking
// ** the op==1 case.
// */
func _sqlite3WalExclusiveMode(tls *libc.TLS, pWal uintptr, op int32) (r int32) {
var rc int32
_ = rc
/* pWal->readLock is usually set, but might be -1 if there was a
** prior error while attempting to acquire a read-lock. This cannot
** happen if the connection is actually in exclusive mode (as no xShmLock
** locks are taken in this case). Nor should the pager attempt to
** upgrade to exclusive-mode following such an error.
*/
if op == 0 {
if int32((*TWal)(unsafe.Pointer(pWal)).FexclusiveMode) != WAL_NORMAL_MODE {
(*TWal)(unsafe.Pointer(pWal)).FexclusiveMode = uint8(WAL_NORMAL_MODE)
if _walLockShared(tls, pWal, int32(3)+int32((*TWal)(unsafe.Pointer(pWal)).FreadLock)) != SQLITE_OK {
(*TWal)(unsafe.Pointer(pWal)).FexclusiveMode = uint8(WAL_EXCLUSIVE_MODE)
}
rc = libc.BoolInt32(int32((*TWal)(unsafe.Pointer(pWal)).FexclusiveMode) == WAL_NORMAL_MODE)
} else {
/* Already in locking_mode=NORMAL */
rc = 0
}
} else {
if op > 0 {
_walUnlockShared(tls, pWal, int32(3)+int32((*TWal)(unsafe.Pointer(pWal)).FreadLock))
(*TWal)(unsafe.Pointer(pWal)).FexclusiveMode = uint8(WAL_EXCLUSIVE_MODE)
rc = int32(1)
} else {
rc = libc.BoolInt32(int32((*TWal)(unsafe.Pointer(pWal)).FexclusiveMode) == WAL_NORMAL_MODE)
}
}
return rc
}
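// Illustrative usage sketch, not part of the generated translation: a caller such
// as the pager can use the dry-run form (op<0) of _sqlite3WalExclusiveMode to
// decide whether it must take the exclusive database file lock before switching
// the WAL into exclusive locking mode with op==1. acquireFileLock is a
// hypothetical callback standing in for that pager-level lock.
func _exampleEnterExclusiveLockingMode(tls *libc.TLS, pWal uintptr, acquireFileLock func() bool) bool {
if _sqlite3WalExclusiveMode(tls, pWal, -int32(1)) != 0 {
// A real transition would take place, so the exclusive file lock must be
// in place before the shared-memory read lock is dropped.
if !acquireFileLock() {
return false
}
}
return _sqlite3WalExclusiveMode(tls, pWal, int32(1)) != 0
}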
// C documentation
//
// /*
// ** Return true if the argument is non-NULL and the WAL module is using
// ** heap-memory for the wal-index. Otherwise, if the argument is NULL or the
// ** WAL module is using shared-memory, return false.
// */
func _sqlite3WalHeapMemory(tls *libc.TLS, pWal uintptr) (r int32) {
return libc.BoolInt32(pWal != 0 && int32((*TWal)(unsafe.Pointer(pWal)).FexclusiveMode) == int32(WAL_HEAPMEMORY_MODE))
}
// C documentation
//
// /* Create a snapshot object. The content of a snapshot is opaque to
// ** every other subsystem, so the WAL module can put whatever it needs
// ** in the object.
// */
func _sqlite3WalSnapshotGet(tls *libc.TLS, pWal uintptr, ppSnapshot uintptr) (r int32) {
var pRet uintptr
var rc int32
_, _ = pRet, rc
rc = SQLITE_OK
if libc.Xmemcmp(tls, pWal+72+24, uintptr(unsafe.Pointer(&_aZero)), uint64(16)) == 0 {
*(*uintptr)(unsafe.Pointer(ppSnapshot)) = uintptr(0)
return int32(SQLITE_ERROR)
}
pRet = Xsqlite3_malloc(tls, int32(48))
if pRet == uintptr(0) {
rc = int32(SQLITE_NOMEM)
} else {
libc.Xmemcpy(tls, pRet, pWal+72, uint64(48))
*(*uintptr)(unsafe.Pointer(ppSnapshot)) = pRet
}
return rc
}
var _aZero = [4]Tu32{}
// C documentation
//
// /* Try to open on pSnapshot when the next read-transaction starts
// */
func _sqlite3WalSnapshotOpen(tls *libc.TLS, pWal uintptr, pSnapshot uintptr) {
(*TWal)(unsafe.Pointer(pWal)).FpSnapshot = pSnapshot
}
// C documentation
//
// /*
// ** Return a +ve value if snapshot p1 is newer than p2. A -ve value if
// ** p1 is older than p2 and zero if p1 and p2 are the same snapshot.
// */
func Xsqlite3_snapshot_cmp(tls *libc.TLS, p1 uintptr, p2 uintptr) (r int32) {
var pHdr1, pHdr2 uintptr
_, _ = pHdr1, pHdr2
pHdr1 = p1
pHdr2 = p2
/* aSalt[0] is a copy of the value stored in the wal file header. It
** is incremented each time the wal file is restarted. */
if *(*Tu32)(unsafe.Pointer(pHdr1 + 32)) < *(*Tu32)(unsafe.Pointer(pHdr2 + 32)) {
return -int32(1)
}
if *(*Tu32)(unsafe.Pointer(pHdr1 + 32)) > *(*Tu32)(unsafe.Pointer(pHdr2 + 32)) {
return +libc.Int32FromInt32(1)
}
if (*TWalIndexHdr)(unsafe.Pointer(pHdr1)).FmxFrame < (*TWalIndexHdr)(unsafe.Pointer(pHdr2)).FmxFrame {
return -int32(1)
}
if (*TWalIndexHdr)(unsafe.Pointer(pHdr1)).FmxFrame > (*TWalIndexHdr)(unsafe.Pointer(pHdr2)).FmxFrame {
return +libc.Int32FromInt32(1)
}
return 0
}
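// Illustrative usage sketch, not part of the generated translation: because
// Xsqlite3_snapshot_cmp orders snapshots first by WAL salt and then by mxFrame,
// picking the more recent of two snapshots of the same database file is a
// one-liner. The helper name is hypothetical.
func _exampleNewerSnapshot(tls *libc.TLS, p1, p2 uintptr) uintptr {
if Xsqlite3_snapshot_cmp(tls, p1, p2) >= 0 {
return p1
}
return p2
}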
// C documentation
//
// /*
// ** The caller currently has a read transaction open on the database.
// ** This function takes a SHARED lock on the CHECKPOINTER slot and then
// ** checks if the snapshot passed as the second argument is still
// ** available. If so, SQLITE_OK is returned.
// **
// ** If the snapshot is not available, SQLITE_ERROR is returned. Or, if
// ** the CHECKPOINTER lock cannot be obtained, SQLITE_BUSY. If any error
// ** occurs (any value other than SQLITE_OK is returned), the CHECKPOINTER
// ** lock is released before returning.
// */
func _sqlite3WalSnapshotCheck(tls *libc.TLS, pWal uintptr, pSnapshot uintptr) (r int32) {
var pNew uintptr
var rc int32
_, _ = pNew, rc
rc = _walLockShared(tls, pWal, int32(WAL_CKPT_LOCK))
if rc == SQLITE_OK {
pNew = pSnapshot
if libc.Xmemcmp(tls, pNew+32, pWal+72+32, uint64(8)) != 0 || (*TWalIndexHdr)(unsafe.Pointer(pNew)).FmxFrame < (*TWalCkptInfo)(unsafe.Pointer(_walCkptInfo(tls, pWal))).FnBackfillAttempted {
rc = libc.Int32FromInt32(SQLITE_ERROR) | libc.Int32FromInt32(3)<<libc.Int32FromInt32(8)
_walUnlockShared(tls, pWal, int32(WAL_CKPT_LOCK))
}
}
return rc
}
/*
** Access to all fields of this structure is controlled by the mutex
** stored in MemPage.pBt->mutex.
*/
type TMemPage1 = struct {
FisInit Tu8
FintKey Tu8
FintKeyLeaf Tu8
Fpgno TPgno
Fleaf Tu8
FhdrOffset Tu8
FchildPtrSize Tu8
Fmax1bytePayload Tu8
FnOverflow Tu8
FmaxLocal Tu16
FminLocal Tu16
FcellOffset Tu16
FnFree int32
FnCell Tu16
FmaskPage Tu16
FaiOvfl [4]Tu16
FapOvfl [4]uintptr
FpBt uintptr
FaData uintptr
FaDataEnd uintptr
FaCellIdx uintptr
FaDataOfst uintptr
FpDbPage uintptr
FxCellSize uintptr
FxParseCell uintptr
}
type MemPage1 = TMemPage1
/*
** A linked list of the following structures is stored at BtShared.pLock.
** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor
** is opened on the table with root page BtShared.iTable. Locks are removed
** from this list when a transaction is committed or rolled back, or when
** a btree handle is closed.
*/
type TBtLock1 = struct {
FpBtree uintptr
FiTable TPgno
FeLock Tu8
FpNext uintptr
}
type BtLock1 = TBtLock1
/* Candidate values for BtLock.eLock */
/* A Btree handle
**
** A database connection contains a pointer to an instance of
** this object for every database file that it has open. This structure
** is opaque to the database connection. The database connection cannot
** see the internals of this structure and only deals with pointers to
** this structure.
**
** For some database files, the same underlying database cache might be
** shared between multiple connections. In that case, each connection
** has its own instance of this object. But each instance of this object
** points to the same BtShared object. The database cache and the
** schema associated with the database file are all contained within
** the BtShared object.
**
** All fields in this structure are accessed under sqlite3.mutex.
** The pBt pointer itself may not be changed while there exists cursors
** in the referenced BtShared that point back to this Btree since those
** cursors have to go through this Btree to find their BtShared and
** they often do so without holding sqlite3.mutex.
*/
type TBtree1 = struct {
Fdb uintptr
FpBt uintptr
FinTrans Tu8
Fsharable Tu8
Flocked Tu8
FhasIncrblobCur Tu8
FwantToLock int32
FnBackup int32
FiBDataVersion Tu32
FpNext uintptr
FpPrev uintptr
Flock TBtLock
}
type Btree1 = TBtree1
/*
** Btree.inTrans may take one of the following values.
**
** If the shared-data extension is enabled, there may be multiple users
** of the Btree structure. At most one of these may open a write transaction,
** but any number may have active read transactions.
**
** These values must match SQLITE_TXN_NONE, SQLITE_TXN_READ, and
** SQLITE_TXN_WRITE
*/
/*
** An instance of this object represents a single database file.
**
** A single database file can be in use at the same time by two
** or more database connections. When two or more connections are
** sharing the same database file, each connection has its own
** private Btree object for the file and each of those Btrees points
** to this one BtShared object. BtShared.nRef is the number of
** connections currently sharing this database file.
**
** Fields in this structure are accessed under the BtShared.mutex
** mutex, except for nRef and pNext which are accessed under the
** global SQLITE_MUTEX_STATIC_MAIN mutex. The pPager field
** may not be modified once it is initially set as long as nRef>0.
** The pSchema field may be set once under BtShared.mutex and
** thereafter is unchanged as long as nRef>0.
**
** isPending:
**
** If a BtShared client fails to obtain a write-lock on a database
** table (because there exists one or more read-locks on the table),
** the shared-cache enters 'pending-lock' state and isPending is
** set to true.
**
** The shared-cache leaves the 'pending lock' state when either of
** the following occur:
**
** 1) The current writer (BtShared.pWriter) concludes its transaction, OR
** 2) The number of locks held by other connections drops to zero.
**
** while in the 'pending-lock' state, no connection may start a new
** transaction.
**
** This feature is included to help prevent writer-starvation.
*/
type TBtShared1 = struct {
FpPager uintptr
Fdb uintptr
FpCursor uintptr
FpPage1 uintptr
FopenFlags Tu8
FautoVacuum Tu8
FincrVacuum Tu8
FbDoTruncate Tu8
FinTransaction Tu8
Fmax1bytePayload Tu8
FnReserveWanted Tu8
FbtsFlags Tu16
FmaxLocal Tu16
FminLocal Tu16
FmaxLeaf Tu16
FminLeaf Tu16
FpageSize Tu32
FusableSize Tu32
FnTransaction int32
FnPage Tu32
FpSchema uintptr
FxFreeSchema uintptr
Fmutex uintptr
FpHasContent uintptr
FnRef int32
FpNext uintptr
FpLock uintptr
FpWriter uintptr
FpTmpSpace uintptr
FnPreformatSize int32
}
type BtShared1 = TBtShared1
/*
** Allowed values for BtShared.btsFlags
*/
/*
** An instance of the following structure is used to hold information
** about a cell. The parseCellPtr() function fills in this structure
** based on information extracted from the raw disk page.
*/
type TCellInfo1 = struct {
FnKey Ti64
FpPayload uintptr
FnPayload Tu32
FnLocal Tu16
FnSize Tu16
}
type CellInfo1 = TCellInfo1
/*
** Maximum depth of an SQLite B-Tree structure. Any B-Tree deeper than
** this will be declared corrupt. This value is calculated based on a
** maximum database size of 2^31 pages, a minimum fanout of 2 for a
** root-node and 3 for all other internal nodes.
**
** If a tree that appears to be taller than this is encountered, it is
** assumed that the database is corrupt.
*/
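// Worked check, illustrative only: with a fanout of 2 at the root and 3 at every
// other internal node, a tree of depth d can reach 2*3^(d-1) leaves, and the
// smallest depth whose leaf count covers 2^31 pages is 20 - which is where the
// maximum-depth limit described above comes from.
func _exampleMinBtreeDepthFor2e31Pages() int32 {
leaves := uint64(2)
depth := int32(1)
for leaves < uint64(1)<<31 {
leaves *= 3
depth++
}
return depth // 20
}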
/*
** A cursor is a pointer to a particular entry within a particular
** b-tree within a database file.
**
** The entry is identified by its MemPage and the index in
** MemPage.aCell[] of the entry.
**
** A single database file can be shared by two or more database connections,
** but cursors cannot be shared. Each cursor is associated with a
** particular database connection identified BtCursor.pBtree.db.
**
** Fields in this structure are accessed under the BtShared.mutex
** found at self->pBt->mutex.
**
** skipNext meaning:
** The meaning of skipNext depends on the value of eState:
**
** eState Meaning of skipNext
** VALID skipNext is meaningless and is ignored
** INVALID skipNext is meaningless and is ignored
** SKIPNEXT sqlite3BtreeNext() is a no-op if skipNext>0 and
** sqlite3BtreePrevious() is no-op if skipNext<0.
** REQUIRESEEK restoreCursorPosition() restores the cursor to
** eState=SKIPNEXT if skipNext!=0
** FAULT skipNext holds the cursor fault error code.
*/
type TBtCursor1 = struct {
FeState Tu8
FcurFlags Tu8
FcurPagerFlags Tu8
Fhints Tu8
FskipNext int32
FpBtree uintptr
FaOverflow uintptr
FpKey uintptr
FpBt uintptr
FpNext uintptr
Finfo TCellInfo
FnKey Ti64
FpgnoRoot TPgno
FiPage Ti8
FcurIntKey Tu8
Fix Tu16
FaiIdx [19]Tu16
FpKeyInfo uintptr
FpPage uintptr
FapPage [19]uintptr
}
type BtCursor1 = TBtCursor1
/*
** Legal values for BtCursor.curFlags
*/
/*
** Potential values for BtCursor.eState.
**
** CURSOR_INVALID:
** Cursor does not point to a valid entry. This can happen (for example)
** because the table is empty or because BtreeCursorFirst() has not been
** called.
**
** CURSOR_VALID:
** Cursor points to a valid entry. getPayload() etc. may be called.
**
** CURSOR_SKIPNEXT:
** Cursor is valid except that the Cursor.skipNext field is non-zero
** indicating that the next sqlite3BtreeNext() or sqlite3BtreePrevious()
** operation should be a no-op.
**
** CURSOR_REQUIRESEEK:
** The table that this cursor was opened on still exists, but has been
** modified since the cursor was last used. The cursor position is saved
** in variables BtCursor.pKey and BtCursor.nKey. When a cursor is in
** this state, restoreCursorPosition() can be called to attempt to
** seek the cursor to the saved position.
**
** CURSOR_FAULT:
** An unrecoverable error (an I/O error or a malloc failure) has occurred
** on a different connection that shares the BtShared cache with this
** cursor. The error has left the cache in an inconsistent state.
** Do nothing else with this cursor. Any attempt to use the cursor
** should return the error code stored in BtCursor.skipNext
*/
/*
** The database page the PENDING_BYTE occupies. This page is never used.
*/
/*
** These macros define the location of the pointer-map entry for a
** database page. The first argument to each is the number of usable
** bytes on each page of the database (often 1024). The second is the
** page number to look up in the pointer map.
**
** PTRMAP_PAGENO returns the database page number of the pointer-map
** page that stores the required pointer. PTRMAP_PTROFFSET returns
** the offset of the requested map entry.
**
** If the pgno argument passed to PTRMAP_PAGENO is a pointer-map page,
** then pgno is returned. So (pgno==PTRMAP_PAGENO(pgsz, pgno)) can be
** used to test if pgno is a pointer-map page. PTRMAP_ISPAGE implements
** this test.
*/
/*
** The pointer map is a lookup table that identifies the parent page for
** each child page in the database file. The parent page is the page that
** contains a pointer to the child. Every page in the database contains
** 0 or 1 parent pages. (In this context 'database page' refers
** to any page that is not part of the pointer map itself.) Each pointer map
** entry consists of a single byte 'type' and a 4 byte parent page number.
** The PTRMAP_XXX identifiers below are the valid types.
**
** The purpose of the pointer map is to facilitate moving pages from one
** position in the file to another as part of autovacuum. When a page
** is moved, the pointer in its parent must be updated to point to the
** new location. The pointer map is used to locate the parent page quickly.
**
** PTRMAP_ROOTPAGE: The database page is a root-page. The page-number is not
** used in this case.
**
** PTRMAP_FREEPAGE: The database page is an unused (free) page. The page-number
** is not used in this case.
**
** PTRMAP_OVERFLOW1: The database page is the first page in a list of
** overflow pages. The page number identifies the page that
** contains the cell with a pointer to this overflow page.
**
** PTRMAP_OVERFLOW2: The database page is the second or later page in a list of
** overflow pages. The page-number identifies the previous
** page in the overflow page list.
**
** PTRMAP_BTREE: The database page is a non-root btree page. The page number
** identifies the parent page in the btree.
*/
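// Illustrative sketch, not part of the generated translation: each pointer-map
// entry is 5 bytes (a 1-byte type plus a 4-byte parent page number), so one
// pointer-map page covers usableSize/5 following pages. This hypothetical helper
// mirrors the PTRMAP_PAGENO() computation described above; the adjustment the
// real code makes for the PENDING_BYTE page is omitted here.
func _examplePtrmapPageno(usableSize int32, pgno uint32) uint32 {
nPagesPerMapPage := uint32(usableSize/5) + 1
iPtrMap := (pgno - 2) / nPagesPerMapPage
return iPtrMap*nPagesPerMapPage + 2
}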
/* A bunch of assert() statements to check the transaction state variables
** of handle p (type Btree*) are internally consistent.
*/
/*
** The ISAUTOVACUUM macro is used within balance_nonroot() to determine
** if the database supports auto-vacuum or not. Because it is used
** within an expression that is an argument to another macro
** (sqliteMallocRaw), it is not possible to use conditional compilation.
** So, this macro is defined instead.
*/
// C documentation
//
// /*
// ** This structure is passed around through all the PRAGMA integrity_check
// ** checking routines in order to keep track of some global state information.
// **
// ** The aRef[] array is allocated so that there is 1 bit for each page in
// ** the database. As the integrity-check proceeds, for each page used in
// ** the database the corresponding bit is set. This allows integrity-check to
// ** detect pages that are used twice and orphaned pages (both of which
// ** indicate corruption).
// */
type TIntegrityCk = struct {
FpBt uintptr
FpPager uintptr
FaPgRef uintptr
FnCkPage TPgno
FmxErr int32
FnErr int32
Frc int32
FnStep Tu32
FzPfx uintptr
Fv0 TPgno
Fv1 TPgno
Fv2 int32
FerrMsg TStrAccum
Fheap uintptr
Fdb uintptr
}
type IntegrityCk = TIntegrityCk
type TIntegrityCk1 = struct {
FpBt uintptr
FpPager uintptr
FaPgRef uintptr
FnCkPage TPgno
FmxErr int32
FnErr int32
Frc int32
FnStep Tu32
FzPfx uintptr
Fv0 TPgno
Fv1 TPgno
Fv2 int32
FerrMsg TStrAccum
Fheap uintptr
Fdb uintptr
}
type IntegrityCk1 = TIntegrityCk1
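// Illustrative sketch, not part of the generated translation: the integrity check
// records visited pages in the FaPgRef bitmap, one bit per page, so that pages
// used twice and orphaned pages can both be detected. The bit operations are the
// usual ones, shown here over a plain byte slice with hypothetical names.
func _examplePageRefBitGet(aPgRef []uint8, pgno uint32) bool {
return aPgRef[pgno/8]&(uint8(1)<<(pgno&7)) != 0
}
func _examplePageRefBitSet(aPgRef []uint8, pgno uint32) {
aPgRef[pgno/8] |= uint8(1) << (pgno & 7)
}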
/*
** Routines to read or write two- and four-byte big-endian integer values.
*/
/*
** get2byteAligned(), unlike get2byte(), requires that its argument point to a
** two-byte aligned address. get2byteAligned() is only used for accessing the
** cell addresses in a btree header.
*/
/************** End of btreeInt.h ********************************************/
/************** Continuing where we left off in btmutex.c ********************/
// C documentation
//
// /*
// ** Obtain the BtShared mutex associated with B-Tree handle p. Also,
// ** set BtShared.db to the database handle associated with p and the
// ** p->locked boolean to true.
// */
func _lockBtreeMutex(tls *libc.TLS, p uintptr) {
Xsqlite3_mutex_enter(tls, (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).Fmutex)
(*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).Fdb = (*TBtree)(unsafe.Pointer(p)).Fdb
(*TBtree)(unsafe.Pointer(p)).Flocked = uint8(1)
}
// C documentation
//
// /*
// ** Release the BtShared mutex associated with B-Tree handle p and
// ** clear the p->locked boolean.
// */
func _unlockBtreeMutex(tls *libc.TLS, p uintptr) {
var pBt uintptr
_ = pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
Xsqlite3_mutex_leave(tls, (*TBtShared)(unsafe.Pointer(pBt)).Fmutex)
(*TBtree)(unsafe.Pointer(p)).Flocked = uint8(0)
}
// C documentation
//
// /*
// ** Enter a mutex on the given BTree object.
// **
// ** If the object is not sharable, then no mutex is ever required
// ** and this routine is a no-op. The underlying mutex is non-recursive.
// ** But we keep a reference count in Btree.wantToLock so the behavior
// ** of this interface is recursive.
// **
// ** To avoid deadlocks, multiple Btrees are locked in the same order
// ** by all database connections. The p->pNext is a list of other
// ** Btrees belonging to the same database connection as the p Btree
// ** which need to be locked after p. If we cannot get a lock on
// ** p, then first unlock all of the others on p->pNext, then wait
// ** for the lock to become available on p, then relock all of the
// ** subsequent Btrees that desire a lock.
// */
func _sqlite3BtreeEnter(tls *libc.TLS, p uintptr) {
/* Some basic sanity checking on the Btree. The list of Btrees
** connected by pNext and pPrev should be in sorted order by
** Btree.pBt value. All elements of the list should belong to
** the same connection. Only shared Btrees are on the list. */
/* Check for locking consistency */
/* We should already hold a lock on the database connection */
/* Unless the database is sharable and unlocked, then BtShared.db
** should already be set correctly. */
if !((*TBtree)(unsafe.Pointer(p)).Fsharable != 0) {
return
}
(*TBtree)(unsafe.Pointer(p)).FwantToLock++
if (*TBtree)(unsafe.Pointer(p)).Flocked != 0 {
return
}
_btreeLockCarefully(tls, p)
}
// C documentation
//
// /* This is a helper function for sqlite3BtreeLock(). By moving
// ** complex, but seldom used logic, out of sqlite3BtreeLock() and
// ** into this routine, we avoid unnecessary stack pointer changes
// ** and thus help the sqlite3BtreeLock() routine to run much faster
// ** in the common case.
// */
func _btreeLockCarefully(tls *libc.TLS, p uintptr) {
var pLater uintptr
_ = pLater
/* In most cases, we should be able to acquire the lock we
** want without having to go through the ascending lock
** procedure that follows. Just be sure not to block.
*/
if Xsqlite3_mutex_try(tls, (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).Fmutex) == SQLITE_OK {
(*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).Fdb = (*TBtree)(unsafe.Pointer(p)).Fdb
(*TBtree)(unsafe.Pointer(p)).Flocked = uint8(1)
return
}
/* To avoid deadlock, first release all locks with a larger
** BtShared address. Then acquire our lock. Then reacquire
** the other BtShared locks that we used to hold in ascending
** order.
*/
pLater = (*TBtree)(unsafe.Pointer(p)).FpNext
for {
if !(pLater != 0) {
break
}
if (*TBtree)(unsafe.Pointer(pLater)).Flocked != 0 {
_unlockBtreeMutex(tls, pLater)
}
goto _1
_1:
;
pLater = (*TBtree)(unsafe.Pointer(pLater)).FpNext
}
_lockBtreeMutex(tls, p)
pLater = (*TBtree)(unsafe.Pointer(p)).FpNext
for {
if !(pLater != 0) {
break
}
if (*TBtree)(unsafe.Pointer(pLater)).FwantToLock != 0 {
_lockBtreeMutex(tls, pLater)
}
goto _2
_2:
;
pLater = (*TBtree)(unsafe.Pointer(pLater)).FpNext
}
}
// C documentation
//
// /*
// ** Exit the recursive mutex on a Btree.
// */
func _sqlite3BtreeLeave(tls *libc.TLS, p uintptr) {
if (*TBtree)(unsafe.Pointer(p)).Fsharable != 0 {
(*TBtree)(unsafe.Pointer(p)).FwantToLock--
if (*TBtree)(unsafe.Pointer(p)).FwantToLock == 0 {
_unlockBtreeMutex(tls, p)
}
}
}
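// Illustrative sketch, not part of the generated translation: Btree.FwantToLock
// is what makes the non-recursive BtShared mutex behave recursively across
// _sqlite3BtreeEnter and _sqlite3BtreeLeave. Isolated from the Btree structures,
// the pattern is an ordinary enter/leave depth counter; the type and field names
// below are hypothetical.
type _exampleRecursiveMutex struct {
depth  int
lock   func()
unlock func()
}
func (p *_exampleRecursiveMutex) Enter() {
p.depth++
if p.depth == 1 {
p.lock() // only the outermost Enter takes the real mutex
}
}
func (p *_exampleRecursiveMutex) Leave() {
p.depth--
if p.depth == 0 {
p.unlock() // only the outermost Leave releases it
}
}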
// C documentation
//
// /*
// ** Enter the mutex on every Btree associated with a database
// ** connection. This is needed (for example) prior to parsing
// ** a statement since we will be comparing table and column names
// ** against all schemas and we do not want those schemas being
// ** reset out from under us.
// **
** There is a corresponding leave-all procedure.
// **
// ** Enter the mutexes in ascending order by BtShared pointer address
// ** to avoid the possibility of deadlock when two threads with
// ** two or more btrees in common both try to lock all their btrees
// ** at the same instant.
// */
func _btreeEnterAll(tls *libc.TLS, db uintptr) {
var i, skipOk int32
var p uintptr
_, _, _ = i, p, skipOk
skipOk = int32(1)
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
p = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if p != 0 && (*TBtree)(unsafe.Pointer(p)).Fsharable != 0 {
_sqlite3BtreeEnter(tls, p)
skipOk = 0
}
goto _1
_1:
;
i++
}
(*Tsqlite3)(unsafe.Pointer(db)).FnoSharedCache = uint8(skipOk)
}
func _sqlite3BtreeEnterAll(tls *libc.TLS, db uintptr) {
if int32((*Tsqlite3)(unsafe.Pointer(db)).FnoSharedCache) == 0 {
_btreeEnterAll(tls, db)
}
}
func _btreeLeaveAll(tls *libc.TLS, db uintptr) {
var i int32
var p uintptr
_, _ = i, p
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
p = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if p != 0 {
_sqlite3BtreeLeave(tls, p)
}
goto _1
_1:
;
i++
}
}
func _sqlite3BtreeLeaveAll(tls *libc.TLS, db uintptr) {
if int32((*Tsqlite3)(unsafe.Pointer(db)).FnoSharedCache) == 0 {
_btreeLeaveAll(tls, db)
}
}
// C documentation
//
// /*
// ** Enter a mutex on a Btree given a cursor owned by that Btree.
// **
// ** These entry points are used by incremental I/O only. Enter() is required
// ** any time OMIT_SHARED_CACHE is not defined, regardless of whether or not
// ** the build is threadsafe. Leave() is only required by threadsafe builds.
// */
func _sqlite3BtreeEnterCursor(tls *libc.TLS, pCur uintptr) {
_sqlite3BtreeEnter(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpBtree)
}
func _sqlite3BtreeLeaveCursor(tls *libc.TLS, pCur uintptr) {
_sqlite3BtreeLeave(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpBtree)
}
/************** End of btmutex.c *********************************************/
/************** Begin file btree.c *******************************************/
/*
** 2004 April 6
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file implements an external (disk-based) database using BTrees.
** See the header comment on "btreeInt.h" for additional information.
** Including a description of file format and an overview of operation.
*/
/* #include "btreeInt.h" */
// C documentation
//
// /*
// ** The header string that appears at the beginning of every
// ** SQLite database.
// */
var _zMagicHeader = [16]int8{'S', 'Q', 'L', 'i', 't', 'e', ' ', 'f', 'o', 'r', 'm', 'a', 't', ' ', '3'}
/*
** Set this global variable to 1 to enable tracing using the TRACE
** macro.
*/
/*
** Extract a 2-byte big-endian integer from an array of unsigned bytes.
** But if the value is zero, make it 65536.
**
** This routine is used to extract the "offset to cell content area" value
** from the header of a btree page. If the page size is 65536 and the page
** is empty, the offset should be 65536, but the 2-byte value stores zero.
** This routine makes the necessary adjustment to 65536.
*/
/*
** Values passed as the 5th argument to allocateBtreePage()
*/
/*
** Macro IfNotOmitAV(x) returns (x) if SQLITE_OMIT_AUTOVACUUM is not
** defined, or 0 if it is. For example:
**
** bIncrVacuum = IfNotOmitAV(pBtShared->incrVacuum);
*/
// C documentation
//
// /*
// ** A list of BtShared objects that are eligible for participation
// ** in shared cache. This variable has file scope during normal builds,
// ** but the test harness needs to access it so we make it global for
// ** test builds.
// **
// ** Access to this variable is protected by SQLITE_MUTEX_STATIC_MAIN.
// */
var _sqlite3SharedCacheList = uintptr(0)
// C documentation
//
// /*
// ** Enable or disable the shared pager and schema features.
// **
// ** This routine has no effect on existing database connections.
** The shared cache setting affects only future calls to
// ** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
// */
func Xsqlite3_enable_shared_cache(tls *libc.TLS, enable int32) (r int32) {
_sqlite3Config.FsharedCacheEnabled = enable
return SQLITE_OK
}
/*
** Implementation of the SQLITE_CORRUPT_PAGE() macro. Takes a single
** (MemPage*) as an argument. The (MemPage*) must not be NULL.
**
** If SQLITE_DEBUG is not defined, then this macro is equivalent to
** SQLITE_CORRUPT_BKPT. Or, if SQLITE_DEBUG is set, then the log message
** normally produced as a side-effect of SQLITE_CORRUPT_BKPT is augmented
** with the page number and filename associated with the (MemPage*).
*/
// C documentation
//
// /*
// ** Query to see if Btree handle p may obtain a lock of type eLock
// ** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return
// ** SQLITE_OK if the lock may be obtained (by calling
// ** setSharedCacheTableLock()), or SQLITE_LOCKED if not.
// */
func _querySharedCacheTableLock(tls *libc.TLS, p uintptr, iTab TPgno, eLock Tu8) (r int32) {
var pBt, pIter, p2 uintptr
_, _, _ = pBt, pIter, p2
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
/* If requesting a write-lock, then the Btree must have an open write
** transaction on this file. And, obviously, for this to be so there
** must be an open write transaction on the file itself.
*/
/* This routine is a no-op if the shared-cache is not enabled */
if !((*TBtree)(unsafe.Pointer(p)).Fsharable != 0) {
return SQLITE_OK
}
/* If some other connection is holding an exclusive lock, the
** requested lock may not be obtained.
*/
if (*TBtShared)(unsafe.Pointer(pBt)).FpWriter != p && int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_EXCLUSIVE) != 0 {
_sqlite3ConnectionBlocked(tls, (*TBtree)(unsafe.Pointer(p)).Fdb, (*TBtree)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpWriter)).Fdb)
return libc.Int32FromInt32(SQLITE_LOCKED) | libc.Int32FromInt32(1)<<libc.Int32FromInt32(8)
}
pIter = (*TBtShared)(unsafe.Pointer(pBt)).FpLock
/* The condition (pIter->eLock!=eLock) in the following if(...)
** statement is a simplification of:
**
** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK)
**
** since we know that if eLock==WRITE_LOCK, then no other connection
** may hold a WRITE_LOCK on any table in this file (since there can
** only be a single writer).
*/
if (*TBtLock)(unsafe.Pointer(pIter)).FpBtree != p && (*TBtLock)(unsafe.Pointer(pIter)).FiTable == iTab && int32((*TBtLock)(unsafe.Pointer(pIter)).FeLock) != int32(eLock) {
_sqlite3ConnectionBlocked(tls, (*TBtree)(unsafe.Pointer(p)).Fdb, (*TBtree)(unsafe.Pointer((*TBtLock)(unsafe.Pointer(pIter)).FpBtree)).Fdb)
if int32(eLock) == int32(WRITE_LOCK) {
p2 = pBt + 40
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(BTS_PENDING))
}
return libc.Int32FromInt32(SQLITE_LOCKED) | libc.Int32FromInt32(1)<<libc.Int32FromInt32(8)
}
if int32(eLock) > int32((*TBtLock)(unsafe.Pointer(pLock)).FeLock) {
(*TBtLock)(unsafe.Pointer(pLock)).FeLock = eLock
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Release all the table locks (locks obtained via calls to
// ** the setSharedCacheTableLock() procedure) held by Btree object p.
// **
// ** This function assumes that Btree p has an open read or write
// ** transaction. If it does not, then the BTS_PENDING flag
// ** may be incorrectly cleared.
// */
func _clearAllSharedCacheTableLocks(tls *libc.TLS, p uintptr) {
var pBt, pLock, ppIter, p1, p2 uintptr
_, _, _, _, _ = pBt, pLock, ppIter, p1, p2
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
ppIter = pBt + 120
for *(*uintptr)(unsafe.Pointer(ppIter)) != 0 {
pLock = *(*uintptr)(unsafe.Pointer(ppIter))
if (*TBtLock)(unsafe.Pointer(pLock)).FpBtree == p {
*(*uintptr)(unsafe.Pointer(ppIter)) = (*TBtLock)(unsafe.Pointer(pLock)).FpNext
if (*TBtLock)(unsafe.Pointer(pLock)).FiTable != uint32(1) {
Xsqlite3_free(tls, pLock)
}
} else {
ppIter = pLock + 16
}
}
if (*TBtShared)(unsafe.Pointer(pBt)).FpWriter == p {
(*TBtShared)(unsafe.Pointer(pBt)).FpWriter = uintptr(0)
p1 = pBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(BTS_EXCLUSIVE) | libc.Int32FromInt32(BTS_PENDING)))
} else {
if (*TBtShared)(unsafe.Pointer(pBt)).FnTransaction == int32(2) {
/* This function is called when Btree p is concluding its
** transaction. If there currently exists a writer, and p is not
** that writer, then the number of locks held by connections other
** than the writer must be about to drop to zero. In this case
** set the BTS_PENDING flag to 0.
**
** If there is not currently a writer, then BTS_PENDING must
** be zero already. So this next line is harmless in that case.
*/
p2 = pBt + 40
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^libc.Int32FromInt32(BTS_PENDING))
}
}
}
// C documentation
//
// /*
// ** This function changes all write-locks held by Btree p into read-locks.
// */
func _downgradeAllSharedCacheTableLocks(tls *libc.TLS, p uintptr) {
var pBt, pLock, p1 uintptr
_, _, _ = pBt, pLock, p1
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
if (*TBtShared)(unsafe.Pointer(pBt)).FpWriter == p {
(*TBtShared)(unsafe.Pointer(pBt)).FpWriter = uintptr(0)
p1 = pBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(BTS_EXCLUSIVE) | libc.Int32FromInt32(BTS_PENDING)))
pLock = (*TBtShared)(unsafe.Pointer(pBt)).FpLock
for {
if !(pLock != 0) {
break
}
(*TBtLock)(unsafe.Pointer(pLock)).FeLock = uint8(READ_LOCK)
goto _2
_2:
;
pLock = (*TBtLock)(unsafe.Pointer(pLock)).FpNext
}
}
}
/* Forward reference */
/*
***** This routine is used inside of assert() only ****
**
** Verify that the cursor holds the mutex on its BtShared
*/
/*
** Invalidate the overflow page-list cache of the cursor passed as the
** first argument.
*/
// C documentation
//
// /*
// ** Invalidate the overflow page-list cache for all cursors opened
// ** on the shared btree structure pBt.
// */
func _invalidateAllOverflowCache(tls *libc.TLS, pBt uintptr) {
var p, p2 uintptr
_, _ = p, p2
p = (*TBtShared)(unsafe.Pointer(pBt)).FpCursor
for {
if !(p != 0) {
break
}
p2 = p + 1
*(*Tu8)(unsafe.Pointer(p2)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p2))) & ^libc.Int32FromInt32(BTCF_ValidOvfl))
goto _1
_1:
;
p = (*TBtCursor)(unsafe.Pointer(p)).FpNext
}
}
// C documentation
//
// /*
// ** This function is called before modifying the contents of a table
// ** to invalidate any incrblob cursors that are open on the
// ** row or one of the rows being modified.
// **
// ** If argument isClearTable is true, then the entire contents of the
// ** table is about to be deleted. In this case invalidate all incrblob
// ** cursors open on any row within the table with root-page pgnoRoot.
// **
// ** Otherwise, if argument isClearTable is false, then the row with
// ** rowid iRow is being replaced or deleted. In this case invalidate
// ** only those incrblob cursors open on that specific row.
// */
func _invalidateIncrblobCursors(tls *libc.TLS, pBtree uintptr, pgnoRoot TPgno, iRow Ti64, isClearTable int32) {
var p uintptr
_ = p
(*TBtree)(unsafe.Pointer(pBtree)).FhasIncrblobCur = uint8(0)
p = (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(pBtree)).FpBt)).FpCursor
for {
if !(p != 0) {
break
}
if int32((*TBtCursor)(unsafe.Pointer(p)).FcurFlags)&int32(BTCF_Incrblob) != 0 {
(*TBtree)(unsafe.Pointer(pBtree)).FhasIncrblobCur = uint8(1)
if (*TBtCursor)(unsafe.Pointer(p)).FpgnoRoot == pgnoRoot && (isClearTable != 0 || (*TBtCursor)(unsafe.Pointer(p)).Finfo.FnKey == iRow) {
(*TBtCursor)(unsafe.Pointer(p)).FeState = uint8(CURSOR_INVALID)
}
}
goto _1
_1:
;
p = (*TBtCursor)(unsafe.Pointer(p)).FpNext
}
}
// C documentation
//
// /*
// ** Set bit pgno of the BtShared.pHasContent bitvec. This is called
// ** when a page that previously contained data becomes a free-list leaf
// ** page.
// **
// ** The BtShared.pHasContent bitvec exists to work around an obscure
// ** bug caused by the interaction of two useful IO optimizations surrounding
// ** free-list leaf pages:
// **
// ** 1) When all data is deleted from a page and the page becomes
// ** a free-list leaf page, the page is not written to the database
// ** (as free-list leaf pages contain no meaningful data). Sometimes
// ** such a page is not even journalled (as it will not be modified,
// ** why bother journalling it?).
// **
// ** 2) When a free-list leaf page is reused, its content is not read
// ** from the database or written to the journal file (why should it
// ** be, if it is not at all meaningful?).
// **
// ** By themselves, these optimizations work fine and provide a handy
// ** performance boost to bulk delete or insert operations. However, if
// ** a page is moved to the free-list and then reused within the same
// ** transaction, a problem comes up. If the page is not journalled when
// ** it is moved to the free-list and it is also not journalled when it
// ** is extracted from the free-list and reused, then the original data
// ** may be lost. In the event of a rollback, it may not be possible
// ** to restore the database to its original configuration.
// **
// ** The solution is the BtShared.pHasContent bitvec. Whenever a page is
// ** moved to become a free-list leaf page, the corresponding bit is
// ** set in the bitvec. Whenever a leaf page is extracted from the free-list,
// ** optimization 2 above is omitted if the corresponding bit is already
// ** set in BtShared.pHasContent. The contents of the bitvec are cleared
// ** at the end of every transaction.
// */
func _btreeSetHasContent(tls *libc.TLS, pBt uintptr, pgno TPgno) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
if !((*TBtShared)(unsafe.Pointer(pBt)).FpHasContent != 0) {
(*TBtShared)(unsafe.Pointer(pBt)).FpHasContent = _sqlite3BitvecCreate(tls, (*TBtShared)(unsafe.Pointer(pBt)).FnPage)
if !((*TBtShared)(unsafe.Pointer(pBt)).FpHasContent != 0) {
rc = int32(SQLITE_NOMEM)
}
}
if rc == SQLITE_OK && pgno <= _sqlite3BitvecSize(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpHasContent) {
rc = _sqlite3BitvecSet(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpHasContent, pgno)
}
return rc
}
// C documentation
//
// /*
// ** Query the BtShared.pHasContent vector.
// **
// ** This function is called when a free-list leaf page is removed from the
// ** free-list for reuse. It returns false if it is safe to retrieve the
// ** page from the pager layer with the 'no-content' flag set. True otherwise.
// */
func _btreeGetHasContent(tls *libc.TLS, pBt uintptr, pgno TPgno) (r int32) {
var p uintptr
_ = p
p = (*TBtShared)(unsafe.Pointer(pBt)).FpHasContent
return libc.BoolInt32(p != 0 && (pgno > _sqlite3BitvecSize(tls, p) || _sqlite3BitvecTestNotNull(tls, p, pgno) != 0))
}
// C documentation
//
// /*
// ** Clear (destroy) the BtShared.pHasContent bitvec. This should be
// ** invoked at the conclusion of each write-transaction.
// */
func _btreeClearHasContent(tls *libc.TLS, pBt uintptr) {
_sqlite3BitvecDestroy(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpHasContent)
(*TBtShared)(unsafe.Pointer(pBt)).FpHasContent = uintptr(0)
}
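// Editor's note: the following is an illustrative sketch of the pHasContent
// protocol described above, not part of the generated translation. A plain Go
// map stands in for the Bitvec structure; the real code sizes the bitvec to
// BtShared.nPage and conservatively treats any page beyond that size as
// having content, so the no-content optimization is skipped for it.
type exampleHasContent struct {
	size uint32          // pages covered, analogous to sqlite3BitvecSize()
	bits map[uint32]bool // pages known to have previously held content
}

func (h *exampleHasContent) setPage(pgno uint32) { // cf. btreeSetHasContent
	if h.bits == nil {
		h.bits = make(map[uint32]bool)
	}
	if pgno <= h.size {
		h.bits[pgno] = true
	}
}

func (h *exampleHasContent) hasContent(pgno uint32) bool { // cf. btreeGetHasContent
	return h.bits != nil && (pgno > h.size || h.bits[pgno])
}

func (h *exampleHasContent) reset() { // cf. btreeClearHasContent
	h.bits = nil
}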
// C documentation
//
// /*
// ** Release all of the apPage[] pages for a cursor.
// */
func _btreeReleaseAllCursorPages(tls *libc.TLS, pCur uintptr) {
var i int32
_ = i
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) >= 0 {
i = 0
for {
if !(i < int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)) {
break
}
_releasePageNotNull(tls, *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(i)*8)))
goto _1
_1:
;
i++
}
_releasePageNotNull(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpPage)
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage = int8(-int32(1))
}
}
// C documentation
//
// /*
// ** The cursor passed as the only argument must point to a valid entry
// ** when this function is called (i.e. have eState==CURSOR_VALID). This
// ** function saves the current cursor key in variables pCur->nKey and
// ** pCur->pKey. SQLITE_OK is returned if successful or an SQLite error
// ** code otherwise.
// **
// ** If the cursor is open on an intkey table, then the integer key
// ** (the rowid) is stored in pCur->nKey and pCur->pKey is left set to
// ** NULL. If the cursor is open on a non-intkey table, then pCur->pKey is
// ** set to point to a malloced buffer pCur->nKey bytes in size containing
// ** the key.
// */
func _saveCursorKey(tls *libc.TLS, pCur uintptr) (r int32) {
var pKey uintptr
var rc int32
_, _ = pKey, rc
rc = SQLITE_OK
if (*TBtCursor)(unsafe.Pointer(pCur)).FcurIntKey != 0 {
/* Only the rowid is required for a table btree */
(*TBtCursor)(unsafe.Pointer(pCur)).FnKey = _sqlite3BtreeIntegerKey(tls, pCur)
} else {
(*TBtCursor)(unsafe.Pointer(pCur)).FnKey = int64(_sqlite3BtreePayloadSize(tls, pCur))
pKey = _sqlite3Malloc(tls, uint64((*TBtCursor)(unsafe.Pointer(pCur)).FnKey+int64(9)+int64(8)))
if pKey != 0 {
rc = _sqlite3BtreePayload(tls, pCur, uint32(0), uint32(int32((*TBtCursor)(unsafe.Pointer(pCur)).FnKey)), pKey)
if rc == SQLITE_OK {
libc.Xmemset(tls, pKey+uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FnKey), 0, uint64(libc.Int32FromInt32(9)+libc.Int32FromInt32(8)))
(*TBtCursor)(unsafe.Pointer(pCur)).FpKey = pKey
} else {
Xsqlite3_free(tls, pKey)
}
} else {
rc = int32(SQLITE_NOMEM)
}
}
return rc
}
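// Editor's note: an illustrative sketch of the two cases described above, not
// part of the generated translation. The btree accessors are replaced by
// plain parameters: for an intkey (rowid) table only the integer key is
// remembered, while for an index btree the full key record is copied into a
// private buffer so the position can be re-sought later.
type exampleSavedKey struct {
	nKey int64  // the rowid, or the size of pKey in bytes
	pKey []byte // nil for intkey tables
}

func exampleSaveCursorKey(isIntKey bool, rowid int64, payload []byte) exampleSavedKey {
	if isIntKey {
		return exampleSavedKey{nKey: rowid}
	}
	buf := make([]byte, len(payload)) // the real code over-allocates 9+8 bytes of zeroed slack
	copy(buf, payload)
	return exampleSavedKey{nKey: int64(len(payload)), pKey: buf}
}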
// C documentation
//
// /*
// ** Save the current cursor position in the variables BtCursor.nKey
// ** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK.
// **
// ** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID)
// ** prior to calling this routine.
// */
func _saveCursorPosition(tls *libc.TLS, pCur uintptr) (r int32) {
var rc int32
var p1 uintptr
_, _ = rc, p1
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags)&int32(BTCF_Pinned) != 0 {
return libc.Int32FromInt32(SQLITE_CONSTRAINT) | libc.Int32FromInt32(11)< int32((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) {
rc = _sqlite3CorruptError(tls, int32(71032))
} else {
rc = _sqlite3BtreeIndexMoveto(tls, pCur, pIdxKey, pRes)
}
_sqlite3DbFree(tls, (*TKeyInfo1)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpKeyInfo)).Fdb, pIdxKey)
} else {
pIdxKey = uintptr(0)
rc = _sqlite3BtreeTableMoveto(tls, pCur, nKey, bias, pRes)
}
return rc
}
// C documentation
//
// /*
// ** Restore the cursor to the position it was in (or as close to as possible)
// ** when saveCursorPosition() was called. Note that this call deletes the
// ** saved position info stored by saveCursorPosition(), so there can be
// ** at most one effective restoreCursorPosition() call after each
// ** saveCursorPosition().
// */
func _btreeRestoreCursorPosition(tls *libc.TLS, pCur uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* skipNext at bp+0 */ int32
_ = rc
*(*int32)(unsafe.Pointer(bp)) = 0
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == int32(CURSOR_FAULT) {
return (*TBtCursor)(unsafe.Pointer(pCur)).FskipNext
}
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_INVALID)
if _sqlite3FaultSim(tls, int32(410)) != 0 {
rc = int32(SQLITE_IOERR)
} else {
rc = _btreeMoveto(tls, pCur, (*TBtCursor)(unsafe.Pointer(pCur)).FpKey, (*TBtCursor)(unsafe.Pointer(pCur)).FnKey, 0, bp)
}
if rc == SQLITE_OK {
Xsqlite3_free(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpKey)
(*TBtCursor)(unsafe.Pointer(pCur)).FpKey = uintptr(0)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
(*TBtCursor)(unsafe.Pointer(pCur)).FskipNext = *(*int32)(unsafe.Pointer(bp))
}
if (*TBtCursor)(unsafe.Pointer(pCur)).FskipNext != 0 && int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == CURSOR_VALID {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_SKIPNEXT)
}
}
return rc
}
// C documentation
//
// /*
// ** Determine whether or not a cursor has moved from the position where
// ** it was last placed, or has been invalidated for any other reason.
// ** Cursors can move when the row they are pointing at is deleted out
// ** from under them, for example. Cursor might also move if a btree
// ** is rebalanced.
// **
// ** Calling this routine with a NULL cursor pointer returns false.
// **
// ** Use the separate sqlite3BtreeCursorRestore() routine to restore a cursor
// ** back to where it ought to be if this routine returns true.
// */
func _sqlite3BtreeCursorHasMoved(tls *libc.TLS, pCur uintptr) (r int32) {
return libc.BoolInt32(CURSOR_VALID != int32(*(*Tu8)(unsafe.Pointer(pCur))))
}
// C documentation
//
// /*
// ** Return a pointer to a fake BtCursor object that will always answer
// ** false to the sqlite3BtreeCursorHasMoved() routine above. The fake
// ** cursor returned must not be used with any other Btree interface.
// */
func _sqlite3BtreeFakeValidCursor(tls *libc.TLS) (r uintptr) {
return uintptr(unsafe.Pointer(&_fakeCursor))
}
var _fakeCursor Tu8
// C documentation
//
// /*
// ** This routine restores a cursor back to its original position after it
// ** has been moved by some outside activity (such as a btree rebalance or
// ** a row having been deleted out from under the cursor).
// **
// ** On success, the *pDifferentRow parameter is false if the cursor is left
// ** pointing at exactly the same row. *pDifferentRow is true if the row the
// ** cursor was pointing to has been deleted, forcing the cursor to point to
// ** some nearby row.
// **
// ** This routine should only be called for a cursor that just returned
// ** TRUE from sqlite3BtreeCursorHasMoved().
// */
func _sqlite3BtreeCursorRestore(tls *libc.TLS, pCur uintptr, pDifferentRow uintptr) (r int32) {
var rc, v1 int32
_, _ = rc, v1
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) >= int32(CURSOR_REQUIRESEEK) {
v1 = _btreeRestoreCursorPosition(tls, pCur)
} else {
v1 = SQLITE_OK
}
rc = v1
if rc != 0 {
*(*int32)(unsafe.Pointer(pDifferentRow)) = int32(1)
return rc
}
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) != CURSOR_VALID {
*(*int32)(unsafe.Pointer(pDifferentRow)) = int32(1)
} else {
*(*int32)(unsafe.Pointer(pDifferentRow)) = 0
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Provide flag hints to the cursor.
// */
func _sqlite3BtreeCursorHintFlags(tls *libc.TLS, pCur uintptr, x uint32) {
(*TBtCursor)(unsafe.Pointer(pCur)).Fhints = uint8(x)
}
// C documentation
//
// /*
// ** Given a page number of a regular database page, return the page
// ** number for the pointer-map page that contains the entry for the
// ** input page number.
// **
// ** Return 0 (not a valid page) for pgno==1 since there is
// ** no pointer map associated with page 1. The integrity_check logic
// ** requires that ptrmapPageno(*,1)!=1.
// */
func _ptrmapPageno(tls *libc.TLS, pBt uintptr, pgno TPgno) (r TPgno) {
var iPtrMap, ret TPgno
var nPagesPerMapPage int32
_, _, _ = iPtrMap, nPagesPerMapPage, ret
if pgno < uint32(2) {
return uint32(0)
}
nPagesPerMapPage = int32((*TBtShared)(unsafe.Pointer(pBt)).FusableSize/uint32(5) + uint32(1))
iPtrMap = (pgno - uint32(2)) / uint32(nPagesPerMapPage)
ret = iPtrMap*uint32(nPagesPerMapPage) + uint32(2)
if ret == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
ret++
}
return ret
}
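// Editor's note: an illustrative sketch of the pointer-map arithmetic above,
// not part of the generated translation. Each pointer-map entry occupies 5
// bytes (a type byte plus a 4-byte parent page number), so one map page
// covers usableSize/5 database pages; the pending-byte page adjustment made
// by ptrmapPageno is omitted here. With a 1024-byte usable size, pages 2
// through 206 all map to pointer-map page 2, and page 207 is itself the next
// pointer-map page.
func examplePtrmapPageno(usableSize, pgno uint32) uint32 {
	if pgno < 2 {
		return 0 // page 1 never has a pointer-map entry
	}
	nPagesPerMapPage := usableSize/5 + 1
	iPtrMap := (pgno - 2) / nPagesPerMapPage
	return iPtrMap*nPagesPerMapPage + 2
}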
// C documentation
//
// /*
// ** Write an entry into the pointer map.
// **
// ** This routine updates the pointer map entry for page number 'key'
// ** so that it maps to type 'eType' and parent page number 'pgno'.
// **
// ** If *pRC is initially non-zero (non-SQLITE_OK) then this routine is
// ** a no-op. If an error occurs, the appropriate error code is written
// ** into *pRC.
// */
func _ptrmapPut(tls *libc.TLS, pBt uintptr, key TPgno, eType Tu8, parent TPgno, pRC uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iPtrmap TPgno
var offset, rc, v1 int32
var pPtrmap uintptr
var _ /* pDbPage at bp+0 */ uintptr
_, _, _, _, _ = iPtrmap, offset, pPtrmap, rc, v1 /* Return code from subfunctions */
if *(*int32)(unsafe.Pointer(pRC)) != 0 {
return
}
/* The super-journal page number must never be used as a pointer map page */
if key == uint32(0) {
*(*int32)(unsafe.Pointer(pRC)) = _sqlite3CorruptError(tls, int32(71230))
return
}
iPtrmap = _ptrmapPageno(tls, pBt, key)
rc = _sqlite3PagerGet(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, iPtrmap, bp, 0)
if rc != SQLITE_OK {
*(*int32)(unsafe.Pointer(pRC)) = rc
return
}
if int32(*(*int8)(unsafe.Pointer(_sqlite3PagerGetExtra(tls, *(*uintptr)(unsafe.Pointer(bp)))))) != 0 {
/* The first byte of the extra data is the MemPage.isInit byte.
** If that byte is set, it means this page is also being used
** as a btree page. */
*(*int32)(unsafe.Pointer(pRC)) = _sqlite3CorruptError(tls, int32(71243))
goto ptrmap_exit
}
offset = int32(libc.Uint32FromInt32(5) * (key - iPtrmap - libc.Uint32FromInt32(1)))
if offset < 0 {
*(*int32)(unsafe.Pointer(pRC)) = _sqlite3CorruptError(tls, int32(71248))
goto ptrmap_exit
}
pPtrmap = _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp)))
if int32(eType) != int32(*(*Tu8)(unsafe.Pointer(pPtrmap + uintptr(offset)))) || _sqlite3Get4byte(tls, pPtrmap+uintptr(offset+int32(1))) != parent {
v1 = _sqlite3PagerWrite(tls, *(*uintptr)(unsafe.Pointer(bp)))
rc = v1
*(*int32)(unsafe.Pointer(pRC)) = v1
if rc == SQLITE_OK {
*(*Tu8)(unsafe.Pointer(pPtrmap + uintptr(offset))) = eType
_sqlite3Put4byte(tls, pPtrmap+uintptr(offset+int32(1)), parent)
}
}
goto ptrmap_exit
ptrmap_exit:
;
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
// C documentation
//
// /*
// ** Read an entry from the pointer map.
// **
// ** This routine retrieves the pointer map entry for page 'key', writing
// ** the type and parent page number to *pEType and *pPgno respectively.
// ** An error code is returned if something goes wrong, otherwise SQLITE_OK.
// */
func _ptrmapGet(tls *libc.TLS, pBt uintptr, key TPgno, pEType uintptr, pPgno uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iPtrmap, offset, rc int32
var pPtrmap uintptr
var _ /* pDbPage at bp+0 */ uintptr
_, _, _, _ = iPtrmap, offset, pPtrmap, rc
iPtrmap = int32(_ptrmapPageno(tls, pBt, key))
rc = _sqlite3PagerGet(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, uint32(iPtrmap), bp, 0)
if rc != 0 {
return rc
}
pPtrmap = _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp)))
offset = int32(libc.Uint32FromInt32(5) * (key - uint32(iPtrmap) - libc.Uint32FromInt32(1)))
if offset < 0 {
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
return _sqlite3CorruptError(tls, int32(71293))
}
*(*Tu8)(unsafe.Pointer(pEType)) = *(*Tu8)(unsafe.Pointer(pPtrmap + uintptr(offset)))
if pPgno != 0 {
*(*TPgno)(unsafe.Pointer(pPgno)) = _sqlite3Get4byte(tls, pPtrmap+uintptr(offset+int32(1)))
}
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
if int32(*(*Tu8)(unsafe.Pointer(pEType))) < int32(1) || int32(*(*Tu8)(unsafe.Pointer(pEType))) > int32(5) {
return _sqlite3CorruptError(tls, int32(71301))
}
return SQLITE_OK
}
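// Editor's note: an illustrative sketch of the 5-byte pointer-map entry
// layout read and written by ptrmapPut()/ptrmapGet() above, not part of the
// generated translation: a type byte followed by the parent page number as a
// 4-byte big-endian integer, stored at offset 5*(key-iPtrmap-1) within the
// map page.
func examplePtrmapEncode(entry []byte, eType uint8, parent uint32) {
	entry[0] = eType
	entry[1] = byte(parent >> 24)
	entry[2] = byte(parent >> 16)
	entry[3] = byte(parent >> 8)
	entry[4] = byte(parent)
}

func examplePtrmapDecode(entry []byte) (eType uint8, parent uint32) {
	return entry[0], uint32(entry[1])<<24 | uint32(entry[2])<<16 | uint32(entry[3])<<8 | uint32(entry[4])
}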
/*
** Given a btree page and a cell index (0 means the first cell on
** the page, 1 means the second cell, and so forth) return a pointer
** to the cell content.
**
** findCellPastPtr() does the same except it skips past the initial
** 4-byte child pointer found on interior pages, if there is one.
**
** This routine works only for pages that do not contain overflow cells.
*/
// C documentation
//
// /*
// ** This is common tail processing for btreeParseCellPtr() and
// ** btreeParseCellPtrIndex() for the case when the cell does not fit entirely
// ** on a single B-tree page. Make necessary adjustments to the CellInfo
// ** structure.
// */
func _btreeParseCellAdjustSizeForOverflow(tls *libc.TLS, pPage uintptr, pCell uintptr, pInfo uintptr) {
var maxLocal, minLocal, surplus int32
_, _, _ = maxLocal, minLocal, surplus /* Overflow payload available for local storage */
minLocal = int32((*TMemPage)(unsafe.Pointer(pPage)).FminLocal)
maxLocal = int32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal)
surplus = int32(uint32(minLocal) + ((*TCellInfo)(unsafe.Pointer(pInfo)).FnPayload-uint32(minLocal))%((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-uint32(4)))
if surplus <= maxLocal {
(*TCellInfo)(unsafe.Pointer(pInfo)).FnLocal = uint16(surplus)
} else {
(*TCellInfo)(unsafe.Pointer(pInfo)).FnLocal = uint16(minLocal)
}
(*TCellInfo)(unsafe.Pointer(pInfo)).FnSize = uint16(int32(uint16(t__predefined_ptrdiff_t((*TCellInfo)(unsafe.Pointer(pInfo)).FpPayload+uintptr((*TCellInfo)(unsafe.Pointer(pInfo)).FnLocal))-int64(pCell))) + int32(4))
}
// C documentation
//
// /*
// ** Given a record with nPayload bytes of payload stored within btree
// ** page pPage, return the number of bytes of payload stored locally.
// */
func _btreePayloadToLocal(tls *libc.TLS, pPage uintptr, nPayload Ti64) (r int32) {
var maxLocal, minLocal, surplus, v1 int32
_, _, _, _ = maxLocal, minLocal, surplus, v1 /* Maximum amount of payload held locally */
maxLocal = int32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal)
if nPayload <= int64(maxLocal) {
return int32(nPayload)
} else { /* Overflow payload available for local storage */
minLocal = int32((*TMemPage)(unsafe.Pointer(pPage)).FminLocal)
surplus = int32(int64(minLocal) + (nPayload-int64(minLocal))%int64((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-libc.Uint32FromInt32(4)))
if surplus <= maxLocal {
v1 = surplus
} else {
v1 = minLocal
}
return v1
}
return r
}
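// Editor's note: an illustrative restatement of the local-payload rule
// implemented above, not part of the generated translation. Payloads up to
// maxLocal stay entirely on the page; for larger payloads the "surplus"
// (minLocal plus the remainder of the spilled bytes modulo the overflow-page
// capacity of usableSize-4) is kept locally only if it still fits, otherwise
// exactly minLocal bytes are kept and the rest goes to overflow pages.
func examplePayloadToLocal(nPayload, minLocal, maxLocal, usableSize int64) int64 {
	if nPayload <= maxLocal {
		return nPayload
	}
	surplus := minLocal + (nPayload-minLocal)%(usableSize-4)
	if surplus <= maxLocal {
		return surplus
	}
	return minLocal
}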
// C documentation
//
// /*
// ** The following routines are implementations of the MemPage.xParseCell()
// ** method.
// **
// ** Parse a cell content block and fill in the CellInfo structure.
// **
// ** btreeParseCellPtr() => table btree leaf nodes
// ** btreeParseCellPtrNoPayload() => table btree internal nodes
// ** btreeParseCellPtrIndex() => index btree nodes
// **
// ** There is also a wrapper function btreeParseCell() that works for
// ** all MemPage types and that references the cell by index rather than
// ** by pointer.
// */
func _btreeParseCellPtrNoPayload(tls *libc.TLS, pPage uintptr, pCell uintptr, pInfo uintptr) {
_ = pPage
(*TCellInfo)(unsafe.Pointer(pInfo)).FnSize = uint16(int32(4) + int32(_sqlite3GetVarint(tls, pCell+4, pInfo)))
(*TCellInfo)(unsafe.Pointer(pInfo)).FnPayload = uint32(0)
(*TCellInfo)(unsafe.Pointer(pInfo)).FnLocal = uint16(0)
(*TCellInfo)(unsafe.Pointer(pInfo)).FpPayload = uintptr(0)
return
}
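// Editor's note: the xParseCell and xCellSize implementations in this file
// decode SQLite varints inline for speed. The loop below is an illustrative
// equivalent, not part of the generated translation, assuming the standard
// SQLite varint encoding: big-endian, seven payload bits per byte with the
// high bit as a continuation flag, and a ninth byte (if present) contributing
// all eight of its bits.
func exampleGetVarint(buf []byte) (value uint64, n int) {
	for n < 9 && n < len(buf) {
		b := buf[n]
		if n == 8 {
			// the ninth byte carries a full 8 bits of payload
			return value<<8 | uint64(b), 9
		}
		value = value<<7 | uint64(b&0x7f)
		n++
		if b < 0x80 {
			return value, n
		}
	}
	return value, n
}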
func _btreeParseCellPtr(tls *libc.TLS, pPage uintptr, pCell uintptr, pInfo uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nPayload Tu32
var pEnd, pIter, v1, v11, v13, v15, v16, v3, v5, v7, v9 uintptr
var x, v10, v12, v14, v2, v4, v6, v8 Tu8
var _ /* iKey at bp+0 */ Tu64
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = nPayload, pEnd, pIter, x, v1, v10, v11, v12, v13, v14, v15, v16, v2, v3, v4, v5, v6, v7, v8, v9 /* Extracted Key value */
pIter = pCell
/* The next block of code is equivalent to:
**
** pIter += getVarint32(pIter, nPayload);
**
** The code is inlined to avoid a function call.
*/
nPayload = uint32(*(*Tu8)(unsafe.Pointer(pIter)))
if nPayload >= uint32(0x80) {
pEnd = pIter + 8
nPayload &= uint32(0x7f)
for cond := true; cond; cond = int32(*(*Tu8)(unsafe.Pointer(pIter))) >= int32(0x80) && pIter < pEnd {
pIter++
v1 = pIter
nPayload = nPayload<nKey);
**
** The code is inlined and the loop is unrolled for performance.
** This routine is a high-runner.
*/
*(*Tu64)(unsafe.Pointer(bp)) = uint64(*(*Tu8)(unsafe.Pointer(pIter)))
if *(*Tu64)(unsafe.Pointer(bp)) >= uint64(0x80) {
pIter++
v3 = pIter
v2 = *(*Tu8)(unsafe.Pointer(v3))
x = v2
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= int32(0x80) {
pIter++
v5 = pIter
v4 = *(*Tu8)(unsafe.Pointer(v5))
x = v4
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= int32(0x80) {
pIter++
v7 = pIter
v6 = *(*Tu8)(unsafe.Pointer(v7))
x = v6
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= int32(0x80) {
pIter++
v9 = pIter
v8 = *(*Tu8)(unsafe.Pointer(v9))
x = v8
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= int32(0x80) {
pIter++
v11 = pIter
v10 = *(*Tu8)(unsafe.Pointer(v11))
x = v10
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= int32(0x80) {
pIter++
v13 = pIter
v12 = *(*Tu8)(unsafe.Pointer(v13))
x = v12
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= int32(0x80) {
pIter++
v15 = pIter
v14 = *(*Tu8)(unsafe.Pointer(v15))
x = v14
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= int32(0x80) {
pIter++
v16 = pIter
*(*Tu64)(unsafe.Pointer(bp)) = *(*Tu64)(unsafe.Pointer(bp))<= uint32(0x80) {
pEnd = pIter + 8
nPayload &= uint32(0x7f)
for cond := true; cond; cond = int32(*(*Tu8)(unsafe.Pointer(pIter))) >= int32(0x80) && pIter < pEnd {
pIter++
v1 = pIter
nPayload = nPayload< table internal nodes
// ** cellSizePtrTableLeaf() => table leaf nodes
// ** cellSizePtr() => index internal nodes
// ** cellSizeIdxLeaf() => index leaf nodes
// */
func _cellSizePtr(tls *libc.TLS, pPage uintptr, pCell uintptr) (r Tu16) {
var minLocal int32
var nSize Tu32
var pEnd, pIter, v1 uintptr
_, _, _, _, _ = minLocal, nSize, pEnd, pIter, v1
pIter = pCell + uintptr(4) /* Size value to return */
nSize = uint32(*(*Tu8)(unsafe.Pointer(pIter)))
if nSize >= uint32(0x80) {
pEnd = pIter + 8
nSize &= uint32(0x7f)
for cond := true; cond; cond = int32(*(*Tu8)(unsafe.Pointer(pIter))) >= int32(0x80) && pIter < pEnd {
pIter++
v1 = pIter
nSize = nSize< uint32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal) {
nSize = uint32(minLocal)
}
nSize += uint32(int32(4) + int32(uint16(int64(pIter)-int64(pCell))))
}
return uint16(nSize)
}
func _cellSizePtrIdxLeaf(tls *libc.TLS, pPage uintptr, pCell uintptr) (r Tu16) {
var minLocal int32
var nSize Tu32
var pEnd, pIter, v1 uintptr
_, _, _, _, _ = minLocal, nSize, pEnd, pIter, v1
pIter = pCell /* Size value to return */
nSize = uint32(*(*Tu8)(unsafe.Pointer(pIter)))
if nSize >= uint32(0x80) {
pEnd = pIter + 8
nSize &= uint32(0x7f)
for cond := true; cond; cond = int32(*(*Tu8)(unsafe.Pointer(pIter))) >= int32(0x80) && pIter < pEnd {
pIter++
v1 = pIter
nSize = nSize< uint32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal) {
nSize = uint32(minLocal)
}
nSize += uint32(int32(4) + int32(uint16(int64(pIter)-int64(pCell))))
}
return uint16(nSize)
}
func _cellSizePtrNoPayload(tls *libc.TLS, pPage uintptr, pCell uintptr) (r Tu16) {
var pEnd, pIter, v1 uintptr
_, _, _ = pEnd, pIter, v1
pIter = pCell + uintptr(4) /* End mark for a varint */
_ = pPage
pEnd = pIter + uintptr(9)
for {
v1 = pIter
pIter++
if !(int32(*(*Tu8)(unsafe.Pointer(v1)))&int32(0x80) != 0 && pIter < pEnd) {
break
}
}
return uint16(int64(pIter) - int64(pCell))
}
func _cellSizePtrTableLeaf(tls *libc.TLS, pPage uintptr, pCell uintptr) (r Tu16) {
var minLocal int32
var nSize Tu32
var pEnd, pIter, v1, v11, v13, v15, v2, v3, v5, v7, v9 uintptr
var v10, v12, v14, v16, v4, v6, v8 bool
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = minLocal, nSize, pEnd, pIter, v1, v10, v11, v12, v13, v14, v15, v16, v2, v3, v4, v5, v6, v7, v8, v9
pIter = pCell /* Size value to return */
nSize = uint32(*(*Tu8)(unsafe.Pointer(pIter)))
if nSize >= uint32(0x80) {
pEnd = pIter + 8
nSize &= uint32(0x7f)
for cond := true; cond; cond = int32(*(*Tu8)(unsafe.Pointer(pIter))) >= int32(0x80) && pIter < pEnd {
pIter++
v1 = pIter
nSize = nSize< uint32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal) {
nSize = uint32(minLocal)
}
nSize += uint32(int32(4) + int32(uint16(int64(pIter)-int64(pCell))))
}
return uint16(nSize)
}
// C documentation
//
// /*
// ** The cell pCell is currently part of page pSrc but will ultimately be part
// ** of pPage. (pSrc and pPage are often the same.) If pCell contains a
// ** pointer to an overflow page, insert an entry into the pointer-map for
// ** the overflow page that will be valid after pCell has been moved to pPage.
// */
func _ptrmapPutOvflPtr(tls *libc.TLS, pPage uintptr, pSrc uintptr, pCell uintptr, pRC uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var ovfl TPgno
var _ /* info at bp+0 */ TCellInfo
_ = ovfl
if *(*int32)(unsafe.Pointer(pRC)) != 0 {
return
}
(*(*func(*libc.TLS, uintptr, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell})))(tls, pPage, pCell, bp)
if uint32((*(*TCellInfo)(unsafe.Pointer(bp))).FnLocal) < (*(*TCellInfo)(unsafe.Pointer(bp))).FnPayload {
if uint64(pCell) < uint64((*TMemPage)(unsafe.Pointer(pSrc)).FaDataEnd) && uint64(pCell+uintptr((*(*TCellInfo)(unsafe.Pointer(bp))).FnLocal)) > uint64((*TMemPage)(unsafe.Pointer(pSrc)).FaDataEnd) {
*(*int32)(unsafe.Pointer(pRC)) = _sqlite3CorruptError(tls, int32(71741))
return
}
ovfl = _sqlite3Get4byte(tls, pCell+uintptr(int32((*(*TCellInfo)(unsafe.Pointer(bp))).FnSize)-int32(4)))
_ptrmapPut(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpBt, ovfl, uint8(PTRMAP_OVERFLOW1), (*TMemPage)(unsafe.Pointer(pPage)).Fpgno, pRC)
}
}
// C documentation
//
// /*
// ** Defragment the page given. This routine reorganizes cells within the
// ** page so that there are no free-blocks on the free-block list.
// **
// ** Parameter nMaxFrag is the maximum amount of fragmented space that may be
// ** present in the page after this routine returns.
// **
// ** EVIDENCE-OF: R-44582-60138 SQLite may from time to time reorganize a
// ** b-tree page so that there are no freeblocks or fragment bytes, all
// ** unused bytes are contained in the unallocated space region, and all
// ** cells are packed tightly at the end of the page.
// */
func _defragmentPage(tls *libc.TLS, pPage uintptr, nMaxFrag int32) (r int32) {
var cbrk, cellOffset, hdr, i, iCellFirst, iCellLast, iCellStart, iFree, iFree2, nCell, pc, size, sz, sz2, top, usableSize int32
var data, pAddr, pAddr1, pEnd, src, temp uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = cbrk, cellOffset, data, hdr, i, iCellFirst, iCellLast, iCellStart, iFree, iFree2, nCell, pAddr, pAddr1, pEnd, pc, size, src, sz, sz2, temp, top, usableSize /* First cell offset in input */
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData
hdr = int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)
cellOffset = int32((*TMemPage)(unsafe.Pointer(pPage)).FcellOffset)
nCell = int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)
iCellFirst = cellOffset + int32(2)*nCell
usableSize = int32((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize)
/* This block handles pages with two or fewer free blocks and nMaxFrag
** or fewer fragmented bytes. In this case it is faster to move the
** two (or one) blocks of cells using memmove() and add the required
** offsets to each pointer in the cell-pointer array than it is to
** reconstruct the entire page. */
if int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(7))))) <= nMaxFrag {
iFree = int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(1)))))<<libc.Int32FromInt32(8) | int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(2)))))
if iFree > usableSize-int32(4) {
return _sqlite3CorruptError(tls, int32(71799))
}
if iFree != 0 {
iFree2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree))))<<libc.Int32FromInt32(8) | int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree+int32(1)))))
if iFree2 > usableSize-int32(4) {
return _sqlite3CorruptError(tls, int32(71802))
}
if 0 == iFree2 || int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2)))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+int32(1))))) == 0 {
pEnd = data + uintptr(cellOffset+nCell*int32(2))
sz2 = 0
sz = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree+int32(2)))))<= iFree {
return _sqlite3CorruptError(tls, int32(71810))
}
if iFree2 != 0 {
if iFree+sz > iFree2 {
return _sqlite3CorruptError(tls, int32(71813))
}
sz2 = int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFree2+int32(2)))))< usableSize {
return _sqlite3CorruptError(tls, int32(71815))
}
libc.Xmemmove(tls, data+uintptr(iFree+sz+sz2), data+uintptr(iFree+sz), uint64(iFree2-(iFree+sz)))
sz += sz2
} else {
if iFree+sz > usableSize {
return _sqlite3CorruptError(tls, int32(71819))
}
}
cbrk = top + sz
libc.Xmemmove(tls, data+uintptr(cbrk), data+uintptr(top), uint64(iFree-top))
pAddr = data + uintptr(cellOffset)
for {
if !(pAddr < pEnd) {
break
}
pc = int32(*(*Tu8)(unsafe.Pointer(pAddr)))<> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(pAddr + 1)) = uint8(pc + sz)
} else {
if pc < iFree2 {
*(*Tu8)(unsafe.Pointer(pAddr)) = uint8((pc + sz2) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(pAddr + 1)) = uint8(pc + sz2)
}
}
goto _1
_1:
;
pAddr += uintptr(2)
}
goto defragment_out
}
}
}
cbrk = usableSize
iCellLast = usableSize - int32(4)
iCellStart = int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(5)))))< 0 {
temp = _sqlite3PagerTempSpace(tls, (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FpPager)
libc.Xmemcpy(tls, temp, data, uint64(usableSize))
src = temp
i = 0
for {
if !(i < nCell) {
break
} /* The i-th cell pointer */
pAddr1 = data + uintptr(cellOffset+i*int32(2))
pc = int32(*(*Tu8)(unsafe.Pointer(pAddr1)))< iCellLast {
return _sqlite3CorruptError(tls, int32(71852))
}
size = int32((*(*func(*libc.TLS, uintptr, uintptr) Tu16)(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize})))(tls, pPage, src+uintptr(pc)))
cbrk -= size
if cbrk < iCellStart || pc+size > usableSize {
return _sqlite3CorruptError(tls, int32(71858))
}
*(*Tu8)(unsafe.Pointer(pAddr1)) = uint8(cbrk >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(pAddr1 + 1)) = uint8(cbrk)
libc.Xmemcpy(tls, data+uintptr(cbrk), src+uintptr(pc), uint64(size))
goto _2
_2:
;
i++
}
}
*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(7)))) = uint8(0)
goto defragment_out
defragment_out:
;
if int32(*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(7)))))+cbrk-iCellFirst != (*TMemPage)(unsafe.Pointer(pPage)).FnFree {
return _sqlite3CorruptError(tls, int32(71872))
}
*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(5)))) = uint8(cbrk >> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(5)) + 1)) = uint8(cbrk)
*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(1)))) = uint8(0)
*(*uint8)(unsafe.Pointer(data + uintptr(hdr+int32(2)))) = uint8(0)
libc.Xmemset(tls, data+uintptr(iCellFirst), 0, uint64(cbrk-iCellFirst))
return SQLITE_OK
}
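// Editor's note: the page-header fields manipulated above (the first-freeblock
// pointer at offset hdr+1, the cell-content start at offset hdr+5, and every
// cell-pointer slot) are 2-byte big-endian integers. The helpers below are an
// illustrative sketch of that encoding, not part of the generated translation;
// defragmentPage() effectively finishes with the equivalent of
// examplePut2(data, hdr+5, cbrk).
func exampleGet2(data []byte, offset int) int {
	return int(data[offset])<<8 | int(data[offset+1])
}

func examplePut2(data []byte, offset, value int) {
	data[offset] = byte(value >> 8)
	data[offset+1] = byte(value)
}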
// C documentation
//
// /*
// ** Search the free-list on page pPg for space to store a cell nByte bytes in
// ** size. If one can be found, return a pointer to the space and remove it
// ** from the free-list.
// **
// ** If no suitable space can be found on the free-list, return NULL.
// **
// ** This function may detect corruption within pPg. If corruption is
// ** detected then *pRc is set to SQLITE_CORRUPT and NULL is returned.
// **
// ** Slots on the free list that are between 1 and 3 bytes larger than nByte
// ** will be ignored if adding the extra space to the fragmentation count
// ** causes the fragmentation count to exceed 60.
// */
func _pageFindSlot(tls *libc.TLS, pPg uintptr, nByte int32, pRc uintptr) (r uintptr) {
var aData, pTmp, p2 uintptr
var hdr, iAddr, maxPC, pc, size, x, v1 int32
_, _, _, _, _, _, _, _, _, _ = aData, hdr, iAddr, maxPC, pTmp, pc, size, x, v1, p2
hdr = int32((*TMemPage)(unsafe.Pointer(pPg)).FhdrOffset) /* Offset to page header */
aData = (*TMemPage)(unsafe.Pointer(pPg)).FaData /* Page data */
iAddr = hdr + int32(1) /* Address of ptr to pc */
pTmp = aData + uintptr(iAddr) /* Temporary ptr into aData[] */
pc = int32(*(*Tu8)(unsafe.Pointer(pTmp)))<= 0 {
if x < int32(4) {
/* EVIDENCE-OF: R-11498-58022 In a well-formed b-tree page, the total
** number of bytes in fragments may not exceed 60. */
if int32(*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(7))))) > int32(57) {
return uintptr(0)
}
/* Remove the slot from the free-list. Update the number of
** fragmented bytes within the page. */
libc.Xmemcpy(tls, aData+uintptr(iAddr), aData+uintptr(pc), uint64(2))
p2 = aData + uintptr(hdr+int32(7))
*(*Tu8)(unsafe.Pointer(p2)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p2))) + int32(uint8(x)))
return aData + uintptr(pc)
} else {
if x+pc > maxPC {
/* This slot extends off the end of the usable part of the page */
*(*int32)(unsafe.Pointer(pRc)) = _sqlite3CorruptError(tls, int32(71929))
return uintptr(0)
} else {
/* The slot remains on the free-list. Reduce its size to account
** for the portion used by the new allocation. */
*(*Tu8)(unsafe.Pointer(aData + uintptr(pc+int32(2)))) = uint8(x >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(aData + uintptr(pc+int32(2)) + 1)) = uint8(x)
}
}
return aData + uintptr(pc+x)
}
iAddr = pc
pTmp = aData + uintptr(pc)
pc = int32(*(*Tu8)(unsafe.Pointer(pTmp)))< maxPC+nByte-int32(4) {
/* The free slot chain extends off the end of the page */
*(*int32)(unsafe.Pointer(pRc)) = _sqlite3CorruptError(tls, int32(71951))
}
return uintptr(0)
}
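// Editor's note: an illustrative sketch of the free-list walk described
// above, not part of the generated translation, using the exampleGet2/
// examplePut2 helpers sketched earlier. Each freeblock begins with a 2-byte
// pointer to the next freeblock followed by a 2-byte size, and the chain is
// rooted at header offset hdr+1; the fragment-counter update and the
// corruption checks performed by pageFindSlot() are omitted here.
func exampleFindSlot(data []byte, hdr, nByte int) int {
	iAddr := hdr + 1
	pc := exampleGet2(data, iAddr)
	for pc != 0 {
		size := exampleGet2(data, pc+2)
		if x := size - nByte; x >= 0 {
			if x < 4 {
				// Use the whole slot: unlink it and let the remainder
				// become fragment bytes (counted at data[hdr+7]).
				examplePut2(data, iAddr, exampleGet2(data, pc))
				return pc
			}
			// Shrink the slot and allocate from its tail.
			examplePut2(data, pc+2, x)
			return pc + x
		}
		iAddr = pc
		pc = exampleGet2(data, pc)
	}
	return 0 // no suitable slot; the caller falls back to the gap or defragments
}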
// C documentation
//
// /*
// ** Allocate nByte bytes of space from within the B-Tree page passed
// ** as the first argument. Write into *pIdx the index into pPage->aData[]
// ** of the first byte of allocated space. Return either SQLITE_OK or
// ** an error code (usually SQLITE_CORRUPT).
// **
// ** The caller guarantees that there is sufficient space to make the
// ** allocation. This routine might need to defragment in order to bring
// ** all the space together, however. This routine will avoid using
// ** the first two bytes past the cell pointer area since presumably this
// ** allocation is being made in order to insert a new cell, so we will
// ** also end up needing a new cell pointer.
// */
func _allocateSpace(tls *libc.TLS, pPage uintptr, nByte int32, pIdx uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var data, pSpace, pTmp uintptr
var g2, gap, hdr, top, v1, v2 int32
var _ /* rc at bp+0 */ int32
_, _, _, _, _, _, _, _, _ = data, g2, gap, hdr, pSpace, pTmp, top, v1, v2
hdr = int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset) /* Local cache of pPage->hdrOffset */
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData /* First byte of cell content area */
*(*int32)(unsafe.Pointer(bp)) = SQLITE_OK /* First byte of gap between cell pointers and cell content */
/* Minimum cell size is 4 */
gap = int32((*TMemPage)(unsafe.Pointer(pPage)).FcellOffset) + int32(2)*int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)
/* EVIDENCE-OF: R-29356-02391 If the database uses a 65536-byte page size
** and the reserved space is zero (the usual value for reserved space)
** then the cell content offset of an empty page wants to be 65536.
** However, that integer is too large to be stored in a 2-byte unsigned
** integer, so a value of 0 is used in its place. */
pTmp = data + uintptr(hdr+int32(5))
top = int32(*(*Tu8)(unsafe.Pointer(pTmp)))< top {
if top == 0 && (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize == uint32(65536) {
top = int32(65536)
} else {
return _sqlite3CorruptError(tls, int32(71999))
}
} else {
if top > int32((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) {
return _sqlite3CorruptError(tls, int32(72002))
}
}
/* If there is enough space between gap and top for one more cell pointer,
** and if the freelist is not empty, then search the
** freelist looking for a slot big enough to satisfy the request.
*/
if (*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(2)))) != 0 || *(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(1)))) != 0) && gap+int32(2) <= top {
pSpace = _pageFindSlot(tls, pPage, nByte, bp)
if pSpace != 0 {
v1 = int32(int64(pSpace) - int64(data))
g2 = v1
*(*int32)(unsafe.Pointer(pIdx)) = v1
if g2 <= gap {
return _sqlite3CorruptError(tls, int32(72019))
} else {
return SQLITE_OK
}
} else {
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
}
}
/* The request could not be fulfilled using a freelist slot. Check
** to see if defragmentation is necessary.
*/
if gap+int32(2)+nByte > top {
if int32(4) < (*TMemPage)(unsafe.Pointer(pPage)).FnFree-(int32(2)+nByte) {
v2 = int32(4)
} else {
v2 = (*TMemPage)(unsafe.Pointer(pPage)).FnFree - (int32(2) + nByte)
}
*(*int32)(unsafe.Pointer(bp)) = _defragmentPage(tls, pPage, v2)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
top = (int32(*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(5)))))<> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(5)) + 1)) = uint8(top)
*(*int32)(unsafe.Pointer(pIdx)) = top
return SQLITE_OK
}
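// Editor's note: an illustrative sketch of the fallback path taken above when
// no freelist slot satisfies the request, not part of the generated
// translation: the allocation is carved off the top of the cell-content area,
// which moves the content-area start (header offset hdr+5) down by nByte and
// returns the new start as the allocation offset.
func exampleAllocFromTop(data []byte, hdr, nByte int) int {
	top := exampleGet2(data, hdr+5) - nByte
	examplePut2(data, hdr+5, top)
	return top
}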
// C documentation
//
// /*
// ** Return a section of the pPage->aData to the freelist.
// ** The first byte of the new free block is pPage->aData[iStart]
// ** and the size of the block is iSize bytes.
// **
// ** Adjacent freeblocks are coalesced.
// **
// ** Even though the freeblock list was checked by btreeComputeFreeSpace(),
// ** that routine will not detect overlap between cells or freeblocks. Nor
// ** does it detect cells or freeblocks that encroach into the reserved bytes
// ** at the end of the page. So do additional corruption checks inside this
// ** routine and return SQLITE_CORRUPT if any problems are found.
// */
func _freeSpace(tls *libc.TLS, pPage uintptr, iStart Tu16, iSize Tu16) (r int32) {
var data, pTmp, p2 uintptr
var hdr, nFrag Tu8
var iEnd Tu32
var iFreeBlk, iOrigSize, iPtr, x, v1 Tu16
var iPtrEnd int32
_, _, _, _, _, _, _, _, _, _, _, _ = data, hdr, iEnd, iFreeBlk, iOrigSize, iPtr, iPtrEnd, nFrag, pTmp, x, v1, p2 /* Page header size. 0 or 100 */
nFrag = uint8(0) /* Reduction in fragmentation */
iOrigSize = iSize /* Offset to cell content area */
iEnd = uint32(int32(iStart) + int32(iSize)) /* First byte past the iStart buffer */
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData /* Temporary ptr into data[] */
/* Minimum cell size is 4 */
/* The list of freeblocks must be in ascending order. Find the
** spot on the list where iStart should be inserted.
*/
hdr = (*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset
iPtr = uint16(int32(hdr) + int32(1))
if int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+int32(1))))) == 0 && int32(*(*uint8)(unsafe.Pointer(data + uintptr(iPtr)))) == 0 {
iFreeBlk = uint16(0) /* Shortcut for the case when the freelist is empty */
} else {
for {
v1 = uint16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iPtr))))< (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-uint32(4) { /* TH3: corrupt081.100 */
return _sqlite3CorruptError(tls, int32(72103))
}
/* At this point:
** iFreeBlk: First freeblock after iStart, or zero if none
** iPtr: The address of a pointer to iFreeBlk
**
** Check to see if iFreeBlk should be coalesced onto the end of iStart.
*/
if iFreeBlk != 0 && iEnd+uint32(3) >= uint32(iFreeBlk) {
nFrag = uint8(uint32(iFreeBlk) - iEnd)
if iEnd > uint32(iFreeBlk) {
return _sqlite3CorruptError(tls, int32(72115))
}
iEnd = uint32(int32(iFreeBlk) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iFreeBlk)+int32(2)))))< (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize {
return _sqlite3CorruptError(tls, int32(72118))
}
iSize = uint16(iEnd - uint32(iStart))
iFreeBlk = uint16(int32(*(*uint8)(unsafe.Pointer(data + uintptr(iFreeBlk))))< int32(hdr)+int32(1) {
iPtrEnd = int32(iPtr) + (int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(iPtr)+int32(2)))))<= int32(iStart) {
if iPtrEnd > int32(iStart) {
return _sqlite3CorruptError(tls, int32(72131))
}
nFrag = Tu8(int32(nFrag) + (int32(iStart) - iPtrEnd))
iSize = uint16(iEnd - uint32(iPtr))
iStart = iPtr
}
}
if int32(nFrag) > int32(*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(7))))) {
return _sqlite3CorruptError(tls, int32(72137))
}
p2 = data + uintptr(int32(hdr)+int32(7))
*(*uint8)(unsafe.Pointer(p2)) = uint8(int32(*(*uint8)(unsafe.Pointer(p2))) - int32(nFrag))
}
pTmp = data + uintptr(int32(hdr)+int32(5))
x = uint16(int32(*(*Tu8)(unsafe.Pointer(pTmp)))<> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(1)) + 1)) = uint8(iFreeBlk)
*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(5)))) = uint8(iEnd >> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(5)) + 1)) = uint8(iEnd)
} else {
/* Insert the new freeblock into the freelist */
*(*uint8)(unsafe.Pointer(data + uintptr(iPtr))) = uint8(int32(iStart) >> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(data + uintptr(iPtr) + 1)) = uint8(iStart)
*(*uint8)(unsafe.Pointer(data + uintptr(iStart))) = uint8(int32(iFreeBlk) >> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(data + uintptr(iStart) + 1)) = uint8(iFreeBlk)
*(*uint8)(unsafe.Pointer(data + uintptr(int32(iStart)+int32(2)))) = uint8(int32(iSize) >> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(data + uintptr(int32(iStart)+int32(2)) + 1)) = uint8(iSize)
}
*(*int32)(unsafe.Pointer(pPage + 20)) += int32(iOrigSize)
return SQLITE_OK
}
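// Editor's note: an illustrative sketch of the coalescing rule described
// above, not part of the generated translation. When the block being freed
// ends at, or within the 3-byte fragment tolerance of, the next freeblock on
// the list, the two merge into one freeblock; the corruption checks and the
// fragment-counter bookkeeping of freeSpace() are omitted.
func exampleCoalesce(iStart, iSize, iFreeBlk, freeBlkSize int) (newSize int, merged bool) {
	iEnd := iStart + iSize
	if iFreeBlk != 0 && iEnd+3 >= iFreeBlk {
		// Any 1-3 byte gap between the blocks is absorbed into the merge.
		return iFreeBlk + freeBlkSize - iStart, true
	}
	return iSize, false
}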
// C documentation
//
// /*
// ** Decode the flags byte (the first byte of the header) for a page
// ** and initialize fields of the MemPage structure accordingly.
// **
// ** Only the following combinations are supported. Anything different
// ** indicates a corrupt database file:
// **
// ** PTF_ZERODATA (0x02, 2)
// ** PTF_LEAFDATA | PTF_INTKEY (0x05, 5)
// ** PTF_ZERODATA | PTF_LEAF (0x0a, 10)
// ** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF (0x0d, 13)
// */
func _decodeFlags(tls *libc.TLS, pPage uintptr, flagByte int32) (r int32) {
var pBt uintptr
_ = pBt /* A copy of pPage->pBt */
pBt = (*TMemPage)(unsafe.Pointer(pPage)).FpBt
(*TMemPage)(unsafe.Pointer(pPage)).Fmax1bytePayload = (*TBtShared)(unsafe.Pointer(pBt)).Fmax1bytePayload
if flagByte >= libc.Int32FromInt32(PTF_ZERODATA)|libc.Int32FromInt32(PTF_LEAF) {
(*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).Fleaf = uint8(1)
if flagByte == libc.Int32FromInt32(PTF_LEAFDATA)|libc.Int32FromInt32(PTF_INTKEY)|libc.Int32FromInt32(PTF_LEAF) {
(*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf = uint8(1)
(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize = __ccgo_fp(_cellSizePtrTableLeaf)
(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell = __ccgo_fp(_btreeParseCellPtr)
(*TMemPage)(unsafe.Pointer(pPage)).FintKey = uint8(1)
(*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal = (*TBtShared)(unsafe.Pointer(pBt)).FmaxLeaf
(*TMemPage)(unsafe.Pointer(pPage)).FminLocal = (*TBtShared)(unsafe.Pointer(pBt)).FminLeaf
} else {
if flagByte == libc.Int32FromInt32(PTF_ZERODATA)|libc.Int32FromInt32(PTF_LEAF) {
(*TMemPage)(unsafe.Pointer(pPage)).FintKey = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize = __ccgo_fp(_cellSizePtrIdxLeaf)
(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell = __ccgo_fp(_btreeParseCellPtrIndex)
(*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal = (*TBtShared)(unsafe.Pointer(pBt)).FmaxLocal
(*TMemPage)(unsafe.Pointer(pPage)).FminLocal = (*TBtShared)(unsafe.Pointer(pBt)).FminLocal
} else {
(*TMemPage)(unsafe.Pointer(pPage)).FintKey = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize = __ccgo_fp(_cellSizePtrIdxLeaf)
(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell = __ccgo_fp(_btreeParseCellPtrIndex)
return _sqlite3CorruptError(tls, int32(72206))
}
}
} else {
(*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize = uint8(4)
(*TMemPage)(unsafe.Pointer(pPage)).Fleaf = uint8(0)
if flagByte == int32(PTF_ZERODATA) {
(*TMemPage)(unsafe.Pointer(pPage)).FintKey = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize = __ccgo_fp(_cellSizePtr)
(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell = __ccgo_fp(_btreeParseCellPtrIndex)
(*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal = (*TBtShared)(unsafe.Pointer(pBt)).FmaxLocal
(*TMemPage)(unsafe.Pointer(pPage)).FminLocal = (*TBtShared)(unsafe.Pointer(pBt)).FminLocal
} else {
if flagByte == libc.Int32FromInt32(PTF_LEAFDATA)|libc.Int32FromInt32(PTF_INTKEY) {
(*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize = __ccgo_fp(_cellSizePtrNoPayload)
(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell = __ccgo_fp(_btreeParseCellPtrNoPayload)
(*TMemPage)(unsafe.Pointer(pPage)).FintKey = uint8(1)
(*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal = (*TBtShared)(unsafe.Pointer(pBt)).FmaxLeaf
(*TMemPage)(unsafe.Pointer(pPage)).FminLocal = (*TBtShared)(unsafe.Pointer(pBt)).FminLeaf
} else {
(*TMemPage)(unsafe.Pointer(pPage)).FintKey = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize = __ccgo_fp(_cellSizePtr)
(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell = __ccgo_fp(_btreeParseCellPtrIndex)
return _sqlite3CorruptError(tls, int32(72230))
}
}
}
return SQLITE_OK
}
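// Editor's note: an illustrative mapping of the four legal flag-byte values
// listed above to the page types they select, not part of the generated
// translation; any other value is treated as corruption by decodeFlags().
func exampleDecodeFlags(flagByte int) (kind string, ok bool) {
	switch flagByte {
	case 2: // PTF_ZERODATA
		return "index interior page", true
	case 5: // PTF_LEAFDATA|PTF_INTKEY
		return "table interior page", true
	case 10: // PTF_ZERODATA|PTF_LEAF
		return "index leaf page", true
	case 13: // PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF
		return "table leaf page", true
	default:
		return "", false
	}
}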
// C documentation
//
// /*
// ** Compute the amount of freespace on the page. In other words, fill
// ** in the pPage->nFree field.
// */
func _btreeComputeFreeSpace(tls *libc.TLS, pPage uintptr) (r int32) {
var data uintptr
var hdr Tu8
var iCellFirst, iCellLast, nFree, pc, top, usableSize int32
var next, size Tu32
_, _, _, _, _, _, _, _, _, _ = data, hdr, iCellFirst, iCellLast, nFree, next, pc, size, top, usableSize /* Last possible cell or freeblock offset */
usableSize = int32((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize)
hdr = (*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData
/* EVIDENCE-OF: R-58015-48175 The two-byte integer at offset 5 designates
** the start of the cell content area. A zero value for this integer is
** interpreted as 65536. */
top = (int32(*(*Tu8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(5)))))< 0 {
if pc < top {
/* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will
** always be at least one cell before the first freeblock.
*/
return _sqlite3CorruptError(tls, int32(72281))
}
for int32(1) != 0 {
if pc > iCellLast {
/* Freeblock off the end of the page */
return _sqlite3CorruptError(tls, int32(72286))
}
next = uint32(int32(*(*Tu8)(unsafe.Pointer(data + uintptr(pc))))< uint32(0) {
/* Freeblock not in ascending order */
return _sqlite3CorruptError(tls, int32(72296))
}
if uint32(pc)+size > uint32(usableSize) {
/* Last freeblock extends past page end */
return _sqlite3CorruptError(tls, int32(72300))
}
}
/* At this point, nFree contains the sum of the offset to the start
** of the cell-content area plus the number of free bytes within
** the cell-content area. If this is greater than the usable-size
** of the page, then the page must be corrupted. This check also
** serves to verify that the offset to the start of the cell-content
** area, according to the page header, lies within the page.
*/
if nFree > usableSize || nFree < iCellFirst {
return _sqlite3CorruptError(tls, int32(72312))
}
(*TMemPage)(unsafe.Pointer(pPage)).FnFree = int32(uint16(nFree - iCellFirst))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Do additional sanity check after btreeInitPage() if
// ** PRAGMA cell_size_check=ON
// */
func _btreeCellSizeCheck(tls *libc.TLS, pPage uintptr) (r int32) {
var cellOffset, i, iCellFirst, iCellLast, pc, sz, usableSize int32
var data uintptr
_, _, _, _, _, _, _, _ = cellOffset, data, i, iCellFirst, iCellLast, pc, sz, usableSize /* Start of cell content area */
iCellFirst = int32((*TMemPage)(unsafe.Pointer(pPage)).FcellOffset) + int32(2)*int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)
usableSize = int32((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize)
iCellLast = usableSize - int32(4)
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData
cellOffset = int32((*TMemPage)(unsafe.Pointer(pPage)).FcellOffset)
if !((*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0) {
iCellLast--
}
i = 0
for {
if !(i < int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)) {
break
}
pc = int32(*(*Tu8)(unsafe.Pointer(data + uintptr(cellOffset+i*int32(2)))))< iCellLast {
return _sqlite3CorruptError(tls, int32(72343))
}
sz = int32((*(*func(*libc.TLS, uintptr, uintptr) Tu16)(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize})))(tls, pPage, data+uintptr(pc)))
if pc+sz > usableSize {
return _sqlite3CorruptError(tls, int32(72348))
}
goto _1
_1:
;
i++
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Initialize the auxiliary information for a disk block.
// **
// ** Return SQLITE_OK on success. If we see that the page does
// ** not contain a well-formed database page, then return
// ** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not
// ** guarantee that the page is well-formed. It only shows that
// ** we failed to detect any corruption.
// */
func _btreeInitPage(tls *libc.TLS, pPage uintptr) (r int32) {
var data, pBt uintptr
_, _ = data, pBt /* The main btree structure */
pBt = (*TMemPage)(unsafe.Pointer(pPage)).FpBt
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)
/* EVIDENCE-OF: R-28594-02890 The one-byte flag at offset 0 indicating
** the b-tree page type. */
if _decodeFlags(tls, pPage, int32(*(*Tu8)(unsafe.Pointer(data)))) != 0 {
return _sqlite3CorruptError(tls, int32(72380))
}
(*TMemPage)(unsafe.Pointer(pPage)).FmaskPage = uint16((*TBtShared)(unsafe.Pointer(pBt)).FpageSize - libc.Uint32FromInt32(1))
(*TMemPage)(unsafe.Pointer(pPage)).FnOverflow = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FcellOffset = uint16(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset) + int32(8) + int32((*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize))
(*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx = data + uintptr((*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize) + uintptr(8)
(*TMemPage)(unsafe.Pointer(pPage)).FaDataEnd = (*TMemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*TBtShared)(unsafe.Pointer(pBt)).FpageSize)
(*TMemPage)(unsafe.Pointer(pPage)).FaDataOfst = (*TMemPage)(unsafe.Pointer(pPage)).FaData + uintptr((*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize)
/* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
** number of cells on the page. */
(*TMemPage)(unsafe.Pointer(pPage)).FnCell = uint16(int32(*(*Tu8)(unsafe.Pointer(data + 3)))< ((*TBtShared)(unsafe.Pointer(pBt)).FpageSize-uint32(8))/uint32(6) {
/* Too many cells for a single page. The page must be corrupt */
return _sqlite3CorruptError(tls, int32(72394))
}
/* EVIDENCE-OF: R-24089-57979 If a page contains no cells (which is only
** possible for a root page of a table that contains no rows) then the
** offset to the cell content area will equal the page size minus the
** bytes of reserved space. */
(*TMemPage)(unsafe.Pointer(pPage)).FnFree = -int32(1) /* Indicate that this value is not yet computed */
(*TMemPage)(unsafe.Pointer(pPage)).FisInit = uint8(1)
if (*Tsqlite3)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).Fdb)).Fflags&uint64(SQLITE_CellSizeCk) != 0 {
return _btreeCellSizeCheck(tls, pPage)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Set up a raw page so that it looks like a database page holding
// ** no entries.
// */
func _zeroPage(tls *libc.TLS, pPage uintptr, flags int32) {
var data, pBt uintptr
var first Tu16
var hdr Tu8
var v1 int32
_, _, _, _, _ = data, first, hdr, pBt, v1
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData
pBt = (*TMemPage)(unsafe.Pointer(pPage)).FpBt
hdr = (*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset
if int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_FAST_SECURE) != 0 {
libc.Xmemset(tls, data+uintptr(hdr), 0, uint64((*TBtShared)(unsafe.Pointer(pBt)).FusableSize-uint32(hdr)))
}
*(*uint8)(unsafe.Pointer(data + uintptr(hdr))) = uint8(int8(flags))
if flags&int32(PTF_LEAF) == 0 {
v1 = int32(12)
} else {
v1 = int32(8)
}
first = uint16(int32(hdr) + v1)
libc.Xmemset(tls, data+uintptr(int32(hdr)+int32(1)), 0, uint64(4))
*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(7)))) = uint8(0)
*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(5)))) = uint8((*TBtShared)(unsafe.Pointer(pBt)).FusableSize >> libc.Int32FromInt32(8))
*(*uint8)(unsafe.Pointer(data + uintptr(int32(hdr)+int32(5)) + 1)) = uint8((*TBtShared)(unsafe.Pointer(pBt)).FusableSize)
(*TMemPage)(unsafe.Pointer(pPage)).FnFree = int32(uint16((*TBtShared)(unsafe.Pointer(pBt)).FusableSize - uint32(first)))
_decodeFlags(tls, pPage, flags)
(*TMemPage)(unsafe.Pointer(pPage)).FcellOffset = first
(*TMemPage)(unsafe.Pointer(pPage)).FaDataEnd = data + uintptr((*TBtShared)(unsafe.Pointer(pBt)).FpageSize)
(*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx = data + uintptr(first)
(*TMemPage)(unsafe.Pointer(pPage)).FaDataOfst = data + uintptr((*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize)
(*TMemPage)(unsafe.Pointer(pPage)).FnOverflow = uint8(0)
(*TMemPage)(unsafe.Pointer(pPage)).FmaskPage = uint16((*TBtShared)(unsafe.Pointer(pBt)).FpageSize - libc.Uint32FromInt32(1))
(*TMemPage)(unsafe.Pointer(pPage)).FnCell = uint16(0)
(*TMemPage)(unsafe.Pointer(pPage)).FisInit = uint8(1)
}
// C documentation
//
// /*
// ** Convert a DbPage obtained from the pager into a MemPage used by
// ** the btree layer.
// */
func _btreePageFromDbPage(tls *libc.TLS, pDbPage uintptr, pgno TPgno, pBt uintptr) (r uintptr) {
var pPage uintptr
var v1 int32
_, _ = pPage, v1
pPage = _sqlite3PagerGetExtra(tls, pDbPage)
if pgno != (*TMemPage)(unsafe.Pointer(pPage)).Fpgno {
(*TMemPage)(unsafe.Pointer(pPage)).FaData = _sqlite3PagerGetData(tls, pDbPage)
(*TMemPage)(unsafe.Pointer(pPage)).FpDbPage = pDbPage
(*TMemPage)(unsafe.Pointer(pPage)).FpBt = pBt
(*TMemPage)(unsafe.Pointer(pPage)).Fpgno = pgno
if pgno == uint32(1) {
v1 = int32(100)
} else {
v1 = 0
}
(*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset = uint8(v1)
}
return pPage
}
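// Illustrative sketch (hand-written addition, not generated by ccgo): the
// hdrOffset rule applied in _btreePageFromDbPage above. Page 1 begins with
// the 100-byte database file header, so its b-tree page header starts at
// byte 100; every other page starts at byte 0. The helper name is
// hypothetical.
func _exampleHdrOffset(pgno uint32) int {
	if pgno == 1 {
		return 100 // skip the database file header on page 1
	}
	return 0
}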
// C documentation
//
// /*
// ** Get a page from the pager. Initialize the MemPage.pBt and
// ** MemPage.aData elements if needed. See also: btreeGetUnusedPage().
// **
// ** If the PAGER_GET_NOCONTENT flag is set, it means that we do not care
// ** about the content of the page at this time. So do not go to the disk
// ** to fetch the content. Just fill in the content with zeros for now.
// ** If in the future we call sqlite3PagerWrite() on this page, that
// ** means we have started to be concerned about content and the disk
// ** read should occur at that point.
// */
func _btreeGetPage(tls *libc.TLS, pBt uintptr, pgno TPgno, ppPage uintptr, flags int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* pDbPage at bp+0 */ uintptr
_ = rc
rc = _sqlite3PagerGet(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, pgno, bp, flags)
if rc != 0 {
return rc
}
*(*uintptr)(unsafe.Pointer(ppPage)) = _btreePageFromDbPage(tls, *(*uintptr)(unsafe.Pointer(bp)), pgno, pBt)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Retrieve a page from the pager cache. If the requested page is not
// ** already in the pager cache return NULL. Initialize the MemPage.pBt and
// ** MemPage.aData elements if needed.
// */
func _btreePageLookup(tls *libc.TLS, pBt uintptr, pgno TPgno) (r uintptr) {
var pDbPage uintptr
_ = pDbPage
pDbPage = _sqlite3PagerLookup(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, pgno)
if pDbPage != 0 {
return _btreePageFromDbPage(tls, pDbPage, pgno, pBt)
}
return uintptr(0)
}
// C documentation
//
// /*
// ** Return the size of the database file in pages. If there is any kind of
// ** error, return ((unsigned int)-1).
// */
func _btreePagecount(tls *libc.TLS, pBt uintptr) (r TPgno) {
return (*TBtShared)(unsafe.Pointer(pBt)).FnPage
}
func _sqlite3BtreeLastPage(tls *libc.TLS, p uintptr) (r TPgno) {
return _btreePagecount(tls, (*TBtree)(unsafe.Pointer(p)).FpBt)
}
// C documentation
//
// /*
// ** Get a page from the pager and initialize it.
// */
func _getAndInitPage(tls *libc.TLS, pBt uintptr, pgno TPgno, ppPage uintptr, bReadOnly int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pPage uintptr
var rc int32
var _ /* pDbPage at bp+0 */ uintptr
_, _ = pPage, rc
if pgno > _btreePagecount(tls, pBt) {
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return _sqlite3CorruptError(tls, int32(72537))
}
rc = _sqlite3PagerGet(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, pgno, bp, bReadOnly)
if rc != 0 {
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return rc
}
pPage = _sqlite3PagerGetExtra(tls, *(*uintptr)(unsafe.Pointer(bp)))
if int32((*TMemPage)(unsafe.Pointer(pPage)).FisInit) == 0 {
_btreePageFromDbPage(tls, *(*uintptr)(unsafe.Pointer(bp)), pgno, pBt)
rc = _btreeInitPage(tls, pPage)
if rc != SQLITE_OK {
_releasePage(tls, pPage)
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return rc
}
}
*(*uintptr)(unsafe.Pointer(ppPage)) = pPage
return SQLITE_OK
}
// C documentation
//
// /*
// ** Release a MemPage. This should be called once for each prior
// ** call to btreeGetPage.
// **
// ** Page1 is a special case and must be released using releasePageOne().
// */
func _releasePageNotNull(tls *libc.TLS, pPage uintptr) {
_sqlite3PagerUnrefNotNull(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
}
func _releasePage(tls *libc.TLS, pPage uintptr) {
if pPage != 0 {
_releasePageNotNull(tls, pPage)
}
}
func _releasePageOne(tls *libc.TLS, pPage uintptr) {
_sqlite3PagerUnrefPageOne(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
}
// C documentation
//
// /*
// ** Get an unused page.
// **
// ** This works just like btreeGetPage() with the addition:
// **
// ** * If the page is already in use for some other purpose, immediately
// ** release it and return an SQLITE_CORRUPT error.
// ** * Make sure the isInit flag is clear
// */
func _btreeGetUnusedPage(tls *libc.TLS, pBt uintptr, pgno TPgno, ppPage uintptr, flags int32) (r int32) {
var rc int32
_ = rc
rc = _btreeGetPage(tls, pBt, pgno, ppPage, flags)
if rc == SQLITE_OK {
if _sqlite3PagerPageRefcount(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage) > int32(1) {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage)))
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
return _sqlite3CorruptError(tls, int32(72609))
}
(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FisInit = uint8(0)
} else {
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
}
return rc
}
// C documentation
//
// /*
// ** During a rollback, when the pager reloads information into the cache
// ** so that the cache is restored to its original state at the start of
// ** the transaction, for each page restored this routine is called.
// **
// ** This routine needs to reset the extra data section at the end of the
// ** page to agree with the restored data.
// */
func _pageReinit(tls *libc.TLS, pData uintptr) {
var pPage uintptr
_ = pPage
pPage = _sqlite3PagerGetExtra(tls, pData)
if (*TMemPage)(unsafe.Pointer(pPage)).FisInit != 0 {
(*TMemPage)(unsafe.Pointer(pPage)).FisInit = uint8(0)
if _sqlite3PagerPageRefcount(tls, pData) > int32(1) {
/* pPage might not be a btree page; it might be an overflow page
** or ptrmap page or a free page. In those cases, the following
** call to btreeInitPage() will likely return SQLITE_CORRUPT.
** But no harm is done by this. And it is very important that
** btreeInitPage() be called on every btree page so we make
** the call for every page that comes in for re-initializing. */
_btreeInitPage(tls, pPage)
}
}
}
// C documentation
//
// /*
// ** Invoke the busy handler for a btree.
// */
func _btreeInvokeBusyHandler(tls *libc.TLS, pArg uintptr) (r int32) {
var pBt uintptr
_ = pBt
pBt = pArg
return _sqlite3InvokeBusyHandler(tls, (*TBtShared)(unsafe.Pointer(pBt)).Fdb+672)
}
// C documentation
//
// /*
// ** Open a database file.
// **
// ** zFilename is the name of the database file. If zFilename is NULL
// ** then an ephemeral database is created. The ephemeral database might
// ** be exclusively in memory, or it might use a disk-based memory cache.
// ** Either way, the ephemeral database will be automatically deleted
// ** when sqlite3BtreeClose() is called.
// **
// ** If zFilename is ":memory:" then an in-memory database is created
// ** that is automatically destroyed when it is closed.
// **
// ** The "flags" parameter is a bitmask that might contain bits like
// ** BTREE_OMIT_JOURNAL and/or BTREE_MEMORY.
// **
// ** If the database is already opened in the same database connection
// ** and we are in shared cache mode, then the open will fail with an
// ** SQLITE_CONSTRAINT error. We cannot allow two or more BtShared
// ** objects in the same database connection since doing so will lead
// ** to problems with locking.
// */
func _sqlite3BtreeOpen(tls *libc.TLS, pVfs uintptr, zFilename uintptr, db uintptr, ppBtree uintptr, flags int32, vfsFlags int32) (r int32) {
bp := tls.Alloc(112)
defer tls.Free(112)
var i, iDb, isMemdb, isTempDb, nFilename, nFullPathname, rc, v1, v6, v7 int32
var mutexOpen, mutexShared, mutexShared1, p, pBt, pExisting, pFile, pSib, zFullPathname, v9, p4, p5 uintptr
var nReserve Tu8
var _ /* zDbHeader at bp+0 */ [100]uint8
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = i, iDb, isMemdb, isTempDb, mutexOpen, mutexShared, mutexShared1, nFilename, nFullPathname, nReserve, p, pBt, pExisting, pFile, pSib, rc, zFullPathname, v1, v6, v7, v9, p4, p5
pBt = uintptr(0) /* Handle to return */
mutexOpen = uintptr(0) /* Prevents a race condition. Ticket #3537 */
rc = SQLITE_OK /* Database header content */
/* True if opening an ephemeral, temporary database */
isTempDb = libc.BoolInt32(zFilename == uintptr(0) || int32(*(*int8)(unsafe.Pointer(zFilename))) == 0)
/* Set the variable isMemdb to true for an in-memory database, or
** false for a file-based database.
*/
isMemdb = libc.BoolInt32(zFilename != 0 && libc.Xstrcmp(tls, zFilename, __ccgo_ts+4192) == 0 || isTempDb != 0 && _sqlite3TempInMemory(tls, db) != 0 || vfsFlags&int32(SQLITE_OPEN_MEMORY) != 0)
/* flags fit in 8 bits */
/* Only a BTREE_SINGLE database can be BTREE_UNORDERED */
/* A BTREE_SINGLE database is always a temporary and/or ephemeral */
if isMemdb != 0 {
flags |= int32(BTREE_MEMORY)
}
if vfsFlags&int32(SQLITE_OPEN_MAIN_DB) != 0 && (isMemdb != 0 || isTempDb != 0) {
vfsFlags = vfsFlags & ^libc.Int32FromInt32(SQLITE_OPEN_MAIN_DB) | int32(SQLITE_OPEN_TEMP_DB)
}
p = _sqlite3MallocZero(tls, uint64(72))
if !(p != 0) {
return int32(SQLITE_NOMEM)
}
(*TBtree)(unsafe.Pointer(p)).FinTrans = uint8(TRANS_NONE)
(*TBtree)(unsafe.Pointer(p)).Fdb = db
(*TBtree)(unsafe.Pointer(p)).Flock.FpBtree = p
(*TBtree)(unsafe.Pointer(p)).Flock.FiTable = uint32(1)
/*
** If this Btree is a candidate for shared cache, try to find an
** existing BtShared object that we can share with
*/
if isTempDb == 0 && (isMemdb == 0 || vfsFlags&int32(SQLITE_OPEN_URI) != 0) {
if vfsFlags&int32(SQLITE_OPEN_SHAREDCACHE) != 0 {
nFilename = _sqlite3Strlen30(tls, zFilename) + int32(1)
nFullPathname = (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FmxPathname + int32(1)
if nFullPathname > nFilename {
v1 = nFullPathname
} else {
v1 = nFilename
}
zFullPathname = _sqlite3Malloc(tls, uint64(v1))
(*TBtree)(unsafe.Pointer(p)).Fsharable = uint8(1)
if !(zFullPathname != 0) {
Xsqlite3_free(tls, p)
return int32(SQLITE_NOMEM)
}
if isMemdb != 0 {
libc.Xmemcpy(tls, zFullPathname, zFilename, uint64(nFilename))
} else {
rc = _sqlite3OsFullPathname(tls, pVfs, zFilename, nFullPathname, zFullPathname)
if rc != 0 {
if rc == libc.Int32FromInt32(SQLITE_OK)|libc.Int32FromInt32(2)<<libc.Int32FromInt32(8) {
rc = SQLITE_OK
} else {
Xsqlite3_free(tls, zFullPathname)
Xsqlite3_free(tls, p)
return rc
}
}
}
mutexOpen = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_OPEN))
Xsqlite3_mutex_enter(tls, mutexOpen)
mutexShared = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MAIN))
Xsqlite3_mutex_enter(tls, mutexShared)
pBt = _sqlite3SharedCacheList
for {
if !(pBt != 0) {
break
}
if libc.Xstrcmp(tls, zFullPathname, _sqlite3PagerFilename(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, 0)) == 0 && _sqlite3PagerVfs(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager) == pVfs {
iDb = (*Tsqlite3)(unsafe.Pointer(db)).FnDb - int32(1)
for {
if !(iDb >= 0) {
break
}
pExisting = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32))).FpBt
if pExisting != 0 && (*TBtree)(unsafe.Pointer(pExisting)).FpBt == pBt {
Xsqlite3_mutex_leave(tls, mutexShared)
Xsqlite3_mutex_leave(tls, mutexOpen)
Xsqlite3_free(tls, zFullPathname)
Xsqlite3_free(tls, p)
return int32(SQLITE_CONSTRAINT)
}
goto _3
_3:
;
iDb--
}
(*TBtree)(unsafe.Pointer(p)).FpBt = pBt
(*TBtShared)(unsafe.Pointer(pBt)).FnRef++
break
}
goto _2
_2:
;
pBt = (*TBtShared)(unsafe.Pointer(pBt)).FpNext
}
Xsqlite3_mutex_leave(tls, mutexShared)
Xsqlite3_free(tls, zFullPathname)
}
}
if pBt == uintptr(0) {
/*
** The following asserts make sure that structures used by the btree are
** the right size. This is to guard against size changes that result
** when compiling on a different architecture.
*/
/* Suppress false-positive compiler warning from PVS-Studio */
libc.Xmemset(tls, bp+16, 0, uint64(8))
pBt = _sqlite3MallocZero(tls, uint64(152))
if pBt == uintptr(0) {
rc = int32(SQLITE_NOMEM)
goto btree_open_out
}
rc = _sqlite3PagerOpen(tls, pVfs, pBt, zFilename, int32(136), flags, vfsFlags, __ccgo_fp(_pageReinit))
if rc == SQLITE_OK {
_sqlite3PagerSetMmapLimit(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, (*Tsqlite3)(unsafe.Pointer(db)).FszMmap)
rc = _sqlite3PagerReadFileheader(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, int32(100), bp)
}
if rc != SQLITE_OK {
goto btree_open_out
}
(*TBtShared)(unsafe.Pointer(pBt)).FopenFlags = uint8(flags)
(*TBtShared)(unsafe.Pointer(pBt)).Fdb = db
_sqlite3PagerSetBusyHandler(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, __ccgo_fp(_btreeInvokeBusyHandler), pBt)
(*TBtree)(unsafe.Pointer(p)).FpBt = pBt
(*TBtShared)(unsafe.Pointer(pBt)).FpCursor = uintptr(0)
(*TBtShared)(unsafe.Pointer(pBt)).FpPage1 = uintptr(0)
if _sqlite3PagerIsreadonly(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager) != 0 {
p4 = pBt + 40
*(*Tu16)(unsafe.Pointer(p4)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p4))) | libc.Int32FromInt32(BTS_READ_ONLY))
}
/* EVIDENCE-OF: R-51873-39618 The page size for a database file is
** determined by the 2-byte integer located at an offset of 16 bytes from
** the beginning of the database file. */
(*TBtShared)(unsafe.Pointer(pBt)).FpageSize = uint32(int32((*(*[100]uint8)(unsafe.Pointer(bp)))[int32(16)])<<libc.Int32FromInt32(8) | int32((*(*[100]uint8)(unsafe.Pointer(bp)))[int32(17)])<<libc.Int32FromInt32(16))
if (*TBtShared)(unsafe.Pointer(pBt)).FpageSize < uint32(512) || (*TBtShared)(unsafe.Pointer(pBt)).FpageSize > uint32(SQLITE_MAX_PAGE_SIZE) || ((*TBtShared)(unsafe.Pointer(pBt)).FpageSize-uint32(1))&(*TBtShared)(unsafe.Pointer(pBt)).FpageSize != uint32(0) {
(*TBtShared)(unsafe.Pointer(pBt)).FpageSize = uint32(0)
/* If the magic name ":memory:" will create an in-memory database, then
** leave the autoVacuum mode at 0 (do not auto-vacuum), even if
** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if
** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a
** regular file-name. In this case the auto-vacuum applies as per normal.
*/
if zFilename != 0 && !(isMemdb != 0) {
(*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum = uint8(libc.Int32FromInt32(0))
(*TBtShared)(unsafe.Pointer(pBt)).FincrVacuum = uint8(libc.Int32FromInt32(0))
}
nReserve = uint8(0)
} else {
/* EVIDENCE-OF: R-37497-42412 The size of the reserved region is
** determined by the one-byte unsigned integer found at an offset of 20
** into the database file header. */
nReserve = (*(*[100]uint8)(unsafe.Pointer(bp)))[int32(20)]
p5 = pBt + 40
*(*Tu16)(unsafe.Pointer(p5)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p5))) | libc.Int32FromInt32(BTS_PAGESIZE_FIXED))
if _sqlite3Get4byte(tls, bp+uintptr(libc.Int32FromInt32(36)+libc.Int32FromInt32(4)*libc.Int32FromInt32(4))) != 0 {
v6 = int32(1)
} else {
v6 = 0
}
(*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum = uint8(v6)
if _sqlite3Get4byte(tls, bp+uintptr(libc.Int32FromInt32(36)+libc.Int32FromInt32(7)*libc.Int32FromInt32(4))) != 0 {
v7 = int32(1)
} else {
v7 = 0
}
(*TBtShared)(unsafe.Pointer(pBt)).FincrVacuum = uint8(v7)
}
rc = _sqlite3PagerSetPagesize(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, pBt+52, int32(nReserve))
if rc != 0 {
goto btree_open_out
}
(*TBtShared)(unsafe.Pointer(pBt)).FusableSize = (*TBtShared)(unsafe.Pointer(pBt)).FpageSize - uint32(nReserve)
/* 8-byte alignment of pageSize */
/* Add the new BtShared object to the linked list sharable BtShareds.
*/
(*TBtShared)(unsafe.Pointer(pBt)).FnRef = int32(1)
if (*TBtree)(unsafe.Pointer(p)).Fsharable != 0 {
mutexShared1 = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MAIN))
if libc.Bool(int32(SQLITE_THREADSAFE) != 0) && _sqlite3Config.FbCoreMutex != 0 {
(*TBtShared)(unsafe.Pointer(pBt)).Fmutex = _sqlite3MutexAlloc(tls, SQLITE_MUTEX_FAST)
if (*TBtShared)(unsafe.Pointer(pBt)).Fmutex == uintptr(0) {
rc = int32(SQLITE_NOMEM)
goto btree_open_out
}
}
Xsqlite3_mutex_enter(tls, mutexShared1)
(*TBtShared)(unsafe.Pointer(pBt)).FpNext = _sqlite3SharedCacheList
_sqlite3SharedCacheList = pBt
Xsqlite3_mutex_leave(tls, mutexShared1)
}
}
/* If the new Btree uses a sharable pBtShared, then link the new
** Btree into the list of all sharable Btrees for the same connection.
** The list is kept in ascending order by pBt address.
*/
if (*TBtree)(unsafe.Pointer(p)).Fsharable != 0 {
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
v9 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
pSib = v9
if v9 != uintptr(0) && (*TBtree)(unsafe.Pointer(pSib)).Fsharable != 0 {
for (*TBtree)(unsafe.Pointer(pSib)).FpPrev != 0 {
pSib = (*TBtree)(unsafe.Pointer(pSib)).FpPrev
}
if uint64((*TBtree)(unsafe.Pointer(p)).FpBt) < uint64((*TBtree)(unsafe.Pointer(pSib)).FpBt) {
(*TBtree)(unsafe.Pointer(p)).FpNext = pSib
(*TBtree)(unsafe.Pointer(p)).FpPrev = uintptr(0)
(*TBtree)(unsafe.Pointer(pSib)).FpPrev = p
} else {
for (*TBtree)(unsafe.Pointer(pSib)).FpNext != 0 && uint64((*TBtree)(unsafe.Pointer((*TBtree)(unsafe.Pointer(pSib)).FpNext)).FpBt) < uint64((*TBtree)(unsafe.Pointer(p)).FpBt) {
pSib = (*TBtree)(unsafe.Pointer(pSib)).FpNext
}
(*TBtree)(unsafe.Pointer(p)).FpNext = (*TBtree)(unsafe.Pointer(pSib)).FpNext
(*TBtree)(unsafe.Pointer(p)).FpPrev = pSib
if (*TBtree)(unsafe.Pointer(p)).FpNext != 0 {
(*TBtree)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpNext)).FpPrev = p
}
(*TBtree)(unsafe.Pointer(pSib)).FpNext = p
}
break
}
goto _8
_8:
;
i++
}
}
*(*uintptr)(unsafe.Pointer(ppBtree)) = p
goto btree_open_out
btree_open_out:
;
if rc != SQLITE_OK {
if pBt != 0 && (*TBtShared)(unsafe.Pointer(pBt)).FpPager != 0 {
_sqlite3PagerClose(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, uintptr(0))
}
Xsqlite3_free(tls, pBt)
Xsqlite3_free(tls, p)
*(*uintptr)(unsafe.Pointer(ppBtree)) = uintptr(0)
} else {
/* If the B-Tree was successfully opened, set the pager-cache size to the
** default value. Except, when opening on an existing shared pager-cache,
** do not change the pager-cache size.
*/
if _sqlite3BtreeSchema(tls, p, 0, uintptr(0)) == uintptr(0) {
_sqlite3BtreeSetCacheSize(tls, p, -int32(2000))
}
pFile = _sqlite3PagerFile(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager)
if (*Tsqlite3_file)(unsafe.Pointer(pFile)).FpMethods != 0 {
_sqlite3OsFileControlHint(tls, pFile, int32(SQLITE_FCNTL_PDB), pBt+8)
}
}
if mutexOpen != 0 {
Xsqlite3_mutex_leave(tls, mutexOpen)
}
return rc
}
// C documentation
//
// /*
// ** Decrement the BtShared.nRef counter. When it reaches zero,
// ** remove the BtShared structure from the sharing list. Return
// ** true if the BtShared.nRef counter reaches zero and return
// ** false if it is still positive.
// */
func _removeFromSharingList(tls *libc.TLS, pBt uintptr) (r int32) {
var pList, pMainMtx uintptr
var removed int32
_, _, _ = pList, pMainMtx, removed
removed = 0
pMainMtx = _sqlite3MutexAlloc(tls, int32(SQLITE_MUTEX_STATIC_MAIN))
Xsqlite3_mutex_enter(tls, pMainMtx)
(*TBtShared)(unsafe.Pointer(pBt)).FnRef--
if (*TBtShared)(unsafe.Pointer(pBt)).FnRef <= 0 {
if _sqlite3SharedCacheList == pBt {
_sqlite3SharedCacheList = (*TBtShared)(unsafe.Pointer(pBt)).FpNext
} else {
pList = _sqlite3SharedCacheList
for pList != 0 && (*TBtShared)(unsafe.Pointer(pList)).FpNext != pBt {
pList = (*TBtShared)(unsafe.Pointer(pList)).FpNext
}
if pList != 0 {
(*TBtShared)(unsafe.Pointer(pList)).FpNext = (*TBtShared)(unsafe.Pointer(pBt)).FpNext
}
}
if int32(SQLITE_THREADSAFE) != 0 {
Xsqlite3_mutex_free(tls, (*TBtShared)(unsafe.Pointer(pBt)).Fmutex)
}
removed = int32(1)
}
Xsqlite3_mutex_leave(tls, pMainMtx)
return removed
}
// C documentation
//
// /*
// ** Make sure pBt->pTmpSpace points to an allocation of
// ** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child
// ** pointer.
// */
func _allocateTempSpace(tls *libc.TLS, pBt uintptr) (r int32) {
var pCur uintptr
_ = pCur
/* This routine is called only by btreeCursor() when allocating the
** first write cursor for the BtShared object */
(*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace = _sqlite3PageMalloc(tls, int32((*TBtShared)(unsafe.Pointer(pBt)).FpageSize))
if (*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace == uintptr(0) {
pCur = (*TBtShared)(unsafe.Pointer(pBt)).FpCursor
(*TBtShared)(unsafe.Pointer(pBt)).FpCursor = (*TBtCursor)(unsafe.Pointer(pCur)).FpNext /* Unlink the cursor */
libc.Xmemset(tls, pCur, 0, uint64(296))
return int32(SQLITE_NOMEM)
}
/* One of the uses of pBt->pTmpSpace is to format cells before
** inserting them into a leaf page (function fillInCell()). If
** a cell is less than 4 bytes in size, it is rounded up to 4 bytes
** by the various routines that manipulate binary cells. Which
** can mean that fillInCell() only initializes the first 2 or 3
** bytes of pTmpSpace, but that the first 4 bytes are copied from
** it into a database page. This is not actually a problem, but it
** does cause a valgrind error when the 1 or 2 bytes of uninitialized
** data is passed to system call write(). So to avoid this error,
** zero the first 4 bytes of temp space here.
**
** Also: Provide four bytes of initialized space before the
** beginning of pTmpSpace as an area available to prepend the
** left-child pointer to the beginning of a cell.
*/
libc.Xmemset(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace, 0, uint64(8))
*(*uintptr)(unsafe.Pointer(pBt + 136)) += uintptr(4)
return SQLITE_OK
}
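// Illustrative sketch (hand-written addition, not generated by ccgo): the
// pTmpSpace arrangement described above, using a Go slice instead of raw
// pointers. The buffer is zero-initialized and the working area starts 4
// bytes in, so a 4-byte left-child pointer can be prepended to a formatted
// cell without touching uninitialized memory. Names are hypothetical.
func _exampleTempSpace(pageSize int) (buf []byte, work []byte) {
	buf = make([]byte, pageSize) // zero-initialized by Go, mirroring the memset above
	work = buf[4:]               // cells are formatted here; buf[0:4] is the prepend area
	return buf, work
}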
// C documentation
//
// /*
// ** Free the pBt->pTmpSpace allocation
// */
func _freeTempSpace(tls *libc.TLS, pBt uintptr) {
if (*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace != 0 {
*(*uintptr)(unsafe.Pointer(pBt + 136)) -= uintptr(4)
_sqlite3PageFree(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace)
(*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace = uintptr(0)
}
}
// C documentation
//
// /*
// ** Close an open database and invalidate all cursors.
// */
func _sqlite3BtreeClose(tls *libc.TLS, p uintptr) (r int32) {
var pBt uintptr
_ = pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
/* Close all cursors opened via this handle. */
_sqlite3BtreeEnter(tls, p)
/* Verify that no other cursors have this Btree open */
/* Rollback any active transaction and free the handle structure.
** The call to sqlite3BtreeRollback() drops any table-locks held by
** this handle.
*/
_sqlite3BtreeRollback(tls, p, SQLITE_OK, 0)
_sqlite3BtreeLeave(tls, p)
/* If there are still other outstanding references to the shared-btree
** structure, return now. The remainder of this procedure cleans
** up the shared-btree.
*/
if !((*TBtree)(unsafe.Pointer(p)).Fsharable != 0) || _removeFromSharingList(tls, pBt) != 0 {
/* The pBt is no longer on the sharing list, so we can access
** it without having to hold the mutex.
**
** Clean out and delete the BtShared object.
*/
_sqlite3PagerClose(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, (*TBtree)(unsafe.Pointer(p)).Fdb)
if (*TBtShared)(unsafe.Pointer(pBt)).FxFreeSchema != 0 && (*TBtShared)(unsafe.Pointer(pBt)).FpSchema != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TBtShared)(unsafe.Pointer(pBt)).FxFreeSchema})))(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpSchema)
}
_sqlite3DbFree(tls, uintptr(0), (*TBtShared)(unsafe.Pointer(pBt)).FpSchema)
_freeTempSpace(tls, pBt)
Xsqlite3_free(tls, pBt)
}
if (*TBtree)(unsafe.Pointer(p)).FpPrev != 0 {
(*TBtree)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpPrev)).FpNext = (*TBtree)(unsafe.Pointer(p)).FpNext
}
if (*TBtree)(unsafe.Pointer(p)).FpNext != 0 {
(*TBtree)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpNext)).FpPrev = (*TBtree)(unsafe.Pointer(p)).FpPrev
}
Xsqlite3_free(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Change the "soft" limit on the number of pages in the cache.
// ** Unused and unmodified pages will be recycled when the number of
// ** pages in the cache exceeds this soft limit. But the size of the
// ** cache is allowed to grow larger than this limit if it contains
// ** dirty pages or pages still in active use.
// */
func _sqlite3BtreeSetCacheSize(tls *libc.TLS, p uintptr, mxPage int32) (r int32) {
var pBt uintptr
_ = pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
_sqlite3PagerSetCachesize(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, mxPage)
_sqlite3BtreeLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Change the "spill" limit on the number of pages in the cache.
// ** If the number of pages exceeds this limit during a write transaction,
// ** the pager might attempt to "spill" pages to the journal early in
// ** order to free up memory.
// **
// ** The value returned is the current spill size. If zero is passed
// ** as an argument, no changes are made to the spill size setting, so
// ** using mxPage of 0 is a way to query the current spill size.
// */
func _sqlite3BtreeSetSpillSize(tls *libc.TLS, p uintptr, mxPage int32) (r int32) {
var pBt uintptr
var res int32
_, _ = pBt, res
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
res = _sqlite3PagerSetSpillsize(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, mxPage)
_sqlite3BtreeLeave(tls, p)
return res
}
// C documentation
//
// /*
// ** Change the limit on the amount of the database file that may be
// ** memory mapped.
// */
func _sqlite3BtreeSetMmapLimit(tls *libc.TLS, p uintptr, szMmap Tsqlite3_int64) (r int32) {
var pBt uintptr
_ = pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
_sqlite3PagerSetMmapLimit(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, szMmap)
_sqlite3BtreeLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Change the way data is synced to disk in order to increase or decrease
// ** how well the database resists damage due to OS crashes and power
// ** failures. Level 1 is the same as asynchronous (no syncs() occur and
// ** there is a high probability of damage) Level 2 is the default. There
// ** is a very low but non-zero probability of damage. Level 3 reduces the
// ** probability of damage to near zero but with a write performance reduction.
// */
func _sqlite3BtreeSetPagerFlags(tls *libc.TLS, p uintptr, pgFlags uint32) (r int32) {
var pBt uintptr
_ = pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
_sqlite3PagerSetFlags(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, pgFlags)
_sqlite3BtreeLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Change the default pages size and the number of reserved bytes per page.
// ** Or, if the page size has already been fixed, return SQLITE_READONLY
// ** without changing anything.
// **
// ** The page size must be a power of 2 between 512 and 65536. If the page
// ** size supplied does not meet this constraint then the page size is not
// ** changed.
// **
// ** Page sizes are constrained to be a power of two so that the region
// ** of the database file used for locking (beginning at PENDING_BYTE,
// ** the first byte past the 1GB boundary, 0x40000000) needs to occur
// ** at the beginning of a page.
// **
// ** If parameter nReserve is less than zero, then the number of reserved
// ** bytes per page is left unchanged.
// **
// ** If the iFix!=0 then the BTS_PAGESIZE_FIXED flag is set so that the page size
// ** and autovacuum mode can no longer be changed.
// */
func _sqlite3BtreeSetPageSize(tls *libc.TLS, p uintptr, pageSize int32, nReserve int32, iFix int32) (r int32) {
var pBt, p1 uintptr
var rc, x int32
_, _, _, _ = pBt, rc, x, p1
rc = SQLITE_OK
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
(*TBtShared)(unsafe.Pointer(pBt)).FnReserveWanted = uint8(nReserve)
x = int32((*TBtShared)(unsafe.Pointer(pBt)).FpageSize - (*TBtShared)(unsafe.Pointer(pBt)).FusableSize)
if nReserve < x {
nReserve = x
}
if int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_PAGESIZE_FIXED) != 0 {
_sqlite3BtreeLeave(tls, p)
return int32(SQLITE_READONLY)
}
if pageSize >= int32(512) && pageSize <= int32(SQLITE_MAX_PAGE_SIZE) && (pageSize-int32(1))&pageSize == 0 {
if nReserve > int32(32) && pageSize == int32(512) {
pageSize = int32(1024)
}
(*TBtShared)(unsafe.Pointer(pBt)).FpageSize = uint32(pageSize)
_freeTempSpace(tls, pBt)
}
rc = _sqlite3PagerSetPagesize(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, pBt+52, nReserve)
(*TBtShared)(unsafe.Pointer(pBt)).FusableSize = (*TBtShared)(unsafe.Pointer(pBt)).FpageSize - uint32(uint16(nReserve))
if iFix != 0 {
p1 = pBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(BTS_PAGESIZE_FIXED))
}
_sqlite3BtreeLeave(tls, p)
return rc
}
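// Illustrative sketch (hand-written addition, not generated by ccgo): the
// page-size validity test used by _sqlite3BtreeSetPageSize above. A legal
// page size is a power of two between 512 and SQLITE_MAX_PAGE_SIZE, checked
// with the usual (n-1)&n == 0 trick. The helper name is hypothetical.
func _exampleValidPageSize(pageSize int32) bool {
	return pageSize >= 512 &&
		pageSize <= int32(SQLITE_MAX_PAGE_SIZE) &&
		(pageSize-1)&pageSize == 0 // power of two
}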
// C documentation
//
// /*
// ** Return the currently defined page size
// */
func _sqlite3BtreeGetPageSize(tls *libc.TLS, p uintptr) (r int32) {
return int32((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FpageSize)
}
// C documentation
//
// /*
// ** This function is similar to sqlite3BtreeGetReserve(), except that it
// ** may only be called if it is guaranteed that the b-tree mutex is already
// ** held.
// **
// ** This is useful in one special case in the backup API code where it is
// ** known that the shared b-tree mutex is held, but the mutex on the
// ** database handle that owns *p is not. In this case if sqlite3BtreeEnter()
// ** were to be called, it might collide with some other operation on the
// ** database handle that owns *p, causing undefined behavior.
// */
func _sqlite3BtreeGetReserveNoMutex(tls *libc.TLS, p uintptr) (r int32) {
var n int32
_ = n
n = int32((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FpageSize - (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FusableSize)
return n
}
// C documentation
//
// /*
// ** Return the number of bytes of space at the end of every page that
// ** are intentionally left unused. This is the "reserved" space that is
// ** sometimes used by extensions.
// **
// ** The value returned is the larger of the current reserve size and
// ** the latest reserve size requested by SQLITE_FCNTL_RESERVE_BYTES.
// ** The amount of reserve can only grow - never shrink.
// */
func _sqlite3BtreeGetRequestedReserve(tls *libc.TLS, p uintptr) (r int32) {
var n1, n2, v1 int32
_, _, _ = n1, n2, v1
_sqlite3BtreeEnter(tls, p)
n1 = int32((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FnReserveWanted)
n2 = _sqlite3BtreeGetReserveNoMutex(tls, p)
_sqlite3BtreeLeave(tls, p)
if n1 > n2 {
v1 = n1
} else {
v1 = n2
}
return v1
}
// C documentation
//
// /*
// ** Set the maximum page count for a database if mxPage is positive.
// ** No changes are made if mxPage is 0 or negative.
// ** Regardless of the value of mxPage, return the maximum page count.
// */
func _sqlite3BtreeMaxPageCount(tls *libc.TLS, p uintptr, mxPage TPgno) (r TPgno) {
var n TPgno
_ = n
_sqlite3BtreeEnter(tls, p)
n = _sqlite3PagerMaxPageCount(tls, (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FpPager, mxPage)
_sqlite3BtreeLeave(tls, p)
return n
}
// C documentation
//
// /*
// ** Change the values for the BTS_SECURE_DELETE and BTS_OVERWRITE flags:
// **
// ** newFlag==0 Both BTS_SECURE_DELETE and BTS_OVERWRITE are cleared
// ** newFlag==1 BTS_SECURE_DELETE set and BTS_OVERWRITE is cleared
// ** newFlag==2 BTS_SECURE_DELETE cleared and BTS_OVERWRITE is set
// ** newFlag==(-1) No changes
// **
// ** This routine acts as a query if newFlag is less than zero
// **
// ** With BTS_OVERWRITE set, deleted content is overwritten by zeros, but
// ** freelist leaf pages are not written back to the database. Thus in-page
// ** deleted content is cleared, but freelist deleted content is not.
// **
// ** With BTS_SECURE_DELETE, operation is like BTS_OVERWRITE with the addition
// ** that freelist leaf pages are written back into the database, increasing
// ** the amount of disk I/O.
// */
func _sqlite3BtreeSecureDelete(tls *libc.TLS, p uintptr, newFlag int32) (r int32) {
var b int32
var p1, p2 uintptr
_, _, _ = b, p1, p2
if p == uintptr(0) {
return 0
}
_sqlite3BtreeEnter(tls, p)
if newFlag >= 0 {
p1 = (*TBtree)(unsafe.Pointer(p)).FpBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(BTS_FAST_SECURE))
p2 = (*TBtree)(unsafe.Pointer(p)).FpBt + 40
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(BTS_SECURE_DELETE)*newFlag)
}
b = int32((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FbtsFlags) & int32(BTS_FAST_SECURE) / int32(BTS_SECURE_DELETE)
_sqlite3BtreeLeave(tls, p)
return b
}
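// Illustrative sketch (hand-written addition, not generated by ccgo): how
// the newFlag argument of _sqlite3BtreeSecureDelete maps onto the
// BTS_SECURE_DELETE and BTS_OVERWRITE bits, and how the current setting is
// reported back (0, 1 or 2). Uses the package constants; the helper name is
// hypothetical.
func _exampleSecureDeleteBits(btsFlags uint16, newFlag int32) (uint16, int32) {
	if newFlag >= 0 {
		btsFlags &^= uint16(BTS_FAST_SECURE)                   // clear both bits first
		btsFlags |= uint16(int32(BTS_SECURE_DELETE) * newFlag) // 0 clears, 1 sets SECURE_DELETE, 2 sets OVERWRITE
	}
	current := (int32(btsFlags) & int32(BTS_FAST_SECURE)) / int32(BTS_SECURE_DELETE)
	return btsFlags, current
}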
// C documentation
//
// /*
// ** Change the 'auto-vacuum' property of the database. If the 'autoVacuum'
// ** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it
// ** is disabled. The default value for the auto-vacuum property is
// ** determined by the SQLITE_DEFAULT_AUTOVACUUM macro.
// */
func _sqlite3BtreeSetAutoVacuum(tls *libc.TLS, p uintptr, autoVacuum int32) (r int32) {
var av Tu8
var pBt uintptr
var rc, v1, v3, v4 int32
var v2 bool
_, _, _, _, _, _, _ = av, pBt, rc, v1, v2, v3, v4
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
rc = SQLITE_OK
av = uint8(autoVacuum)
_sqlite3BtreeEnter(tls, p)
if v2 = int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_PAGESIZE_FIXED) != 0; v2 {
if av != 0 {
v1 = int32(1)
} else {
v1 = 0
}
}
if v2 && v1 != int32((*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum) {
rc = int32(SQLITE_READONLY)
} else {
if av != 0 {
v3 = int32(1)
} else {
v3 = 0
}
(*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum = uint8(v3)
if int32(av) == int32(2) {
v4 = int32(1)
} else {
v4 = 0
}
(*TBtShared)(unsafe.Pointer(pBt)).FincrVacuum = uint8(v4)
}
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** Return the value of the 'auto-vacuum' property. If auto-vacuum is
// ** enabled 1 is returned. Otherwise 0.
// */
func _sqlite3BtreeGetAutoVacuum(tls *libc.TLS, p uintptr) (r int32) {
var rc, v1, v2 int32
_, _, _ = rc, v1, v2
_sqlite3BtreeEnter(tls, p)
if !((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FautoVacuum != 0) {
v1 = BTREE_AUTOVACUUM_NONE
} else {
if !((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FincrVacuum != 0) {
v2 = int32(BTREE_AUTOVACUUM_FULL)
} else {
v2 = int32(BTREE_AUTOVACUUM_INCR)
}
v1 = v2
}
rc = v1
_sqlite3BtreeLeave(tls, p)
return rc
}
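// Illustrative sketch (hand-written addition, not generated by ccgo): the
// mapping applied by the query above, from the two BtShared flags to the
// three BTREE_AUTOVACUUM_* values. The helper name is hypothetical.
func _exampleAutoVacuumMode(autoVacuum, incrVacuum bool) int32 {
	switch {
	case !autoVacuum:
		return int32(BTREE_AUTOVACUUM_NONE) // 0: no auto-vacuum
	case !incrVacuum:
		return int32(BTREE_AUTOVACUUM_FULL) // 1: full auto-vacuum at commit
	default:
		return int32(BTREE_AUTOVACUUM_INCR) // 2: incremental vacuum only
	}
}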
// C documentation
//
// /*
// ** Get a reference to pPage1 of the database file. This will
// ** also acquire a readlock on that file.
// **
// ** SQLITE_OK is returned on success. If the file is not a
// ** well-formed database file, then SQLITE_CORRUPT is returned.
// ** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM
// ** is returned if we run out of memory.
// */
func _lockBtree(tls *libc.TLS, pBt uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nPage, pageSize, usableSize Tu32
var page1, p1, p2, p3 uintptr
var rc, v4, v5 int32
var _ /* isOpen at bp+12 */ int32
var _ /* nPageFile at bp+8 */ Tu32
var _ /* pPage1 at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _ = nPage, page1, pageSize, rc, usableSize, v4, v5, p1, p2, p3 /* Number of pages in the database */
*(*Tu32)(unsafe.Pointer(bp + 8)) = uint32(0) /* Number of pages in the database file */
rc = _sqlite3PagerSharedLock(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager)
if rc != SQLITE_OK {
return rc
}
rc = _btreeGetPage(tls, pBt, uint32(1), bp, 0)
if rc != SQLITE_OK {
return rc
}
/* Do some checking to help ensure the file we opened really is
** a valid database file.
*/
nPage = _sqlite3Get4byte(tls, uintptr(28)+(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData)
_sqlite3PagerPagecount(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, bp+8)
if nPage == uint32(0) || libc.Xmemcmp(tls, uintptr(24)+(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData, uintptr(92)+(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData, uint64(4)) != 0 {
nPage = *(*Tu32)(unsafe.Pointer(bp + 8))
}
if (*Tsqlite3)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).Fdb)).Fflags&uint64(SQLITE_ResetDatabase) != uint64(0) {
nPage = uint32(0)
}
if nPage > uint32(0) {
page1 = (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData
rc = int32(SQLITE_NOTADB)
/* EVIDENCE-OF: R-43737-39999 Every valid SQLite database file begins
** with the following 16 bytes (in hex): 53 51 4c 69 74 65 20 66 6f 72 6d
** 61 74 20 33 00. */
if libc.Xmemcmp(tls, page1, uintptr(unsafe.Pointer(&_zMagicHeader)), uint64(16)) != 0 {
goto page1_init_failed
}
if int32(*(*Tu8)(unsafe.Pointer(page1 + 18))) > int32(2) {
p1 = pBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(BTS_READ_ONLY))
}
if int32(*(*Tu8)(unsafe.Pointer(page1 + 19))) > int32(2) {
goto page1_init_failed
}
/* If the read version is set to 2, this database should be accessed
** in WAL mode. If the log is not already open, open it now. Then
** return SQLITE_OK and return without populating BtShared.pPage1.
** The caller detects this and calls this function again. This is
** required as the version of page 1 currently in the page1 buffer
** may not be the latest version - there may be a newer one in the log
** file.
*/
if int32(*(*Tu8)(unsafe.Pointer(page1 + 19))) == int32(2) && int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_NO_WAL) == 0 {
*(*int32)(unsafe.Pointer(bp + 12)) = 0
rc = _sqlite3PagerOpenWal(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, bp+12)
if rc != SQLITE_OK {
goto page1_init_failed
} else {
if *(*int32)(unsafe.Pointer(bp + 12)) == 0 {
_releasePageOne(tls, *(*uintptr)(unsafe.Pointer(bp)))
return SQLITE_OK
}
}
rc = int32(SQLITE_NOTADB)
} else {
}
/* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload
** fractions and the leaf payload fraction values must be 64, 32, and 32.
**
** The original design allowed these amounts to vary, but as of
** version 3.6.0, we require them to be fixed.
*/
if libc.Xmemcmp(tls, page1+21, __ccgo_ts+4201, uint64(3)) != 0 {
goto page1_init_failed
}
/* EVIDENCE-OF: R-51873-39618 The page size for a database file is
** determined by the 2-byte integer located at an offset of 16 bytes from
** the beginning of the database file. */
pageSize = uint32(int32(*(*Tu8)(unsafe.Pointer(page1 + 16)))<<libc.Int32FromInt32(8) | int32(*(*Tu8)(unsafe.Pointer(page1 + 17)))<<libc.Int32FromInt32(16))
if (pageSize-uint32(1))&pageSize != uint32(0) || pageSize > uint32(SQLITE_MAX_PAGE_SIZE) || pageSize <= uint32(256) {
goto page1_init_failed
}
/* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte
** integer at offset 20 is the number of bytes of space at the end of
** each page to reserve for extensions.
**
** EVIDENCE-OF: R-37497-42412 The size of the reserved region is
** determined by the one-byte unsigned integer found at an offset of 20
** into the database file header. */
usableSize = pageSize - uint32(*(*Tu8)(unsafe.Pointer(page1 + 20)))
if pageSize != (*TBtShared)(unsafe.Pointer(pBt)).FpageSize {
/* After reading the first page of the database assuming a page size
** of BtShared.pageSize, we have discovered that the page-size is
** actually pageSize. Unlock the database, leave pBt->pPage1 at
** zero and return SQLITE_OK. The caller will call this function
** again with the correct page-size.
*/
_releasePageOne(tls, *(*uintptr)(unsafe.Pointer(bp)))
(*TBtShared)(unsafe.Pointer(pBt)).FusableSize = usableSize
(*TBtShared)(unsafe.Pointer(pBt)).FpageSize = pageSize
p2 = pBt + 40
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(BTS_PAGESIZE_FIXED))
_freeTempSpace(tls, pBt)
rc = _sqlite3PagerSetPagesize(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, pBt+52, int32(pageSize-usableSize))
return rc
}
if nPage > *(*Tu32)(unsafe.Pointer(bp + 8)) {
if _sqlite3WritableSchema(tls, (*TBtShared)(unsafe.Pointer(pBt)).Fdb) == 0 {
rc = _sqlite3CorruptError(tls, int32(73547))
goto page1_init_failed
} else {
nPage = *(*Tu32)(unsafe.Pointer(bp + 8))
}
}
/* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to
** be less than 480. In other words, if the page size is 512, then the
** reserved space size cannot exceed 32. */
if usableSize < uint32(480) {
goto page1_init_failed
}
p3 = pBt + 40
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) | libc.Int32FromInt32(BTS_PAGESIZE_FIXED))
(*TBtShared)(unsafe.Pointer(pBt)).FpageSize = pageSize
(*TBtShared)(unsafe.Pointer(pBt)).FusableSize = usableSize
if _sqlite3Get4byte(tls, page1+uintptr(libc.Int32FromInt32(36)+libc.Int32FromInt32(4)*libc.Int32FromInt32(4))) != 0 {
v4 = int32(1)
} else {
v4 = 0
}
(*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum = uint8(v4)
if _sqlite3Get4byte(tls, page1+uintptr(libc.Int32FromInt32(36)+libc.Int32FromInt32(7)*libc.Int32FromInt32(4))) != 0 {
v5 = int32(1)
} else {
v5 = 0
}
(*TBtShared)(unsafe.Pointer(pBt)).FincrVacuum = uint8(v5)
}
/* maxLocal is the maximum amount of payload to store locally for
** a cell. Make sure it is small enough so that at least minFanout
** cells will fit on one page. We assume a 10-byte page header.
** Besides the payload, the cell must store:
** 2-byte pointer to the cell
** 4-byte child pointer
** 9-byte nKey value
** 4-byte nData value
** 4-byte overflow page pointer
** So a cell consists of a 2-byte pointer, a header which is as much as
** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow
** page pointer.
*/
(*TBtShared)(unsafe.Pointer(pBt)).FmaxLocal = uint16(((*TBtShared)(unsafe.Pointer(pBt)).FusableSize-libc.Uint32FromInt32(12))*libc.Uint32FromInt32(64)/libc.Uint32FromInt32(255) - libc.Uint32FromInt32(23))
(*TBtShared)(unsafe.Pointer(pBt)).FminLocal = uint16(((*TBtShared)(unsafe.Pointer(pBt)).FusableSize-libc.Uint32FromInt32(12))*libc.Uint32FromInt32(32)/libc.Uint32FromInt32(255) - libc.Uint32FromInt32(23))
(*TBtShared)(unsafe.Pointer(pBt)).FmaxLeaf = uint16((*TBtShared)(unsafe.Pointer(pBt)).FusableSize - libc.Uint32FromInt32(35))
(*TBtShared)(unsafe.Pointer(pBt)).FminLeaf = uint16(((*TBtShared)(unsafe.Pointer(pBt)).FusableSize-libc.Uint32FromInt32(12))*libc.Uint32FromInt32(32)/libc.Uint32FromInt32(255) - libc.Uint32FromInt32(23))
if int32((*TBtShared)(unsafe.Pointer(pBt)).FmaxLocal) > int32(127) {
(*TBtShared)(unsafe.Pointer(pBt)).Fmax1bytePayload = uint8(127)
} else {
(*TBtShared)(unsafe.Pointer(pBt)).Fmax1bytePayload = uint8((*TBtShared)(unsafe.Pointer(pBt)).FmaxLocal)
}
(*TBtShared)(unsafe.Pointer(pBt)).FpPage1 = *(*uintptr)(unsafe.Pointer(bp))
(*TBtShared)(unsafe.Pointer(pBt)).FnPage = nPage
return SQLITE_OK
goto page1_init_failed
page1_init_failed:
;
_releasePageOne(tls, *(*uintptr)(unsafe.Pointer(bp)))
(*TBtShared)(unsafe.Pointer(pBt)).FpPage1 = uintptr(0)
return rc
}
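// Illustrative sketch (hand-written addition, not generated by ccgo): the
// header checks that _lockBtree applies to page 1, restated over a plain
// byte slice: the 16-byte magic string, the read-format byte at offset 19,
// the page-size field at offset 16, the reserved-space byte at offset 20,
// and the minimum usable size of 480 bytes. (The write-format byte at
// offset 18 only forces read-only mode and is omitted here.) Names are
// hypothetical and error handling is reduced to a bool.
func _exampleCheckDbHeader(h []byte) (pageSize, usableSize uint32, ok bool) {
	const magic = "SQLite format 3\x00"
	if len(h) < 100 || string(h[:16]) != magic {
		return 0, 0, false
	}
	if h[19] > 2 { // file-format read version; larger values cannot be read
		return 0, 0, false
	}
	pageSize = uint32(h[16])<<8 | uint32(h[17])<<16 // a stored value of 1 encodes 65536
	if (pageSize-1)&pageSize != 0 || pageSize > uint32(SQLITE_MAX_PAGE_SIZE) || pageSize <= 256 {
		return 0, 0, false
	}
	usableSize = pageSize - uint32(h[20]) // byte 20 holds the reserved-space size
	if usableSize < 480 {
		return 0, 0, false
	}
	return pageSize, usableSize, true
}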
// C documentation
//
// /*
// ** If there are no outstanding cursors and we are not in the middle
// ** of a transaction but there is a read lock on the database, then
// ** this routine unrefs the first page of the database file which
// ** has the effect of releasing the read lock.
// **
// ** If there is a transaction in progress, this routine is a no-op.
// */
func _unlockBtreeIfUnused(tls *libc.TLS, pBt uintptr) {
var pPage1 uintptr
_ = pPage1
if int32((*TBtShared)(unsafe.Pointer(pBt)).FinTransaction) == TRANS_NONE && (*TBtShared)(unsafe.Pointer(pBt)).FpPage1 != uintptr(0) {
pPage1 = (*TBtShared)(unsafe.Pointer(pBt)).FpPage1
(*TBtShared)(unsafe.Pointer(pBt)).FpPage1 = uintptr(0)
_releasePageOne(tls, pPage1)
}
}
// C documentation
//
// /*
// ** If pBt points to an empty file then convert that empty file
// ** into a new empty database by initializing the first page of
// ** the database.
// */
func _newDatabase(tls *libc.TLS, pBt uintptr) (r int32) {
var data, pP1, p1 uintptr
var rc int32
_, _, _, _ = data, pP1, rc, p1
if (*TBtShared)(unsafe.Pointer(pBt)).FnPage > uint32(0) {
return SQLITE_OK
}
pP1 = (*TBtShared)(unsafe.Pointer(pBt)).FpPage1
data = (*TMemPage)(unsafe.Pointer(pP1)).FaData
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pP1)).FpDbPage)
if rc != 0 {
return rc
}
libc.Xmemcpy(tls, data, uintptr(unsafe.Pointer(&_zMagicHeader)), uint64(16))
*(*uint8)(unsafe.Pointer(data + 16)) = uint8((*TBtShared)(unsafe.Pointer(pBt)).FpageSize >> libc.Int32FromInt32(8) & libc.Uint32FromInt32(0xff))
*(*uint8)(unsafe.Pointer(data + 17)) = uint8((*TBtShared)(unsafe.Pointer(pBt)).FpageSize >> libc.Int32FromInt32(16) & libc.Uint32FromInt32(0xff))
*(*uint8)(unsafe.Pointer(data + 18)) = uint8(1)
*(*uint8)(unsafe.Pointer(data + 19)) = uint8(1)
*(*uint8)(unsafe.Pointer(data + 20)) = uint8((*TBtShared)(unsafe.Pointer(pBt)).FpageSize - (*TBtShared)(unsafe.Pointer(pBt)).FusableSize)
*(*uint8)(unsafe.Pointer(data + 21)) = uint8(64)
*(*uint8)(unsafe.Pointer(data + 22)) = uint8(32)
*(*uint8)(unsafe.Pointer(data + 23)) = uint8(32)
libc.Xmemset(tls, data+24, 0, uint64(libc.Int32FromInt32(100)-libc.Int32FromInt32(24)))
_zeroPage(tls, pP1, libc.Int32FromInt32(PTF_INTKEY)|libc.Int32FromInt32(PTF_LEAF)|libc.Int32FromInt32(PTF_LEAFDATA))
p1 = pBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(BTS_PAGESIZE_FIXED))
_sqlite3Put4byte(tls, data+uintptr(libc.Int32FromInt32(36)+libc.Int32FromInt32(4)*libc.Int32FromInt32(4)), uint32((*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum))
_sqlite3Put4byte(tls, data+uintptr(libc.Int32FromInt32(36)+libc.Int32FromInt32(7)*libc.Int32FromInt32(4)), uint32((*TBtShared)(unsafe.Pointer(pBt)).FincrVacuum))
(*TBtShared)(unsafe.Pointer(pBt)).FnPage = uint32(1)
*(*uint8)(unsafe.Pointer(data + 31)) = uint8(1)
return SQLITE_OK
}
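// Illustrative sketch (hand-written addition, not generated by ccgo): the
// header fields that _newDatabase writes into page 1, shown over a plain
// byte slice. Bytes 21..23 carry the fixed payload fractions (64, 32, 32)
// required since version 3.6.0, and the 4-byte page count at offset 28 is
// set to 1 by writing its low byte. The helper name is hypothetical and the
// auto-vacuum settings are simplified to booleans.
func _exampleNewDbHeader(data []byte, pageSize, usableSize uint32, autoVacuum, incrVacuum bool) {
	copy(data[0:16], "SQLite format 3\x00")
	data[16] = byte(pageSize >> 8 & 0xff) // page size, big-endian (a stored 1 encodes 65536)
	data[17] = byte(pageSize >> 16 & 0xff)
	data[18], data[19] = 1, 1              // write/read format versions: legacy, non-WAL
	data[20] = byte(pageSize - usableSize) // reserved bytes at the end of each page
	data[21], data[22], data[23] = 64, 32, 32
	for i := 24; i < 100; i++ {
		data[i] = 0
	}
	put4 := func(off int, v uint32) { // big-endian 32-bit store, like sqlite3Put4byte
		data[off] = byte(v >> 24)
		data[off+1] = byte(v >> 16)
		data[off+2] = byte(v >> 8)
		data[off+3] = byte(v)
	}
	av, iv := uint32(0), uint32(0)
	if autoVacuum {
		av = 1
	}
	if incrVacuum {
		iv = 1
	}
	put4(36+4*4, av) // header meta slot that marks auto-vacuum databases
	put4(36+7*4, iv) // header meta slot holding the incremental-vacuum flag
	data[31] = 1     // low byte of the 4-byte page count at offset 28: one page
}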
// C documentation
//
// /*
// ** Initialize the first page of the database file (creating a database
// ** consisting of a single page and no schema objects). Return SQLITE_OK
// ** if successful, or an SQLite error code otherwise.
// */
func _sqlite3BtreeNewDb(tls *libc.TLS, p uintptr) (r int32) {
var rc int32
_ = rc
_sqlite3BtreeEnter(tls, p)
(*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FnPage = uint32(0)
rc = _newDatabase(tls, (*TBtree)(unsafe.Pointer(p)).FpBt)
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** Attempt to start a new transaction. A write-transaction
// ** is started if the second argument is nonzero, otherwise a read-
// ** transaction. If the second argument is 2 or more and exclusive
// ** transaction is started, meaning that no other process is allowed
// ** to access the database. A preexisting transaction may not be
// ** upgraded to exclusive by calling this routine a second time - the
// ** exclusivity flag only works for a new transaction.
// **
// ** A write-transaction must be started before attempting any
// ** changes to the database. None of the following routines
// ** will work unless a transaction is started first:
// **
// ** sqlite3BtreeCreateTable()
// ** sqlite3BtreeCreateIndex()
// ** sqlite3BtreeClearTable()
// ** sqlite3BtreeDropTable()
// ** sqlite3BtreeInsert()
// ** sqlite3BtreeDelete()
// ** sqlite3BtreeUpdateMeta()
// **
// ** If an initial attempt to acquire the lock fails because of lock contention
// ** and the database was previously unlocked, then invoke the busy handler
// ** if there is one. But if there was previously a read-lock, do not
// ** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is
// ** returned when there is already a read-lock in order to avoid a deadlock.
// **
// ** Suppose there are two processes A and B. A has a read lock and B has
// ** a reserved lock. B tries to promote to exclusive but is blocked because
// ** of A's read lock. A tries to promote to reserved but is blocked by B.
// ** One or the other of the two processes must give way or there can be
// ** no progress. By returning SQLITE_BUSY and not invoking the busy callback
// ** when A already has a read lock, we encourage A to give up and let B
// ** proceed.
// */
func _btreeBeginTrans(tls *libc.TLS, p uintptr, wrflag int32, pSchemaVersion uintptr) (r int32) {
var pBlock, pBt, pIter, pPage1, pPager, p1, p3, p4, p8, p9 uintptr
var rc, v5, v7 int32
var v6 bool
_, _, _, _, _, _, _, _, _, _, _, _, _, _ = pBlock, pBt, pIter, pPage1, pPager, rc, v5, v6, v7, p1, p3, p4, p8, p9
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
pPager = (*TBtShared)(unsafe.Pointer(pBt)).FpPager
rc = SQLITE_OK
_sqlite3BtreeEnter(tls, p)
/* If the btree is already in a write-transaction, or it
** is already in a read-transaction and a read-transaction
** is requested, this is a no-op.
*/
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == int32(TRANS_WRITE) || int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == int32(TRANS_READ) && !(wrflag != 0) {
goto trans_begun
}
if (*Tsqlite3)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).Fdb)).Fflags&uint64(SQLITE_ResetDatabase) != 0 && int32(_sqlite3PagerIsreadonly(tls, pPager)) == 0 {
p1 = pBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(BTS_READ_ONLY))
}
/* Write transactions are not possible on a read-only database */
if int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_READ_ONLY) != 0 && wrflag != 0 {
rc = int32(SQLITE_READONLY)
goto trans_begun
}
pBlock = uintptr(0)
/* If another database handle has already opened a write transaction
** on this shared-btree structure and a second write transaction is
** requested, return SQLITE_LOCKED.
*/
if wrflag != 0 && int32((*TBtShared)(unsafe.Pointer(pBt)).FinTransaction) == int32(TRANS_WRITE) || int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_PENDING) != 0 {
pBlock = (*TBtree)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpWriter)).Fdb
} else {
if wrflag > int32(1) {
pIter = (*TBtShared)(unsafe.Pointer(pBt)).FpLock
for {
if !(pIter != 0) {
break
}
if (*TBtLock)(unsafe.Pointer(pIter)).FpBtree != p {
pBlock = (*TBtree)(unsafe.Pointer((*TBtLock)(unsafe.Pointer(pIter)).FpBtree)).Fdb
break
}
goto _2
_2:
;
pIter = (*TBtLock)(unsafe.Pointer(pIter)).FpNext
}
}
}
if pBlock != 0 {
_sqlite3ConnectionBlocked(tls, (*TBtree)(unsafe.Pointer(p)).Fdb, pBlock)
rc = libc.Int32FromInt32(SQLITE_LOCKED) | libc.Int32FromInt32(1)<<libc.Int32FromInt32(8)
goto trans_begun
}
/* Any read-only or read-write transaction implies a read-lock on
** page 1. So if some other shared-cache client already has a write-lock
** on page 1, the transaction cannot be opened. */
rc = _querySharedCacheTableLock(tls, p, uint32(SCHEMA_ROOT), uint8(READ_LOCK))
if SQLITE_OK != rc {
goto trans_begun
}
p3 = pBt + 40
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^libc.Int32FromInt32(BTS_INITIALLY_EMPTY))
if (*TBtShared)(unsafe.Pointer(pBt)).FnPage == uint32(0) {
p4 = pBt + 40
*(*Tu16)(unsafe.Pointer(p4)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p4))) | libc.Int32FromInt32(BTS_INITIALLY_EMPTY))
}
/* Call lockBtree() until either pBt->pPage1 is populated or
** lockBtree() returns something other than SQLITE_OK. lockBtree()
** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after
** reading page 1 it discovers that the page-size of the database
** file is not pBt->pageSize. In this case lockBtree() will update
** pBt->pageSize to the page-size of the file on disk.
*/
for {
if v6 = (*TBtShared)(unsafe.Pointer(pBt)).FpPage1 == uintptr(0); v6 {
v5 = _lockBtree(tls, pBt)
rc = v5
}
if !(v6 && SQLITE_OK == v5) {
break
}
}
if rc == SQLITE_OK && wrflag != 0 {
if int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_READ_ONLY) != 0 {
rc = int32(SQLITE_READONLY)
} else {
rc = _sqlite3PagerBegin(tls, pPager, libc.BoolInt32(wrflag > int32(1)), _sqlite3TempInMemory(tls, (*TBtree)(unsafe.Pointer(p)).Fdb))
if rc == SQLITE_OK {
rc = _newDatabase(tls, pBt)
} else {
if rc == libc.Int32FromInt32(SQLITE_BUSY)|libc.Int32FromInt32(2)<<libc.Int32FromInt32(8) && int32((*TBtShared)(unsafe.Pointer(pBt)).FinTransaction) == TRANS_NONE {
/* if there was no transaction opened when this function was
** called and SQLITE_BUSY_SNAPSHOT is returned, change the error
** code to SQLITE_BUSY. */
rc = int32(SQLITE_BUSY)
}
}
}
}
if rc == SQLITE_OK {
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == TRANS_NONE {
(*TBtShared)(unsafe.Pointer(pBt)).FnTransaction++
if (*TBtree)(unsafe.Pointer(p)).Fsharable != 0 {
(*TBtree)(unsafe.Pointer(p)).Flock.FeLock = uint8(READ_LOCK)
(*TBtree)(unsafe.Pointer(p)).Flock.FpNext = (*TBtShared)(unsafe.Pointer(pBt)).FpLock
(*TBtShared)(unsafe.Pointer(pBt)).FpLock = p + 48
}
}
if wrflag != 0 {
v7 = int32(TRANS_WRITE)
} else {
v7 = int32(TRANS_READ)
}
(*TBtree)(unsafe.Pointer(p)).FinTrans = uint8(v7)
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) > int32((*TBtShared)(unsafe.Pointer(pBt)).FinTransaction) {
(*TBtShared)(unsafe.Pointer(pBt)).FinTransaction = (*TBtree)(unsafe.Pointer(p)).FinTrans
}
if wrflag != 0 {
pPage1 = (*TBtShared)(unsafe.Pointer(pBt)).FpPage1
(*TBtShared)(unsafe.Pointer(pBt)).FpWriter = p
p8 = pBt + 40
*(*Tu16)(unsafe.Pointer(p8)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p8))) & ^libc.Int32FromInt32(BTS_EXCLUSIVE))
if wrflag > int32(1) {
p9 = pBt + 40
*(*Tu16)(unsafe.Pointer(p9)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p9))) | libc.Int32FromInt32(BTS_EXCLUSIVE))
}
/* If the db-size header field is incorrect (as it may be if an old
** client has been writing the database file), update it now. Doing
** this sooner rather than later means the database size can safely be
** re-read from page 1 if a savepoint or transaction
** rollback occurs within the transaction.
*/
if (*TBtShared)(unsafe.Pointer(pBt)).FnPage != _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+28) {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FpDbPage)
if rc == SQLITE_OK {
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+28, (*TBtShared)(unsafe.Pointer(pBt)).FnPage)
}
}
}
}
goto trans_begun
trans_begun:
;
if rc == SQLITE_OK {
if pSchemaVersion != 0 {
*(*int32)(unsafe.Pointer(pSchemaVersion)) = int32(_sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+40))
}
if wrflag != 0 {
/* This call makes sure that the pager has the correct number of
** open savepoints. If the second parameter is greater than 0 and
** the sub-journal is not already open, then it will be opened here.
*/
rc = _sqlite3PagerOpenSavepoint(tls, pPager, (*Tsqlite3)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).Fdb)).FnSavepoint)
}
}
_sqlite3BtreeLeave(tls, p)
return rc
}
func _sqlite3BtreeBeginTrans(tls *libc.TLS, p uintptr, wrflag int32, pSchemaVersion uintptr) (r int32) {
var pBt uintptr
_ = pBt
if (*TBtree)(unsafe.Pointer(p)).Fsharable != 0 || int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == TRANS_NONE || int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == int32(TRANS_READ) && wrflag != 0 {
return _btreeBeginTrans(tls, p, wrflag, pSchemaVersion)
}
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
if pSchemaVersion != 0 {
*(*int32)(unsafe.Pointer(pSchemaVersion)) = int32(_sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+40))
}
if wrflag != 0 {
/* This call makes sure that the pager has the correct number of
** open savepoints. If the second parameter is greater than 0 and
** the sub-journal is not already open, then it will be opened here.
*/
return _sqlite3PagerOpenSavepoint(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, (*Tsqlite3)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).Fdb)).FnSavepoint)
} else {
return SQLITE_OK
}
return r
}
// C documentation
//
// /*
// ** Set the pointer-map entries for all children of page pPage. Also, if
// ** pPage contains cells that point to overflow pages, set the pointer
// ** map entries for the overflow pages as well.
// */
func _setChildPtrmaps(tls *libc.TLS, pPage uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var childPgno, childPgno1, pgno TPgno
var i, nCell, v1 int32
var pBt, pCell uintptr
var _ /* rc at bp+0 */ int32
_, _, _, _, _, _, _, _ = childPgno, childPgno1, i, nCell, pBt, pCell, pgno, v1 /* Return code */
pBt = (*TMemPage)(unsafe.Pointer(pPage)).FpBt
pgno = (*TMemPage)(unsafe.Pointer(pPage)).Fpgno
if (*TMemPage)(unsafe.Pointer(pPage)).FisInit != 0 {
v1 = SQLITE_OK
} else {
v1 = _btreeInitPage(tls, pPage)
}
*(*int32)(unsafe.Pointer(bp)) = v1
if *(*int32)(unsafe.Pointer(bp)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp))
}
nCell = int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)
i = 0
for {
if !(i < nCell) {
break
}
pCell = (*TMemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*i))))< (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) {
return _sqlite3CorruptError(tls, int32(74014))
}
if iFrom == _sqlite3Get4byte(tls, pCell+uintptr((*(*TCellInfo)(unsafe.Pointer(bp))).FnSize)-uintptr(4)) {
_sqlite3Put4byte(tls, pCell+uintptr((*(*TCellInfo)(unsafe.Pointer(bp))).FnSize)-uintptr(4), iTo)
break
}
}
} else {
if pCell+uintptr(4) > (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize) {
return _sqlite3CorruptError(tls, int32(74023))
}
if _sqlite3Get4byte(tls, pCell) == iFrom {
_sqlite3Put4byte(tls, pCell, iTo)
break
}
}
goto _2
_2:
;
i++
}
if i == nCell {
if int32(eType) != int32(PTRMAP_BTREE) || _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(8))) != iFrom {
return _sqlite3CorruptError(tls, int32(74035))
}
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(8)), iTo)
}
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Move the open database page pDbPage to location iFreePage in the
// ** database. The pDbPage reference remains valid.
// **
// ** The isCommit flag indicates that there is no need to remember that
// ** the journal needs to be sync()ed before database page pDbPage->pgno
// ** can be written to. The caller has already promised not to write to that
// ** page.
// */
func _relocatePage(tls *libc.TLS, pBt uintptr, pDbPage uintptr, eType Tu8, iPtrPage TPgno, iFreePage TPgno, isCommit int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iDbPage, nextOvfl TPgno
var pPager uintptr
var _ /* pPtrPage at bp+0 */ uintptr
var _ /* rc at bp+8 */ int32
_, _, _ = iDbPage, nextOvfl, pPager /* The page that contains a pointer to pDbPage */
iDbPage = (*TMemPage)(unsafe.Pointer(pDbPage)).Fpgno
pPager = (*TBtShared)(unsafe.Pointer(pBt)).FpPager
if iDbPage < uint32(3) {
return _sqlite3CorruptError(tls, int32(74070))
}
/* Move page iDbPage from its current location to page number iFreePage */
*(*int32)(unsafe.Pointer(bp + 8)) = _sqlite3PagerMovepage(tls, pPager, (*TMemPage)(unsafe.Pointer(pDbPage)).FpDbPage, iFreePage, isCommit)
if *(*int32)(unsafe.Pointer(bp + 8)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 8))
}
(*TMemPage)(unsafe.Pointer(pDbPage)).Fpgno = iFreePage
/* If pDbPage was a btree-page, then it may have child pages and/or cells
** that point to overflow pages. The pointer map entries for all these
** pages need to be changed.
**
** If pDbPage is an overflow page, then the first 4 bytes may store a
** pointer to a subsequent overflow page. If this is the case, then
** the pointer map needs to be updated for the subsequent overflow page.
*/
if int32(eType) == int32(PTRMAP_BTREE) || int32(eType) == int32(PTRMAP_ROOTPAGE) {
*(*int32)(unsafe.Pointer(bp + 8)) = _setChildPtrmaps(tls, pDbPage)
if *(*int32)(unsafe.Pointer(bp + 8)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 8))
}
} else {
nextOvfl = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pDbPage)).FaData)
if nextOvfl != uint32(0) {
_ptrmapPut(tls, pBt, nextOvfl, uint8(PTRMAP_OVERFLOW2), iFreePage, bp+8)
if *(*int32)(unsafe.Pointer(bp + 8)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 8))
}
}
}
/* Fix the database pointer on page iPtrPage that pointed at iDbPage so
** that it points at iFreePage. Also fix the pointer map entry for
** iPtrPage.
*/
if int32(eType) != int32(PTRMAP_ROOTPAGE) {
*(*int32)(unsafe.Pointer(bp + 8)) = _btreeGetPage(tls, pBt, iPtrPage, bp, 0)
if *(*int32)(unsafe.Pointer(bp + 8)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 8))
}
*(*int32)(unsafe.Pointer(bp + 8)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
if *(*int32)(unsafe.Pointer(bp + 8)) != SQLITE_OK {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
return *(*int32)(unsafe.Pointer(bp + 8))
}
*(*int32)(unsafe.Pointer(bp + 8)) = _modifyPagePointer(tls, *(*uintptr)(unsafe.Pointer(bp)), iDbPage, iFreePage, eType)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK {
_ptrmapPut(tls, pBt, iFreePage, eType, iPtrPage, bp+8)
}
}
return *(*int32)(unsafe.Pointer(bp + 8))
}
// C documentation
//
// /*
// ** Perform a single step of an incremental-vacuum. If successful, return
// ** SQLITE_OK. If there is no work to do (and therefore no point in
// ** calling this function again), return SQLITE_DONE. Or, if an error
// ** occurs, return some other error code.
// **
// ** More specifically, this function attempts to re-organize the database so
// ** that the last page of the file currently in use is no longer in use.
// **
// ** Parameter nFin is the number of pages that this database would contain
// ** were this function called until it returns SQLITE_DONE.
// **
// ** If the bCommit parameter is non-zero, this function assumes that the
// ** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE
// ** or an error. bCommit is passed true for an auto-vacuum-on-commit
// ** operation, or false for an incremental vacuum.
// */
func _incrVacuumStep(tls *libc.TLS, pBt uintptr, nFin TPgno, iLastPg TPgno, bCommit int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var dbSize, iNear, nFreeList TPgno
var eMode Tu8
var rc int32
var _ /* eType at bp+0 */ Tu8
var _ /* iFreePg at bp+24 */ TPgno
var _ /* iFreePg at bp+8 */ TPgno
var _ /* iPtrPage at bp+4 */ TPgno
var _ /* pFreePg at bp+16 */ uintptr
var _ /* pFreePg at bp+40 */ uintptr
var _ /* pLastPg at bp+32 */ uintptr
_, _, _, _, _ = dbSize, eMode, iNear, nFreeList, rc
if !(_ptrmapPageno(tls, pBt, iLastPg) == iLastPg) && iLastPg != uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
nFreeList = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36)
if nFreeList == uint32(0) {
return int32(SQLITE_DONE)
}
rc = _ptrmapGet(tls, pBt, iLastPg, bp, bp+4)
if rc != SQLITE_OK {
return rc
}
if int32(*(*Tu8)(unsafe.Pointer(bp))) == int32(PTRMAP_ROOTPAGE) {
return _sqlite3CorruptError(tls, int32(74168))
}
if int32(*(*Tu8)(unsafe.Pointer(bp))) == int32(PTRMAP_FREEPAGE) {
if bCommit == 0 {
rc = _allocateBtreePage(tls, pBt, bp+16, bp+8, iLastPg, uint8(BTALLOC_EXACT))
if rc != SQLITE_OK {
return rc
}
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 16)))
}
} else {
eMode = uint8(BTALLOC_ANY) /* Mode parameter for allocateBtreePage() */
iNear = uint32(0) /* nearby parameter for allocateBtreePage() */
rc = _btreeGetPage(tls, pBt, iLastPg, bp+32, 0)
if rc != SQLITE_OK {
return rc
}
/* If bCommit is zero, this loop runs exactly once and page pLastPg
** is swapped with the first free page pulled off the free list.
**
** On the other hand, if bCommit is greater than zero, then keep
** looping until a free-page located within the first nFin pages
** of the file is found.
*/
if bCommit == 0 {
eMode = uint8(BTALLOC_LE)
iNear = nFin
}
for cond := true; cond; cond = bCommit != 0 && *(*TPgno)(unsafe.Pointer(bp + 24)) > nFin {
dbSize = _btreePagecount(tls, pBt)
rc = _allocateBtreePage(tls, pBt, bp+40, bp+24, iNear, eMode)
if rc != SQLITE_OK {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32)))
return rc
}
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 40)))
if *(*TPgno)(unsafe.Pointer(bp + 24)) > dbSize {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32)))
return _sqlite3CorruptError(tls, int32(74220))
}
}
rc = _relocatePage(tls, pBt, *(*uintptr)(unsafe.Pointer(bp + 32)), *(*Tu8)(unsafe.Pointer(bp)), *(*TPgno)(unsafe.Pointer(bp + 4)), *(*TPgno)(unsafe.Pointer(bp + 24)), bCommit)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 32)))
if rc != SQLITE_OK {
return rc
}
}
}
if bCommit == 0 {
for cond := true; cond; cond = iLastPg == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) || _ptrmapPageno(tls, pBt, iLastPg) == iLastPg {
iLastPg--
}
(*TBtShared)(unsafe.Pointer(pBt)).FbDoTruncate = uint8(1)
(*TBtShared)(unsafe.Pointer(pBt)).FnPage = iLastPg
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** The database opened by the first argument is an auto-vacuum database
// ** nOrig pages in size containing nFree free pages. Return the expected
// ** size of the database in pages following an auto-vacuum operation.
// */
func _finalDbSize(tls *libc.TLS, pBt uintptr, nOrig TPgno, nFree TPgno) (r TPgno) {
var nEntry int32
var nFin, nPtrmap TPgno
_, _, _ = nEntry, nFin, nPtrmap /* Return value */
nEntry = int32((*TBtShared)(unsafe.Pointer(pBt)).FusableSize / uint32(5))
nPtrmap = (nFree - nOrig + _ptrmapPageno(tls, pBt, nOrig) + uint32(nEntry)) / uint32(nEntry)
nFin = nOrig - nFree - nPtrmap
if nOrig > uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) && nFin < uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
nFin--
}
for _ptrmapPageno(tls, pBt, nFin) == nFin || nFin == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
nFin--
}
return nFin
}
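// Worked example for finalDbSize above (illustrative, not part of the
// generated amalgamation): each pointer-map entry occupies 5 bytes, so with
// a usable page size of 4096 bytes one pointer-map page covers 4096/5 = 819
// database pages. Shrinking the file by the nFree free pages therefore also
// removes roughly nFree/819 pointer-map pages, which is what the nPtrmap
// term subtracted from nOrig accounts for; the pending-byte and pointer-map
// checks then nudge nFin down past any page that cannot be the last page of
// a well-formed database.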
// C documentation
//
// /*
// ** A write-transaction must be opened before calling this function.
// ** It performs a single unit of work towards an incremental vacuum.
// **
// ** If the incremental vacuum is finished after this function has run,
// ** SQLITE_DONE is returned. If it is not finished, but no error occurred,
// ** SQLITE_OK is returned. Otherwise an SQLite error code.
// */
func _sqlite3BtreeIncrVacuum(tls *libc.TLS, p uintptr) (r int32) {
var nFin, nFree, nOrig TPgno
var pBt uintptr
var rc int32
_, _, _, _, _ = nFin, nFree, nOrig, pBt, rc
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
if !((*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0) {
rc = int32(SQLITE_DONE)
} else {
nOrig = _btreePagecount(tls, pBt)
nFree = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36)
nFin = _finalDbSize(tls, pBt, nOrig, nFree)
if nOrig < nFin || nFree >= nOrig {
rc = _sqlite3CorruptError(tls, int32(74288))
} else {
if nFree > uint32(0) {
rc = _saveAllCursors(tls, pBt, uint32(0), uintptr(0))
if rc == SQLITE_OK {
_invalidateAllOverflowCache(tls, pBt)
rc = _incrVacuumStep(tls, pBt, nFin, nOrig, 0)
}
if rc == SQLITE_OK {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FpDbPage)
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+28, (*TBtShared)(unsafe.Pointer(pBt)).FnPage)
}
} else {
rc = int32(SQLITE_DONE)
}
}
}
_sqlite3BtreeLeave(tls, p)
return rc
}
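// The following is a minimal illustrative sketch, not part of the generated
// amalgamation: _sqlite3BtreeIncrVacuum performs one unit of work per call
// and reports SQLITE_DONE once nothing is left to do, so a caller (such as
// the code behind PRAGMA incremental_vacuum) is expected to invoke it in a
// loop. The function name and loop structure below are assumptions made for
// illustration only.
func exampleDriveIncrVacuum(tls *libc.TLS, p uintptr) (r int32) {
	for {
		rc := _sqlite3BtreeIncrVacuum(tls, p)
		if rc == int32(SQLITE_DONE) {
			return SQLITE_OK // the incremental vacuum has finished
		}
		if rc != SQLITE_OK {
			return rc // propagate any error to the caller
		}
	}
}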
// C documentation
//
// /*
// ** This routine is called prior to sqlite3PagerCommit when a transaction
// ** is committed for an auto-vacuum database.
// */
func _autoVacuumCommit(tls *libc.TLS, p uintptr) (r int32) {
var db, pBt, pPager uintptr
var iDb, rc int32
var iFree, nFin, nFree, nOrig, nVac TPgno
_, _, _, _, _, _, _, _, _, _ = db, iDb, iFree, nFin, nFree, nOrig, nVac, pBt, pPager, rc
rc = SQLITE_OK
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
pPager = (*TBtShared)(unsafe.Pointer(pBt)).FpPager
_invalidateAllOverflowCache(tls, pBt)
if !((*TBtShared)(unsafe.Pointer(pBt)).FincrVacuum != 0) { /* Database size before freeing */
nOrig = _btreePagecount(tls, pBt)
if _ptrmapPageno(tls, pBt, nOrig) == nOrig || nOrig == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
/* It is not possible to create a database for which the final page
** is either a pointer-map page or the pending-byte page. If one
** is encountered, this indicates corruption.
*/
return _sqlite3CorruptError(tls, int32(74339))
}
nFree = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36)
db = (*TBtree)(unsafe.Pointer(p)).Fdb
if (*Tsqlite3)(unsafe.Pointer(db)).FxAutovacPages != 0 {
iDb = 0
for {
if !(iDb < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
if (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32))).FpBt == p {
break
}
goto _1
_1:
;
iDb++
}
nVac = (*(*func(*libc.TLS, uintptr, uintptr, Tu32, Tu32, Tu32) uint32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxAutovacPages})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpAutovacPagesArg, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32))).FzDbSName, nOrig, nFree, (*TBtShared)(unsafe.Pointer(pBt)).FpageSize)
if nVac > nFree {
nVac = nFree
}
if nVac == uint32(0) {
return SQLITE_OK
}
} else {
nVac = nFree
}
nFin = _finalDbSize(tls, pBt, nOrig, nVac)
if nFin > nOrig {
return _sqlite3CorruptError(tls, int32(74366))
}
if nFin < nOrig {
rc = _saveAllCursors(tls, pBt, uint32(0), uintptr(0))
}
iFree = nOrig
for {
if !(iFree > nFin && rc == SQLITE_OK) {
break
}
rc = _incrVacuumStep(tls, pBt, nFin, iFree, libc.BoolInt32(nVac == nFree))
goto _2
_2:
;
iFree--
}
if (rc == int32(SQLITE_DONE) || rc == SQLITE_OK) && nFree > uint32(0) {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FpDbPage)
if nVac == nFree {
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+32, uint32(0))
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36, uint32(0))
}
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+28, nFin)
(*TBtShared)(unsafe.Pointer(pBt)).FbDoTruncate = uint8(1)
(*TBtShared)(unsafe.Pointer(pBt)).FnPage = nFin
}
if rc != SQLITE_OK {
_sqlite3PagerRollback(tls, pPager)
}
}
return rc
}
// C documentation
//
// /*
// ** This routine does the first phase of a two-phase commit. This routine
// ** causes a rollback journal to be created (if it does not already exist)
// ** and populated with enough information so that if a power loss occurs
// ** the database can be restored to its original state by playing back
// ** the journal. Then the contents of the journal are flushed out to
// ** the disk. After the journal is safely on oxide, the changes to the
// ** database are written into the database file and flushed to oxide.
// ** At the end of this call, the rollback journal still exists on the
// ** disk and we are still holding all locks, so the transaction has not
// ** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the
// ** commit process.
// **
// ** This call is a no-op if no write-transaction is currently active on pBt.
// **
// ** Otherwise, sync the database file for the btree pBt. zSuperJrnl points to
// ** the name of a super-journal file that should be written into the
// ** individual journal file, or is NULL, indicating no super-journal file
// ** (single database transaction).
// **
// ** When this is called, the super-journal should already have been
// ** created, populated with this journal pointer and synced to disk.
// **
// ** Once this routine has returned, the only thing required to commit
// ** the write-transaction for this database file is to delete the journal.
// */
func _sqlite3BtreeCommitPhaseOne(tls *libc.TLS, p uintptr, zSuperJrnl uintptr) (r int32) {
var pBt uintptr
var rc int32
_, _ = pBt, rc
rc = SQLITE_OK
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == int32(TRANS_WRITE) {
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
rc = _autoVacuumCommit(tls, p)
if rc != SQLITE_OK {
_sqlite3BtreeLeave(tls, p)
return rc
}
}
if (*TBtShared)(unsafe.Pointer(pBt)).FbDoTruncate != 0 {
_sqlite3PagerTruncateImage(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, (*TBtShared)(unsafe.Pointer(pBt)).FnPage)
}
rc = _sqlite3PagerCommitPhaseOne(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, zSuperJrnl, 0)
_sqlite3BtreeLeave(tls, p)
}
return rc
}
// C documentation
//
// /*
// ** This function is called from both BtreeCommitPhaseTwo() and BtreeRollback()
// ** at the conclusion of a transaction.
// */
func _btreeEndTransaction(tls *libc.TLS, p uintptr) {
var db, pBt uintptr
_, _ = db, pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
db = (*TBtree)(unsafe.Pointer(p)).Fdb
(*TBtShared)(unsafe.Pointer(pBt)).FbDoTruncate = uint8(0)
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) > TRANS_NONE && (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeRead > int32(1) {
/* If there are other active statements that belong to this database
** handle, downgrade to a read-only transaction. The other statements
** may still be reading from the database. */
_downgradeAllSharedCacheTableLocks(tls, p)
(*TBtree)(unsafe.Pointer(p)).FinTrans = uint8(TRANS_READ)
} else {
/* If the handle had any kind of transaction open, decrement the
** transaction count of the shared btree. If the transaction count
** reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused()
** call below will unlock the pager. */
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) != TRANS_NONE {
_clearAllSharedCacheTableLocks(tls, p)
(*TBtShared)(unsafe.Pointer(pBt)).FnTransaction--
if 0 == (*TBtShared)(unsafe.Pointer(pBt)).FnTransaction {
(*TBtShared)(unsafe.Pointer(pBt)).FinTransaction = uint8(TRANS_NONE)
}
}
/* Set the current transaction state to TRANS_NONE and unlock the
** pager if this call closed the only read or write transaction. */
(*TBtree)(unsafe.Pointer(p)).FinTrans = uint8(TRANS_NONE)
_unlockBtreeIfUnused(tls, pBt)
}
}
// C documentation
//
// /*
// ** Commit the transaction currently in progress.
// **
// ** This routine implements the second phase of a 2-phase commit. The
// ** sqlite3BtreeCommitPhaseOne() routine does the first phase and should
// ** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne()
// ** routine did all the work of writing information out to disk and flushing the
// ** contents so that they are written onto the disk platter. All this
// ** routine has to do is delete or truncate or zero the header in the
// ** rollback journal (which causes the transaction to commit) and
// ** drop locks.
// **
// ** Normally, if an error occurs while the pager layer is attempting to
// ** finalize the underlying journal file, this function returns an error and
// ** the upper layer will attempt a rollback. However, if the second argument
// ** is non-zero then this b-tree transaction is part of a multi-file
// ** transaction. In this case, the transaction has already been committed
// ** (by deleting a super-journal file) and the caller will ignore this
// ** function's return code. So, even if an error occurs in the pager layer,
// ** reset the b-tree object's internal state to indicate that the write
// ** transaction has been closed. This is quite safe, as the pager will have
// ** transitioned to the error state.
// **
// ** This will release the write lock on the database file. If there
// ** are no active cursors, it also releases the read lock.
// */
func _sqlite3BtreeCommitPhaseTwo(tls *libc.TLS, p uintptr, bCleanup int32) (r int32) {
var pBt uintptr
var rc int32
_, _ = pBt, rc
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == TRANS_NONE {
return SQLITE_OK
}
_sqlite3BtreeEnter(tls, p)
/* If the handle has a write-transaction open, commit the shared-btrees
** transaction and set the shared state to TRANS_READ.
*/
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == int32(TRANS_WRITE) {
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
rc = _sqlite3PagerCommitPhaseTwo(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager)
if rc != SQLITE_OK && bCleanup == 0 {
_sqlite3BtreeLeave(tls, p)
return rc
}
(*TBtree)(unsafe.Pointer(p)).FiBDataVersion-- /* Compensate for pPager->iDataVersion++; */
(*TBtShared)(unsafe.Pointer(pBt)).FinTransaction = uint8(TRANS_READ)
_btreeClearHasContent(tls, pBt)
}
_btreeEndTransaction(tls, p)
_sqlite3BtreeLeave(tls, p)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Do both phases of a commit.
// */
func _sqlite3BtreeCommit(tls *libc.TLS, p uintptr) (r int32) {
var rc int32
_ = rc
_sqlite3BtreeEnter(tls, p)
rc = _sqlite3BtreeCommitPhaseOne(tls, p, uintptr(0))
if rc == SQLITE_OK {
rc = _sqlite3BtreeCommitPhaseTwo(tls, p, 0)
}
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** This routine sets the state to CURSOR_FAULT and the error
// ** code to errCode for every cursor on any BtShared that pBtree
// ** references. Or if the writeOnly flag is set to 1, then only
// ** trip write cursors and leave read cursors unchanged.
// **
// ** Every cursor is a candidate to be tripped, including cursors
// ** that belong to other database connections that happen to be
// ** sharing the cache with pBtree.
// **
// ** This routine gets called when a rollback occurs. If the writeOnly
// ** flag is true, then only write-cursors need be tripped - read-only
// ** cursors save their current positions so that they may continue
// ** following the rollback. Or, if writeOnly is false, all cursors are
// ** tripped. In general, writeOnly is false if the transaction being
// ** rolled back modified the database schema. In this case b-tree root
// ** pages may be moved or deleted from the database altogether, making
// ** it unsafe for read cursors to continue.
// **
// ** If the writeOnly flag is true and an error is encountered while
// ** saving the current position of a read-only cursor, all cursors,
// ** including all read-cursors are tripped.
// **
// ** SQLITE_OK is returned if successful, or if an error occurs while
// ** saving a cursor position, an SQLite error code.
// */
func _sqlite3BtreeTripAllCursors(tls *libc.TLS, pBtree uintptr, errCode int32, writeOnly int32) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
rc = SQLITE_OK
if pBtree != 0 {
_sqlite3BtreeEnter(tls, pBtree)
p = (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(pBtree)).FpBt)).FpCursor
for {
if !(p != 0) {
break
}
if writeOnly != 0 && int32((*TBtCursor)(unsafe.Pointer(p)).FcurFlags)&int32(BTCF_WriteFlag) == 0 {
if int32((*TBtCursor)(unsafe.Pointer(p)).FeState) == CURSOR_VALID || int32((*TBtCursor)(unsafe.Pointer(p)).FeState) == int32(CURSOR_SKIPNEXT) {
rc = _saveCursorPosition(tls, p)
if rc != SQLITE_OK {
_sqlite3BtreeTripAllCursors(tls, pBtree, rc, 0)
break
}
}
} else {
_sqlite3BtreeClearCursor(tls, p)
(*TBtCursor)(unsafe.Pointer(p)).FeState = uint8(CURSOR_FAULT)
(*TBtCursor)(unsafe.Pointer(p)).FskipNext = errCode
}
_btreeReleaseAllCursorPages(tls, p)
goto _1
_1:
;
p = (*TBtCursor)(unsafe.Pointer(p)).FpNext
}
_sqlite3BtreeLeave(tls, pBtree)
}
return rc
}
// C documentation
//
// /*
// ** Set the pBt->nPage field correctly, according to the current
// ** state of the database. Assume pBt->pPage1 is valid.
// */
func _btreeSetNPage(tls *libc.TLS, pBt uintptr, pPage1 uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* nPage at bp+0 */ int32
*(*int32)(unsafe.Pointer(bp)) = int32(_sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+28))
if *(*int32)(unsafe.Pointer(bp)) == 0 {
_sqlite3PagerPagecount(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, bp)
}
(*TBtShared)(unsafe.Pointer(pBt)).FnPage = uint32(*(*int32)(unsafe.Pointer(bp)))
}
// C documentation
//
// /*
// ** Rollback the transaction in progress.
// **
// ** If tripCode is not SQLITE_OK then cursors will be invalidated (tripped).
// ** Only write cursors are tripped if writeOnly is true but all cursors are
// ** tripped if writeOnly is false. Any attempt to use
// ** a tripped cursor will result in an error.
// **
// ** This will release the write lock on the database file. If there
// ** are no active cursors, it also releases the read lock.
// */
func _sqlite3BtreeRollback(tls *libc.TLS, p uintptr, tripCode int32, writeOnly int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pBt uintptr
var rc, rc2, rc21, v1 int32
var _ /* pPage1 at bp+0 */ uintptr
_, _, _, _, _ = pBt, rc, rc2, rc21, v1
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
if tripCode == SQLITE_OK {
v1 = _saveAllCursors(tls, pBt, uint32(0), uintptr(0))
tripCode = v1
rc = v1
if rc != 0 {
writeOnly = 0
}
} else {
rc = SQLITE_OK
}
if tripCode != 0 {
rc2 = _sqlite3BtreeTripAllCursors(tls, p, tripCode, writeOnly)
if rc2 != SQLITE_OK {
rc = rc2
}
}
if int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == int32(TRANS_WRITE) {
rc21 = _sqlite3PagerRollback(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager)
if rc21 != SQLITE_OK {
rc = rc21
}
/* The rollback may have destroyed the pPage1->aData value. So
** call btreeGetPage() on page 1 again to make
** sure pPage1->aData is set correctly. */
if _btreeGetPage(tls, pBt, uint32(1), bp, 0) == SQLITE_OK {
_btreeSetNPage(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)))
_releasePageOne(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
(*TBtShared)(unsafe.Pointer(pBt)).FinTransaction = uint8(TRANS_READ)
_btreeClearHasContent(tls, pBt)
}
_btreeEndTransaction(tls, p)
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** Start a statement subtransaction. The subtransaction can be rolled
// ** back independently of the main transaction. You must start a transaction
// ** before starting a subtransaction. The subtransaction is ended automatically
// ** if the main transaction commits or rolls back.
// **
// ** Statement subtransactions are used around individual SQL statements
// ** that are contained within a BEGIN...COMMIT block. If a constraint
// ** error occurs within the statement, the effect of that one statement
// ** can be rolled back without having to rollback the entire transaction.
// **
// ** A statement sub-transaction is implemented as an anonymous savepoint. The
// ** value passed as the second parameter is the total number of savepoints,
// ** including the new anonymous savepoint, open on the B-Tree. i.e. if there
// ** are no active savepoints and no other statement-transactions open,
// ** iStatement is 1. This anonymous savepoint can be released or rolled back
// ** using the sqlite3BtreeSavepoint() function.
// */
func _sqlite3BtreeBeginStmt(tls *libc.TLS, p uintptr, iStatement int32) (r int32) {
var pBt uintptr
var rc int32
_, _ = pBt, rc
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
/* At the pager level, a statement transaction is a savepoint with
** an index greater than all savepoints created explicitly using
** SQL statements. It is illegal to open, release or rollback any
** such savepoints while the statement transaction savepoint is active.
*/
rc = _sqlite3PagerOpenSavepoint(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, iStatement)
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** The second argument to this function, op, is always SAVEPOINT_ROLLBACK
// ** or SAVEPOINT_RELEASE. This function either releases or rolls back the
// ** savepoint identified by parameter iSavepoint, depending on the value
// ** of op.
// **
// ** Normally, iSavepoint is greater than or equal to zero. However, if op is
// ** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. In this case the
// ** contents of the entire transaction are rolled back. This is different
// ** from a normal transaction rollback, as no locks are released and the
// ** transaction remains open.
// */
func _sqlite3BtreeSavepoint(tls *libc.TLS, p uintptr, op int32, iSavepoint int32) (r int32) {
var pBt uintptr
var rc int32
_, _ = pBt, rc
rc = SQLITE_OK
if p != 0 && int32((*TBtree)(unsafe.Pointer(p)).FinTrans) == int32(TRANS_WRITE) {
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
if op == int32(SAVEPOINT_ROLLBACK) {
rc = _saveAllCursors(tls, pBt, uint32(0), uintptr(0))
}
if rc == SQLITE_OK {
rc = _sqlite3PagerSavepoint(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, op, iSavepoint)
}
if rc == SQLITE_OK {
if iSavepoint < 0 && int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_INITIALLY_EMPTY) != 0 {
(*TBtShared)(unsafe.Pointer(pBt)).FnPage = uint32(0)
}
rc = _newDatabase(tls, pBt)
_btreeSetNPage(tls, pBt, (*TBtShared)(unsafe.Pointer(pBt)).FpPage1)
/* pBt->nPage might be zero if the database was corrupt when
** the transaction was started. Otherwise, it must be at least 1. */
}
_sqlite3BtreeLeave(tls, p)
}
return rc
}
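// The following is a minimal illustrative sketch, not part of the generated
// amalgamation: it pairs _sqlite3BtreeBeginStmt with _sqlite3BtreeSavepoint
// to show the statement sub-transaction lifecycle described above. The
// function name, the keep flag, and the assumption that the anonymous
// savepoint opened for iStatement is addressed as savepoint index
// iStatement-1 are illustrative only.
func exampleStatementSubtransaction(tls *libc.TLS, p uintptr, iStatement int32, keep int32) (r int32) {
	rc := _sqlite3BtreeBeginStmt(tls, p, iStatement)
	if rc != SQLITE_OK {
		return rc
	}
	// ... the individual SQL statement would execute here ...
	op := int32(SAVEPOINT_RELEASE) // keep the statement's changes
	if keep == 0 {
		op = int32(SAVEPOINT_ROLLBACK) // undo the statement's changes
	}
	return _sqlite3BtreeSavepoint(tls, p, op, iStatement-int32(1))
}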
// C documentation
//
// /*
// ** Create a new cursor for the BTree whose root is on the page
// ** iTable. If a read-only cursor is requested, it is assumed that
// ** the caller already has at least a read-only transaction open
// ** on the database. If a write-cursor is requested, then
// ** the caller is assumed to have an open write transaction.
// **
// ** If the BTREE_WRCSR bit of wrFlag is clear, then the cursor can only
// ** be used for reading. If the BTREE_WRCSR bit is set, then the cursor
// ** can be used for reading or for writing if other conditions for writing
// ** are also met. These are the conditions that must be met in order
// ** for writing to be allowed:
// **
// ** 1: The cursor must have been opened with wrFlag containing BTREE_WRCSR
// **
// ** 2: Other database connections that share the same pager cache
// ** but which are not in the READ_UNCOMMITTED state may not have
// ** cursors open with wrFlag==0 on the same table. Otherwise
// ** the changes made by this write cursor would be visible to
// ** the read cursors in the other database connection.
// **
// ** 3: The database must be writable (not on read-only media)
// **
// ** 4: There must be an active transaction.
// **
// ** The BTREE_FORDELETE bit of wrFlag may optionally be set if BTREE_WRCSR
// ** is set. If FORDELETE is set, that is a hint to the implementation that
// ** this cursor will only be used to seek to and delete entries of an index
// ** as part of a larger DELETE statement. The FORDELETE hint is not used by
// ** this implementation. But in a hypothetical alternative storage engine
// ** in which index entries are automatically deleted when corresponding table
// ** rows are deleted, the FORDELETE flag is a hint that all SEEK and DELETE
// ** operations on this cursor can be no-ops and all READ operations can
// ** return a null row (2-bytes: 0x01 0x00).
// **
// ** No checking is done to make sure that page iTable really is the
// ** root page of a b-tree. If it is not, then the cursor acquired
// ** will not work correctly.
// **
// ** It is assumed that the sqlite3BtreeCursorZero() has been called
// ** on pCur to initialize the memory space prior to invoking this routine.
// */
func _btreeCursor(tls *libc.TLS, p uintptr, iTable TPgno, wrFlag int32, pKeyInfo uintptr, pCur uintptr) (r int32) {
var pBt, pX, p2, p3 uintptr
_, _, _, _ = pBt, pX, p2, p3
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt /* Looping over other all cursors */
/* The following assert statements verify that if this is a sharable
** b-tree database, the connection is holding the required table locks,
** and that no other connection has any open cursor that conflicts with
** this lock. The iTable<1 term disables the check for corrupt schemas. */
/* Assert that the caller has opened the required transaction. */
if iTable <= uint32(1) {
if iTable < uint32(1) {
return _sqlite3CorruptError(tls, int32(74830))
} else {
if _btreePagecount(tls, pBt) == uint32(0) {
iTable = uint32(0)
}
}
}
/* Now that no other errors can occur, finish filling in the BtCursor
** variables and link the cursor into the BtShared list. */
(*TBtCursor)(unsafe.Pointer(pCur)).FpgnoRoot = iTable
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage = int8(-int32(1))
(*TBtCursor)(unsafe.Pointer(pCur)).FpKeyInfo = pKeyInfo
(*TBtCursor)(unsafe.Pointer(pCur)).FpBtree = p
(*TBtCursor)(unsafe.Pointer(pCur)).FpBt = pBt
(*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags = uint8(0)
/* If there are two or more cursors on the same btree, then all such
** cursors *must* have the BTCF_Multiple flag set. */
pX = (*TBtShared)(unsafe.Pointer(pBt)).FpCursor
for {
if !(pX != 0) {
break
}
if (*TBtCursor)(unsafe.Pointer(pX)).FpgnoRoot == iTable {
p2 = pX + 1
*(*Tu8)(unsafe.Pointer(p2)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p2))) | libc.Int32FromInt32(BTCF_Multiple))
(*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags = uint8(BTCF_Multiple)
}
goto _1
_1:
;
pX = (*TBtCursor)(unsafe.Pointer(pX)).FpNext
}
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_INVALID)
(*TBtCursor)(unsafe.Pointer(pCur)).FpNext = (*TBtShared)(unsafe.Pointer(pBt)).FpCursor
(*TBtShared)(unsafe.Pointer(pBt)).FpCursor = pCur
if wrFlag != 0 {
p3 = pCur + 1
*(*Tu8)(unsafe.Pointer(p3)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p3))) | libc.Int32FromInt32(BTCF_WriteFlag))
(*TBtCursor)(unsafe.Pointer(pCur)).FcurPagerFlags = uint8(0)
if (*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace == uintptr(0) {
return _allocateTempSpace(tls, pBt)
}
} else {
(*TBtCursor)(unsafe.Pointer(pCur)).FcurPagerFlags = uint8(PAGER_GET_READONLY)
}
return SQLITE_OK
}
func _btreeCursorWithLock(tls *libc.TLS, p uintptr, iTable TPgno, wrFlag int32, pKeyInfo uintptr, pCur uintptr) (r int32) {
var rc int32
_ = rc
_sqlite3BtreeEnter(tls, p)
rc = _btreeCursor(tls, p, iTable, wrFlag, pKeyInfo, pCur)
_sqlite3BtreeLeave(tls, p)
return rc
}
func _sqlite3BtreeCursor(tls *libc.TLS, p uintptr, iTable TPgno, wrFlag int32, pKeyInfo uintptr, pCur uintptr) (r int32) {
if (*TBtree)(unsafe.Pointer(p)).Fsharable != 0 {
return _btreeCursorWithLock(tls, p, iTable, wrFlag, pKeyInfo, pCur)
} else {
return _btreeCursor(tls, p, iTable, wrFlag, pKeyInfo, pCur)
}
return r
}
// C documentation
//
// /*
// ** Return the size of a BtCursor object in bytes.
// **
// ** This interface is needed so that users of cursors can preallocate
// ** sufficient storage to hold a cursor. The BtCursor object is opaque
// ** to users so they cannot do the sizeof() themselves - they must call
// ** this routine.
// */
func _sqlite3BtreeCursorSize(tls *libc.TLS) (r int32) {
return int32((libc.Uint64FromInt64(296) + libc.Uint64FromInt32(7)) & uint64(^libc.Int32FromInt32(7)))
}
// C documentation
//
// /*
// ** Initialize memory that will be converted into a BtCursor object.
// **
// ** The simple approach here would be to memset() the entire object
// ** to zero. But it turns out that the apPage[] and aiIdx[] arrays
// ** do not need to be zeroed and they are large, so we can save a lot
// ** of run-time by skipping the initialization of those elements.
// */
func _sqlite3BtreeCursorZero(tls *libc.TLS, p uintptr) {
libc.Xmemset(tls, p, 0, uint64(libc.UintptrFromInt32(0)+32))
}
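// The following is a minimal illustrative sketch, not part of the generated
// amalgamation: since BtCursor is opaque to callers, the intended pattern is
// to ask _sqlite3BtreeCursorSize for the allocation size, zero the prefix
// with _sqlite3BtreeCursorZero, and only then open the cursor with
// _sqlite3BtreeCursor. The function name and the use of Xsqlite3_malloc for
// the allocation are assumptions made for illustration only.
func exampleOpenCursor(tls *libc.TLS, p uintptr, iTable TPgno, wrFlag int32, pKeyInfo uintptr) (pCur uintptr, rc int32) {
	pCur = Xsqlite3_malloc(tls, _sqlite3BtreeCursorSize(tls))
	if pCur == uintptr(0) {
		return uintptr(0), int32(SQLITE_NOMEM)
	}
	_sqlite3BtreeCursorZero(tls, pCur)
	rc = _sqlite3BtreeCursor(tls, p, iTable, wrFlag, pKeyInfo, pCur)
	// On failure the caller would release the allocation with Xsqlite3_free.
	return pCur, rc
}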
// C documentation
//
// /*
// ** Close a cursor. The read lock on the database file is released
// ** when the last cursor is closed.
// */
func _sqlite3BtreeCloseCursor(tls *libc.TLS, pCur uintptr) (r int32) {
var pBt, pBtree, pPrev uintptr
_, _, _ = pBt, pBtree, pPrev
pBtree = (*TBtCursor)(unsafe.Pointer(pCur)).FpBtree
if pBtree != 0 {
pBt = (*TBtCursor)(unsafe.Pointer(pCur)).FpBt
_sqlite3BtreeEnter(tls, pBtree)
if (*TBtShared)(unsafe.Pointer(pBt)).FpCursor == pCur {
(*TBtShared)(unsafe.Pointer(pBt)).FpCursor = (*TBtCursor)(unsafe.Pointer(pCur)).FpNext
} else {
pPrev = (*TBtShared)(unsafe.Pointer(pBt)).FpCursor
for cond := true; cond; cond = pPrev != 0 {
if (*TBtCursor)(unsafe.Pointer(pPrev)).FpNext == pCur {
(*TBtCursor)(unsafe.Pointer(pPrev)).FpNext = (*TBtCursor)(unsafe.Pointer(pCur)).FpNext
break
}
pPrev = (*TBtCursor)(unsafe.Pointer(pPrev)).FpNext
}
}
_btreeReleaseAllCursorPages(tls, pCur)
_unlockBtreeIfUnused(tls, pBt)
Xsqlite3_free(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow)
Xsqlite3_free(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpKey)
if int32((*TBtShared)(unsafe.Pointer(pBt)).FopenFlags)&int32(BTREE_SINGLE) != 0 && (*TBtShared)(unsafe.Pointer(pBt)).FpCursor == uintptr(0) {
/* Since the BtShared is not sharable, there is no need to
** worry about the missing sqlite3BtreeLeave() call here. */
_sqlite3BtreeClose(tls, pBtree)
} else {
_sqlite3BtreeLeave(tls, pBtree)
}
(*TBtCursor)(unsafe.Pointer(pCur)).FpBtree = uintptr(0)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Make sure the BtCursor* given in the argument has a valid
// ** BtCursor.info structure. If it is not already valid, call
// ** btreeParseCell() to fill it in.
// **
// ** BtCursor.info is a cache of the information in the current cell.
// ** Using this cache reduces the number of calls to btreeParseCell().
// */
func _getCellInfo(tls *libc.TLS, pCur uintptr) {
var p1 uintptr
_ = p1
if int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize) == 0 {
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(BTCF_ValidNKey))
_btreeParseCell(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpPage, int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix), pCur+48)
} else {
}
}
func _sqlite3BtreeCursorIsValidNN(tls *libc.TLS, pCur uintptr) (r int32) {
return libc.BoolInt32(int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == CURSOR_VALID)
}
// C documentation
//
// /*
// ** Return the value of the integer key or "rowid" for a table btree.
// ** This routine is only valid for a cursor that is pointing into an
// ** ordinary table btree. If the cursor points to an index btree or
// ** is invalid, the result of this routine is undefined.
// */
func _sqlite3BtreeIntegerKey(tls *libc.TLS, pCur uintptr) (r Ti64) {
_getCellInfo(tls, pCur)
return (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey
}
// C documentation
//
// /*
// ** Pin or unpin a cursor.
// */
func _sqlite3BtreeCursorPin(tls *libc.TLS, pCur uintptr) {
var p1 uintptr
_ = p1
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(BTCF_Pinned))
}
func _sqlite3BtreeCursorUnpin(tls *libc.TLS, pCur uintptr) {
var p1 uintptr
_ = p1
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(BTCF_Pinned))
}
// C documentation
//
// /*
// ** Return the offset into the database file for the start of the
// ** payload to which the cursor is pointing.
// */
func _sqlite3BtreeOffset(tls *libc.TLS, pCur uintptr) (r Ti64) {
_getCellInfo(tls, pCur)
return int64((*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FpageSize)*(int64((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).Fpgno)-int64(1)) + (int64((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload) - int64((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FaData))
}
// C documentation
//
// /*
// ** Return the number of bytes of payload for the entry that pCur is
// ** currently pointing to. For table btrees, this will be the amount
// ** of data. For index btrees, this will be the size of the key.
// **
// ** The caller must guarantee that the cursor is pointing to a non-NULL
// ** valid entry. In other words, the calling procedure must guarantee
// ** that the cursor has Cursor.eState==CURSOR_VALID.
// */
func _sqlite3BtreePayloadSize(tls *libc.TLS, pCur uintptr) (r Tu32) {
_getCellInfo(tls, pCur)
return (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnPayload
}
// C documentation
//
// /*
// ** Return an upper bound on the size of any record for the table
// ** that the cursor is pointing into.
// **
// ** This is an optimization. Everything will still work if this
// ** routine always returns 2147483647 (which is the largest record
// ** that SQLite can handle) or more. But returning a smaller value might
// ** prevent large memory allocations when trying to interpret a
// ** corrupt database.
// **
// ** The current implementation merely returns the size of the underlying
// ** database file.
// */
func _sqlite3BtreeMaxRecordSize(tls *libc.TLS, pCur uintptr) (r Tsqlite3_int64) {
return int64((*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FpageSize) * int64((*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage)
}
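// Worked example (illustrative, not part of the generated amalgamation):
// for a database of 1000 pages with a 4096-byte page size, the bound
// returned above is 4096*1000 = 4096000 bytes, which is far smaller than
// the 2147483647-byte maximum record size and therefore still useful for
// rejecting implausibly large length fields in corrupt records.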
// C documentation
//
// /*
// ** Given the page number of an overflow page in the database (parameter
// ** ovfl), this function finds the page number of the next page in the
// ** linked list of overflow pages. If possible, it uses the auto-vacuum
// ** pointer-map data instead of reading the content of page ovfl to do so.
// **
// ** If an error occurs an SQLite error code is returned. Otherwise:
// **
// ** The page number of the next overflow page in the linked list is
// ** written to *pPgnoNext. If page ovfl is the last page in its linked
// ** list, *pPgnoNext is set to zero.
// **
// ** If ppPage is not NULL, and a reference to the MemPage object corresponding
// ** to page number pOvfl was obtained, then *ppPage is set to point to that
// ** reference. It is the responsibility of the caller to call releasePage()
// ** on *ppPage to free the reference. If no reference was obtained (because
// ** the pointer-map was used to obtain the value for *pPgnoNext), then
// ** *ppPage is set to zero.
// */
func _getOverflowPage(tls *libc.TLS, pBt uintptr, ovfl TPgno, ppPage uintptr, pPgnoNext uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iGuess, next TPgno
var rc, v1, v2 int32
var _ /* eType at bp+12 */ Tu8
var _ /* pPage at bp+0 */ uintptr
var _ /* pgno at bp+8 */ TPgno
_, _, _, _, _ = iGuess, next, rc, v1, v2
next = uint32(0)
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
rc = SQLITE_OK
/* Try to find the next page in the overflow list using the
** autovacuum pointer-map pages. Guess that the next page in
** the overflow list is page number (ovfl+1). If that guess turns
** out to be wrong, fall back to loading the data of page
** number ovfl to determine the next page number.
*/
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
iGuess = ovfl + uint32(1)
for _ptrmapPageno(tls, pBt, iGuess) == iGuess || iGuess == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
iGuess++
}
if iGuess <= _btreePagecount(tls, pBt) {
rc = _ptrmapGet(tls, pBt, iGuess, bp+12, bp+8)
if rc == SQLITE_OK && int32(*(*Tu8)(unsafe.Pointer(bp + 12))) == int32(PTRMAP_OVERFLOW2) && *(*TPgno)(unsafe.Pointer(bp + 8)) == ovfl {
next = iGuess
rc = int32(SQLITE_DONE)
}
}
}
if rc == SQLITE_OK {
if ppPage == uintptr(0) {
v1 = int32(PAGER_GET_READONLY)
} else {
v1 = 0
}
rc = _btreeGetPage(tls, pBt, ovfl, bp, v1)
if rc == SQLITE_OK {
next = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData)
}
}
*(*TPgno)(unsafe.Pointer(pPgnoNext)) = next
if ppPage != 0 {
*(*uintptr)(unsafe.Pointer(ppPage)) = *(*uintptr)(unsafe.Pointer(bp))
} else {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
if rc == int32(SQLITE_DONE) {
v2 = SQLITE_OK
} else {
v2 = rc
}
return v2
}
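// The following is a minimal illustrative sketch, not part of the generated
// amalgamation: it walks an overflow chain one page number at a time using
// _getOverflowPage, passing a zero ppPage so that no page reference is
// retained. The function name and the nMax guard against corrupt, circular
// chains are assumptions made for illustration only.
func exampleCountOverflowChain(tls *libc.TLS, pBt uintptr, first TPgno, nMax int32) (n int32, rc int32) {
	bp := tls.Alloc(16)
	defer tls.Free(16)
	*(*TPgno)(unsafe.Pointer(bp)) = first
	for *(*TPgno)(unsafe.Pointer(bp)) != uint32(0) && n < nMax {
		n++
		rc = _getOverflowPage(tls, pBt, *(*TPgno)(unsafe.Pointer(bp)), uintptr(0), bp)
		if rc != SQLITE_OK {
			return n, rc
		}
	}
	return n, SQLITE_OK
}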
// C documentation
//
// /*
// ** Copy data from a buffer to a page, or from a page to a buffer.
// **
// ** pPayload is a pointer to data stored on database page pDbPage.
// ** If argument eOp is false, then nByte bytes of data are copied
// ** from pPayload to the buffer pointed at by pBuf. If eOp is true,
// ** then sqlite3PagerWrite() is called on pDbPage and nByte bytes
// ** of data are copied from the buffer pBuf to pPayload.
// **
// ** SQLITE_OK is returned on success, otherwise an error code.
// */
func _copyPayload(tls *libc.TLS, pPayload uintptr, pBuf uintptr, nByte int32, eOp int32, pDbPage uintptr) (r int32) {
var rc int32
_ = rc
if eOp != 0 {
/* Copy data from buffer to page (a write operation) */
rc = _sqlite3PagerWrite(tls, pDbPage)
if rc != SQLITE_OK {
return rc
}
libc.Xmemcpy(tls, pPayload, pBuf, uint64(nByte))
} else {
/* Copy data from page to buffer (a read operation) */
libc.Xmemcpy(tls, pBuf, pPayload, uint64(nByte))
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** This function is used to read or overwrite payload information
// ** for the entry that the pCur cursor is pointing to. The eOp
// ** argument is interpreted as follows:
// **
// ** 0: The operation is a read. Populate the overflow cache.
// ** 1: The operation is a write. Populate the overflow cache.
// **
// ** A total of "amt" bytes are read or written beginning at "offset".
// ** Data is read to or from the buffer pBuf.
// **
// ** The content being read or written might appear on the main page
// ** or be scattered out on multiple overflow pages.
// **
// ** If the current cursor entry uses one or more overflow pages
// ** this function may allocate space for and lazily populate
// ** the overflow page-list cache array (BtCursor.aOverflow).
// ** Subsequent calls use this cache to make seeking to the supplied offset
// ** more efficient.
// **
// ** Once an overflow page-list cache has been allocated, it must be
// ** invalidated if some other cursor writes to the same table, or if
// ** the cursor is moved to a different row. Additionally, in auto-vacuum
// ** mode, the following events may invalidate an overflow page-list cache.
// **
// ** * An incremental vacuum,
// ** * A commit in auto_vacuum="full" mode,
// ** * Creating a table (may require moving an overflow page).
// */
func _accessPayload(tls *libc.TLS, pCur uintptr, offset Tu32, amt Tu32, pBuf uintptr, eOp int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var a, a1, iIdx, nOvfl, rc, v2 int32
var aNew, aPayload, aWrite, fd, pBt, pBufStart, pPage, p1 uintptr
var ovflSize Tu32
var _ /* aSave at bp+4 */ [4]Tu8
var _ /* nextPage at bp+0 */ TPgno
var _ /* pDbPage at bp+8 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = a, a1, aNew, aPayload, aWrite, fd, iIdx, nOvfl, ovflSize, pBt, pBufStart, pPage, rc, v2, p1
rc = SQLITE_OK
iIdx = 0
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage /* Btree page of current entry */
pBt = (*TBtCursor)(unsafe.Pointer(pCur)).FpBt /* Btree this cursor belongs to */
pBufStart = pBuf /* Start of original out buffer */
if int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
return _sqlite3CorruptError(tls, int32(75235))
}
_getCellInfo(tls, pCur)
aPayload = (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload
if uint64(int64(aPayload)-int64((*TMemPage)(unsafe.Pointer(pPage)).FaData)) > uint64((*TBtShared)(unsafe.Pointer(pBt)).FusableSize-uint32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)) {
/* Trying to read or write past the end of the data is an error. The
** conditional above is really:
** &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize]
** but is recast into its current form to avoid integer overflow problems
*/
return _sqlite3CorruptError(tls, int32(75250))
}
/* Check if data must be read/written to/from the btree page itself. */
if offset < uint32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) {
a = int32(amt)
if uint32(a)+offset > uint32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) {
a = int32(uint32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) - offset)
}
rc = _copyPayload(tls, aPayload+uintptr(offset), pBuf, a, eOp, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
offset = uint32(0)
pBuf += uintptr(a)
amt -= uint32(a)
} else {
offset -= uint32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)
}
if rc == SQLITE_OK && amt > uint32(0) {
ovflSize = (*TBtShared)(unsafe.Pointer(pBt)).FusableSize - uint32(4)
*(*TPgno)(unsafe.Pointer(bp)) = _sqlite3Get4byte(tls, aPayload+uintptr((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal))
/* If the BtCursor.aOverflow[] has not been allocated, allocate it now.
**
** The aOverflow[] array is sized at one entry for each overflow page
** in the overflow chain. The page number of the first overflow page is
** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array
** means "not yet known" (the cache is lazily populated).
*/
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags)&int32(BTCF_ValidOvfl) == 0 {
nOvfl = int32(((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnPayload - uint32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) + ovflSize - uint32(1)) / ovflSize)
if (*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow == uintptr(0) || nOvfl*libc.Int32FromInt64(4) > _sqlite3MallocSize(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow) {
aNew = _sqlite3Realloc(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow, uint64(nOvfl*int32(2))*uint64(4))
if aNew == uintptr(0) {
return int32(SQLITE_NOMEM)
} else {
(*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow = aNew
}
}
libc.Xmemset(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow, 0, uint64(nOvfl)*uint64(4))
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(BTCF_ValidOvfl))
} else {
/* If the overflow page-list cache has been allocated and the
** entry for the first required overflow page is valid, skip
** directly to it.
*/
if *(*TPgno)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(offset/ovflSize)*4)) != 0 {
iIdx = int32(offset / ovflSize)
*(*TPgno)(unsafe.Pointer(bp)) = *(*TPgno)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4))
offset = offset % ovflSize
}
}
for *(*TPgno)(unsafe.Pointer(bp)) != 0 {
/* If required, populate the overflow page-list cache. */
if *(*TPgno)(unsafe.Pointer(bp)) > (*TBtShared)(unsafe.Pointer(pBt)).FnPage {
return _sqlite3CorruptError(tls, int32(75312))
}
*(*TPgno)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx)*4)) = *(*TPgno)(unsafe.Pointer(bp))
if offset >= ovflSize {
/* The only reason to read this page is to obtain the page
** number for the next page in the overflow chain. The page
** data is not required. So first try to lookup the overflow
** page-list cache, if any, then fall back to the getOverflowPage()
** function.
*/
if *(*TPgno)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx+int32(1))*4)) != 0 {
*(*TPgno)(unsafe.Pointer(bp)) = *(*TPgno)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FaOverflow + uintptr(iIdx+int32(1))*4))
} else {
rc = _getOverflowPage(tls, pBt, *(*TPgno)(unsafe.Pointer(bp)), uintptr(0), bp)
}
offset -= ovflSize
} else {
/* Need to read this page properly. It contains some of the
** range of data that is being read (eOp==0) or written (eOp!=0).
*/
a1 = int32(amt)
if uint32(a1)+offset > ovflSize {
a1 = int32(ovflSize - offset)
}
/* If all the following are true:
**
** 1) this is a read operation, and
** 2) data is required from the start of this overflow page, and
** 3) there are no dirty pages in the page-cache
** 4) the database is file-backed, and
** 5) the page is not in the WAL file
** 6) at least 4 bytes have already been read into the output buffer
**
** then data can be read directly from the database file into the
** output buffer, bypassing the page-cache altogether. This speeds
** up loading large records that span many overflow pages.
*/
if eOp == 0 && offset == uint32(0) && _sqlite3PagerDirectReadOk(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, *(*TPgno)(unsafe.Pointer(bp))) != 0 && pBuf+uintptr(-libc.Int32FromInt32(4)) >= pBufStart {
fd = _sqlite3PagerFile(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager)
aWrite = pBuf + uintptr(-libc.Int32FromInt32(4))
/* due to (6) */
libc.Xmemcpy(tls, bp+4, aWrite, uint64(4))
rc = _sqlite3OsRead(tls, fd, aWrite, a1+int32(4), int64((*TBtShared)(unsafe.Pointer(pBt)).FpageSize)*int64(*(*TPgno)(unsafe.Pointer(bp))-libc.Uint32FromInt32(1)))
*(*TPgno)(unsafe.Pointer(bp)) = _sqlite3Get4byte(tls, aWrite)
libc.Xmemcpy(tls, aWrite, bp+4, uint64(4))
} else {
if eOp == 0 {
v2 = int32(PAGER_GET_READONLY)
} else {
v2 = 0
}
rc = _sqlite3PagerGet(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager, *(*TPgno)(unsafe.Pointer(bp)), bp+8, v2)
if rc == SQLITE_OK {
aPayload = _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
*(*TPgno)(unsafe.Pointer(bp)) = _sqlite3Get4byte(tls, aPayload)
rc = _copyPayload(tls, aPayload+uintptr(offset+uint32(4)), pBuf, a1, eOp, *(*uintptr)(unsafe.Pointer(bp + 8)))
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
offset = uint32(0)
}
}
amt -= uint32(a1)
if amt == uint32(0) {
return rc
}
pBuf += uintptr(a1)
}
if rc != 0 {
break
}
iIdx++
}
}
if rc == SQLITE_OK && amt > uint32(0) {
/* Overflow chain ends prematurely */
return _sqlite3CorruptError(tls, int32(75396))
}
return rc
}
// C documentation
//
// /*
// ** Read part of the payload for the row at which that cursor pCur is currently
// ** pointing. "amt" bytes will be transferred into pBuf[]. The transfer
// ** begins at "offset".
// **
// ** pCur can be pointing to either a table or an index b-tree.
// ** If pointing to a table btree, then the content section is read. If
// ** pCur is pointing to an index b-tree then the key section is read.
// **
// ** For sqlite3BtreePayload(), the caller must ensure that pCur is pointing
// ** to a valid row in the table. For sqlite3BtreePayloadChecked(), the
// ** cursor might be invalid or might need to be restored before being read.
// **
// ** Return SQLITE_OK on success or an error code if anything goes
// ** wrong. An error is returned if "offset+amt" is larger than
// ** the available payload.
// */
func _sqlite3BtreePayload(tls *libc.TLS, pCur uintptr, offset Tu32, amt Tu32, pBuf uintptr) (r int32) {
return _accessPayload(tls, pCur, offset, amt, pBuf, 0)
}
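// The following is a minimal illustrative sketch, not part of the generated
// amalgamation: it reads the entire payload of the row the cursor currently
// points at into a caller-supplied buffer, using _sqlite3BtreePayloadSize to
// learn how many bytes are available. The function name and the truncation
// to nBuf are assumptions made for illustration only.
func exampleReadFullPayload(tls *libc.TLS, pCur uintptr, pBuf uintptr, nBuf Tu32) (r int32) {
	n := _sqlite3BtreePayloadSize(tls, pCur)
	if n > nBuf {
		n = nBuf // clip to the buffer provided by the caller
	}
	return _sqlite3BtreePayload(tls, pCur, uint32(0), n, pBuf)
}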
// C documentation
//
// /*
// ** This variant of sqlite3BtreePayload() works even if the cursor is not
// ** in the CURSOR_VALID state. It is only used by the sqlite3_blob_read()
// ** interface.
// */
func _accessPayloadChecked(tls *libc.TLS, pCur uintptr, offset Tu32, amt Tu32, pBuf uintptr) (r int32) {
var rc, v1 int32
_, _ = rc, v1
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == int32(CURSOR_INVALID) {
return int32(SQLITE_ABORT)
}
rc = _btreeRestoreCursorPosition(tls, pCur)
if rc != 0 {
v1 = rc
} else {
v1 = _accessPayload(tls, pCur, offset, amt, pBuf, 0)
}
return v1
}
func _sqlite3BtreePayloadChecked(tls *libc.TLS, pCur uintptr, offset Tu32, amt Tu32, pBuf uintptr) (r int32) {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == CURSOR_VALID {
return _accessPayload(tls, pCur, offset, amt, pBuf, 0)
} else {
return _accessPayloadChecked(tls, pCur, offset, amt, pBuf)
}
return r
}
// C documentation
//
// /*
// ** Return a pointer to payload information from the entry that the
// ** pCur cursor is pointing to. The pointer is to the beginning of
// ** the key for index btrees (pPage->intKey==0) and to the data for
// ** table btrees (pPage->intKey==1). The number of bytes of available
// ** key/data is written into *pAmt. If *pAmt==0, then the value
// ** returned will not be a valid pointer.
// **
// ** This routine is an optimization. It is common for the entire key
// ** and data to fit on the local page and for there to be no overflow
// ** pages. When that is so, this routine can be used to access the
// ** key and data without making a copy. If the key and/or data spills
// ** onto overflow pages, then accessPayload() must be used to reassemble
// ** the key/data and copy it into a preallocated buffer.
// **
// ** The pointer returned by this routine looks directly into the cached
// ** page of the database. The data might change or move the next time
// ** any btree routine is called.
// */
func _fetchPayload(tls *libc.TLS, pCur uintptr, pAmt uintptr) (r uintptr) {
var amt, v1 int32
_, _ = amt, v1
amt = int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)
if amt > int32(int64((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FaDataEnd)-int64((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload)) {
/* There is too little space on the page for the expected amount
** of local content. Database must be corrupt. */
if 0 > int32(int64((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FaDataEnd)-int64((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload)) {
v1 = 0
} else {
v1 = int32(int64((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FaDataEnd) - int64((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload))
}
amt = v1
}
*(*Tu32)(unsafe.Pointer(pAmt)) = uint32(amt)
return (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload
}
// C documentation
//
// /*
// ** For the entry that cursor pCur is pointing to, return as
// ** many bytes of the key or data as are available on the local
// ** b-tree page. Write the number of available bytes into *pAmt.
// **
// ** The pointer returned is ephemeral. The key/data may move
// ** or be destroyed on the next call to any Btree routine,
// ** including calls from other threads against the same cache.
// ** Hence, a mutex on the BtShared should be held prior to calling
// ** this routine.
// **
// ** This routine is used to get quick access to key and data
// ** in the common case where no overflow pages are used.
// */
func _sqlite3BtreePayloadFetch(tls *libc.TLS, pCur uintptr, pAmt uintptr) (r uintptr) {
return _fetchPayload(tls, pCur, pAmt)
}
// C documentation
//
// /*
// ** Move the cursor down to a new child page. The newPgno argument is the
// ** page number of the child page to move to.
// **
// ** This function returns SQLITE_CORRUPT if the page-header flags field of
// ** the new child page does not match the flags field of the parent (i.e.
// ** if an intkey page appears to be the parent of a non-intkey page, or
// ** vice-versa).
// */
func _moveToChild(tls *libc.TLS, pCur uintptr, newPgno Tu32) (r int32) {
var rc int32
var v2 Ti8
var v3, p1 uintptr
_, _, _, _ = rc, v2, v3, p1
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) >= libc.Int32FromInt32(BTCURSOR_MAX_DEPTH)-libc.Int32FromInt32(1) {
return _sqlite3CorruptError(tls, int32(75534))
}
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(BTCF_ValidNKey) | libc.Int32FromInt32(BTCF_ValidOvfl)))
*(*Tu16)(unsafe.Pointer(pCur + 88 + uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)*2)) = (*TBtCursor)(unsafe.Pointer(pCur)).Fix
*(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)*8)) = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(0)
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage++
rc = _getAndInitPage(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpBt, newPgno, pCur+136, int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurPagerFlags))
if rc == SQLITE_OK && (int32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FnCell) < int32(1) || int32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FintKey) != int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurIntKey)) {
_releasePage(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpPage)
rc = _sqlite3CorruptError(tls, int32(75548))
}
if rc != 0 {
v3 = pCur + 84
*(*Ti8)(unsafe.Pointer(v3))--
v2 = *(*Ti8)(unsafe.Pointer(v3))
(*TBtCursor)(unsafe.Pointer(pCur)).FpPage = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(v2)*8))
}
return rc
}
// C documentation
//
// /*
// ** Move the cursor up to the parent page.
// **
// ** pCur->idx is set to the cell index that contains the pointer
// ** to the page we are coming from. If we are coming from the
// ** right-most child page then pCur->idx is set to one more than
// ** the largest cell index.
// */
func _moveToParent(tls *libc.TLS, pCur uintptr) {
var pLeaf, v3, p1 uintptr
var v2 Ti8
_, _, _, _ = pLeaf, v2, v3, p1
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(BTCF_ValidNKey) | libc.Int32FromInt32(BTCF_ValidOvfl)))
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = *(*Tu16)(unsafe.Pointer(pCur + 88 + uintptr(int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)-int32(1))*2))
pLeaf = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
v3 = pCur + 84
*(*Ti8)(unsafe.Pointer(v3))--
v2 = *(*Ti8)(unsafe.Pointer(v3))
(*TBtCursor)(unsafe.Pointer(pCur)).FpPage = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(v2)*8))
_releasePageNotNull(tls, pLeaf)
}
// C documentation
//
// /*
// ** Move the cursor to point to the root page of its b-tree structure.
// **
// ** If the table has a virtual root page, then the cursor is moved to point
// ** to the virtual root page instead of the actual root page. A table has a
// ** virtual root page when the actual root page contains no cells and a
// ** single child page. This can only happen with the table rooted at page 1.
// **
// ** If the b-tree structure is empty, the cursor state is set to
// ** CURSOR_INVALID and this routine returns SQLITE_EMPTY. Otherwise,
// ** the cursor is set to point to the first cell located on the root
// ** (or virtual root) page and the cursor state is set to CURSOR_VALID.
// **
// ** If this function returns successfully, it may be assumed that the
// ** page-header flags indicate that the [virtual] root-page is the expected
// ** kind of b-tree page (i.e. if when opening the cursor the caller did not
// ** specify a KeyInfo structure the flags byte is set to 0x05 or 0x0D,
// ** indicating a table b-tree, or if the caller did specify a KeyInfo
// ** structure the flags byte is set to 0x02 or 0x0A, indicating an index
// ** b-tree).
// */
func _moveToRoot(tls *libc.TLS, pCur uintptr) (r int32) {
var pRoot, v2, v3, p4 uintptr
var rc int32
var subpage TPgno
var v1 Ti8
_, _, _, _, _, _, _ = pRoot, rc, subpage, v1, v2, v3, p4
rc = SQLITE_OK
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) >= 0 {
if (*TBtCursor)(unsafe.Pointer(pCur)).FiPage != 0 {
_releasePageNotNull(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpPage)
for {
v2 = pCur + 84
*(*Ti8)(unsafe.Pointer(v2))--
v1 = *(*Ti8)(unsafe.Pointer(v2))
if !(v1 != 0) {
break
}
_releasePageNotNull(tls, *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)*8)))
}
v3 = *(*uintptr)(unsafe.Pointer(pCur + 144))
(*TBtCursor)(unsafe.Pointer(pCur)).FpPage = v3
pRoot = v3
goto skip_init
}
} else {
if (*TBtCursor)(unsafe.Pointer(pCur)).FpgnoRoot == uint32(0) {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_INVALID)
return int32(SQLITE_EMPTY)
} else {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) >= int32(CURSOR_REQUIRESEEK) {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == int32(CURSOR_FAULT) {
return (*TBtCursor)(unsafe.Pointer(pCur)).FskipNext
}
_sqlite3BtreeClearCursor(tls, pCur)
}
rc = _getAndInitPage(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpBt, (*TBtCursor)(unsafe.Pointer(pCur)).FpgnoRoot, pCur+136, int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurPagerFlags))
if rc != SQLITE_OK {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_INVALID)
return rc
}
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage = 0
(*TBtCursor)(unsafe.Pointer(pCur)).FcurIntKey = (*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FintKey
}
}
pRoot = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
/* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor
** expected to open it on an index b-tree. Otherwise, if pKeyInfo is
** NULL, the caller expects a table b-tree. If this is not the case,
** return an SQLITE_CORRUPT error.
**
** Earlier versions of SQLite assumed that this test could not fail
** if the root page was already loaded when this function was called (i.e.
** if pCur->iPage>=0). But this is not so if the database is corrupted
** in such a way that page pRoot is linked into a second b-tree table
** (or the freelist). */
if int32((*TMemPage)(unsafe.Pointer(pRoot)).FisInit) == 0 || libc.BoolInt32((*TBtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0)) != int32((*TMemPage)(unsafe.Pointer(pRoot)).FintKey) {
return _sqlite3CorruptError(tls, int32(75683))
}
goto skip_init
skip_init:
;
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(0)
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
p4 = pCur + 1
*(*Tu8)(unsafe.Pointer(p4)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p4))) & ^(libc.Int32FromInt32(BTCF_AtLast) | libc.Int32FromInt32(BTCF_ValidNKey) | libc.Int32FromInt32(BTCF_ValidOvfl)))
if int32((*TMemPage)(unsafe.Pointer(pRoot)).FnCell) > 0 {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_VALID)
} else {
if !((*TMemPage)(unsafe.Pointer(pRoot)).Fleaf != 0) {
if (*TMemPage)(unsafe.Pointer(pRoot)).Fpgno != uint32(1) {
return _sqlite3CorruptError(tls, int32(75695))
}
subpage = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+int32(8)))
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_VALID)
rc = _moveToChild(tls, pCur, subpage)
} else {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_INVALID)
rc = int32(SQLITE_EMPTY)
}
}
return rc
}
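// Illustrative sketch (not generated code): the comment above _moveToRoot
// says a root page opened without a KeyInfo must carry a table b-tree flag
// byte (0x05 interior, 0x0D leaf) while one opened with a KeyInfo must carry
// an index flag byte (0x02 interior, 0x0A leaf). The hypothetical helper
// below only restates that mapping.
func exampleRootFlagMatches(flagByte byte, hasKeyInfo bool) bool {
	switch flagByte {
	case 0x05, 0x0D: // intkey (table) b-tree page
		return !hasKeyInfo
	case 0x02, 0x0A: // blobkey (index) b-tree page
		return hasKeyInfo
	default: // any other flag byte indicates corruption
		return false
	}
}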
// C documentation
//
// /*
// ** Move the cursor down to the left-most leaf entry beneath the
// ** entry to which it is currently pointing.
// **
// ** The left-most leaf is the one with the smallest key - the first
// ** in ascending order.
// */
func _moveToLeftmost(tls *libc.TLS, pCur uintptr) (r int32) {
var pPage, v1 uintptr
var pgno TPgno
var rc int32
var v2 bool
_, _, _, _, _ = pPage, pgno, rc, v1, v2
rc = SQLITE_OK
for {
if v2 = rc == SQLITE_OK; v2 {
v1 = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
pPage = v1
}
if !(v2 && !((*TMemPage)(unsafe.Pointer(v1)).Fleaf != 0)) {
break
}
pgno = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix)))))<<int32(8)|int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix)) + 1))))))
rc = _moveToChild(tls, pCur, pgno)
}
return rc
}
// C documentation
//
// /*
// ** Move the cursor so that it points to an entry in a table (a.k.a INTKEY
// ** table) near the key intKey.   Notice that intKey is of type i64.
// **
// ** An integer is written into *pRes which is the result of
// ** comparing the key with the entry to which the cursor is
// ** pointing.  The meaning of the integer written into
// ** *pRes is as follows:
// **
// **     *pRes<0      The cursor is left pointing at an entry that
// **                  is smaller than intKey.
// **
// **     *pRes==0     The cursor is left pointing at an entry that
// **                  exactly matches intKey.
// **
// **     *pRes>0      The cursor is left pointing at an entry that
// **                  is larger than intKey.
// */
func _sqlite3BtreeTableMoveto(tls *libc.TLS, pCur uintptr, intKey Ti64, biasRight int32, pRes uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var c, idx, lwr, rc, upr int32
var chldPg TPgno
var pCell, pPage, v3, p4 uintptr
var _ /* nCellKey at bp+0 */ Ti64
_, _, _, _, _, _, _, _, _, _ = c, chldPg, idx, lwr, pCell, pPage, rc, upr, v3, p4
/* If the cursor is already positioned at the point we are trying
** to move to, then just return without doing any work */
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == CURSOR_VALID && int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags)&int32(BTCF_ValidNKey) != 0 {
if (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey == intKey {
*(*int32)(unsafe.Pointer(pRes)) = 0
return SQLITE_OK
}
if (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey < intKey {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags)&int32(BTCF_AtLast) != 0 {
*(*int32)(unsafe.Pointer(pRes)) = -int32(1)
return SQLITE_OK
}
/* If the requested key is one more than the previous key, then
** try to get there using sqlite3BtreeNext() rather than a full
** binary search. This is an optimization only. The correct answer
** is still obtained without this case, only a little more slowly. */
if (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey+int64(1) == intKey {
*(*int32)(unsafe.Pointer(pRes)) = 0
rc = _sqlite3BtreeNext(tls, pCur, 0)
if rc == SQLITE_OK {
_getCellInfo(tls, pCur)
if (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey == intKey {
return SQLITE_OK
}
} else {
if rc != int32(SQLITE_DONE) {
return rc
}
}
}
}
}
rc = _moveToRoot(tls, pCur)
if rc != 0 {
if rc == int32(SQLITE_EMPTY) {
*(*int32)(unsafe.Pointer(pRes)) = -int32(1)
return SQLITE_OK
}
return rc
}
for {
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage /* Pointer to current cell in pPage */
/* pPage->nCell must be greater than zero. If this is the root-page
** the cursor would have been INVALID above and this for(;;) loop
** not run. If this is not the root-page, then the moveToChild() routine
** would have already detected db corruption. Similarly, pPage must
** be the right kind (index or table) of b-tree page. Otherwise
** a moveToChild() or moveToRoot() call would have detected corruption. */
lwr = 0
upr = int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) - int32(1)
idx = upr >> (int32(1) - biasRight) /* idx = biasRight ? upr : (lwr+upr)/2; */
for {
pCell = (*TMemPage)(unsafe.Pointer(pPage)).FaDataOfst + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx))))<<int32(8)|int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx) + 1)))))
if (*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf != 0 {
for {
v3 = pCell
pCell++
if !(int32(0x80) <= int32(*(*Tu8)(unsafe.Pointer(v3)))) {
break
}
if pCell >= (*TMemPage)(unsafe.Pointer(pPage)).FaDataEnd {
return _sqlite3CorruptError(tls, int32(75937))
}
}
}
_sqlite3GetVarint(tls, pCell, bp)
if *(*Ti64)(unsafe.Pointer(bp)) < intKey {
lwr = idx + int32(1)
if lwr > upr {
c = -int32(1)
break
}
} else {
if *(*Ti64)(unsafe.Pointer(bp)) > intKey {
upr = idx - int32(1)
if lwr > upr {
c = +libc.Int32FromInt32(1)
break
}
} else {
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(idx)
if !((*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0) {
lwr = idx
goto moveto_table_next_layer
} else {
p4 = pCur + 1
*(*Tu8)(unsafe.Pointer(p4)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p4))) | libc.Int32FromInt32(BTCF_ValidNKey))
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey = *(*Ti64)(unsafe.Pointer(bp))
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
*(*int32)(unsafe.Pointer(pRes)) = 0
return SQLITE_OK
}
}
}
idx = (lwr + upr) >> int32(1) /* idx = (lwr+upr)/2; */
goto _2
_2:
}
if (*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0 {
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(idx)
*(*int32)(unsafe.Pointer(pRes)) = c
rc = SQLITE_OK
goto moveto_table_finish
}
goto moveto_table_next_layer
moveto_table_next_layer:
;
if lwr >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
chldPg = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(8)))
} else {
chldPg = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*lwr))))<<int32(8)|int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*lwr) + 1))))))
}
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(lwr)
rc = _moveToChild(tls, pCur, chldPg)
if rc != 0 {
break
}
}
goto moveto_table_finish
moveto_table_finish:
;
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
return rc
}
// C documentation
//
// /*
// ** Move the cursor so that it points to an entry in an index table
// ** near the key pIdxKey.
// **
// ** An integer is written into *pRes which is the result of
// ** comparing the key with the entry to which the cursor is
// ** pointing.  The meaning of the integer written into
// ** *pRes is as follows:
// **
// **     *pRes<0      The cursor is left pointing at an entry that
// **                  is smaller than pIdxKey.
// **
// **     *pRes==0     The cursor is left pointing at an entry that
// **                  exactly matches pIdxKey.
// **
// **     *pRes>0      The cursor is left pointing at an entry that
// **                  is larger than pIdxKey.
// **
// ** The pIdxKey->eqSeen field is set to 1 if there
// ** exists an entry in the table that exactly matches pIdxKey.
// */
func _sqlite3BtreeIndexMoveto(tls *libc.TLS, pCur uintptr, pIdxKey uintptr, pRes uintptr) (r int32) {
var c, c1, idx, lwr, nCell, nOverrun, rc, upr, v1, v6 int32
var chldPg TPgno
var pCell, pCellBody, pCellKey, pPage, v11, p3, p8, p9 uintptr
var xRecordCompare TRecordCompare
var v10 Ti8
var v2, v7 bool
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = c, c1, chldPg, idx, lwr, nCell, nOverrun, pCell, pCellBody, pCellKey, pPage, rc, upr, xRecordCompare, v1, v10, v11, v2, v6, v7, p3, p8, p9
xRecordCompare = _sqlite3VdbeFindCompare(tls, pIdxKey)
(*TUnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode = uint8(0)
/* Check to see if we can skip a lot of work. Two cases:
**
** (1) If the cursor is already pointing to the very last cell
** in the table and the pIdxKey search key is greater than or
** equal to that last cell, then no movement is required.
**
** (2) If the cursor is on the last page of the table and the first
** cell on that last page is less than or equal to the pIdxKey
** search key, then we can start the search on the current page
** without needing to go back to root.
*/
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == CURSOR_VALID && (*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).Fleaf != 0 && _cursorOnLastPage(tls, pCur) != 0 {
if v2 = int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix) == int32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FnCell)-int32(1); v2 {
v1 = _indexCellCompare(tls, pCur, int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix), pIdxKey, xRecordCompare)
c = v1
}
if v2 && v1 <= 0 && int32((*TUnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode) == SQLITE_OK {
*(*int32)(unsafe.Pointer(pRes)) = c
return SQLITE_OK /* Cursor already pointing at the correct spot */
}
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) > 0 && _indexCellCompare(tls, pCur, 0, pIdxKey, xRecordCompare) <= 0 && int32((*TUnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode) == SQLITE_OK {
p3 = pCur + 1
*(*Tu8)(unsafe.Pointer(p3)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p3))) & ^libc.Int32FromInt32(BTCF_ValidOvfl))
if !((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FisInit != 0) {
return _sqlite3CorruptError(tls, int32(76133))
}
goto bypass_moveto_root /* Start search on the current page */
}
(*TUnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode = uint8(SQLITE_OK)
}
rc = _moveToRoot(tls, pCur)
if rc != 0 {
if rc == int32(SQLITE_EMPTY) {
*(*int32)(unsafe.Pointer(pRes)) = -int32(1)
return SQLITE_OK
}
return rc
}
goto bypass_moveto_root
bypass_moveto_root:
;
for {
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage /* Pointer to current cell in pPage */
/* pPage->nCell must be greater than zero. If this is the root-page
** the cursor would have been INVALID above and this for(;;) loop
** not run. If this is not the root-page, then the moveToChild() routine
** would have already detected db corruption. Similarly, pPage must
** be the right kind (index or table) of b-tree page. Otherwise
** a moveToChild() or moveToRoot() call would have detected corruption. */
lwr = 0
upr = int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) - int32(1)
idx = upr >> int32(1) /* idx = (lwr+upr)/2; */
for { /* Size of the pCell cell in bytes */
pCell = (*TMemPage)(unsafe.Pointer(pPage)).FaDataOfst + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx))))<<int32(8)|int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx) + 1)))))
/* The maximum supported page-size is 65536 bytes. This means that
** the maximum number of record bytes stored on an index B-Tree
** page is less than 16384 bytes and may be stored as a 2-byte
** varint. */
nCell = int32(*(*Tu8)(unsafe.Pointer(pCell)))
if nCell <= int32((*TMemPage)(unsafe.Pointer(pPage)).Fmax1bytePayload) {
/* This branch runs if the record-size field of the cell is a
** single byte varint and the record fits entirely on the main
** b-tree page. */
c1 = (*(*func(*libc.TLS, int32, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{xRecordCompare})))(tls, nCell, pCell+uintptr(1), pIdxKey)
} else {
if v7 = int32(*(*Tu8)(unsafe.Pointer(pCell + 1)))&int32(0x80) == 0; v7 {
v6 = (nCell&int32(0x7f))<<int32(7) + int32(*(*Tu8)(unsafe.Pointer(pCell + 1)))
nCell = v6
}
if v7 && v6 <= int32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal) {
/* The record-size field is a 2 byte varint and the record
** fits entirely on the main b-tree page. */
c1 = (*(*func(*libc.TLS, int32, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{xRecordCompare})))(tls, nCell, pCell+uintptr(2), pIdxKey)
} else {
/* The record flows over onto one or more overflow pages. In
** this case the whole cell needs to be parsed, a buffer allocated
** and accessPayload() used to retrieve the record into the
** buffer before VdbeRecordCompare() can be called. */
pCellBody = pCell - uintptr((*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize)
nOverrun = int32(18) /* Size of the overrun padding */
(*(*func(*libc.TLS, uintptr, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell})))(tls, pPage, pCellBody, pCur+48)
nCell = int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey)
if nCell < int32(2) || uint32(nCell)/(*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize > (*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FnPage {
rc = _sqlite3CorruptError(tls, int32(76220))
goto moveto_index_finish
}
pCellKey = _sqlite3Malloc(tls, uint64(nCell+nOverrun))
if pCellKey == uintptr(0) {
rc = int32(SQLITE_NOMEM)
goto moveto_index_finish
}
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(idx)
rc = _accessPayload(tls, pCur, uint32(0), uint32(nCell), pCellKey, 0)
libc.Xmemset(tls, pCellKey+uintptr(nCell), 0, uint64(nOverrun)) /* Fix uninit warnings */
p8 = pCur + 1
*(*Tu8)(unsafe.Pointer(p8)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p8))) & ^libc.Int32FromInt32(BTCF_ValidOvfl))
if rc != 0 {
Xsqlite3_free(tls, pCellKey)
goto moveto_index_finish
}
c1 = _sqlite3VdbeRecordCompare(tls, nCell, pCellKey, pIdxKey)
Xsqlite3_free(tls, pCellKey)
}
}
if c1 < 0 {
lwr = idx + int32(1)
} else {
if c1 > 0 {
upr = idx - int32(1)
} else {
*(*int32)(unsafe.Pointer(pRes)) = 0
rc = SQLITE_OK
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(idx)
if (*TUnpackedRecord)(unsafe.Pointer(pIdxKey)).FerrCode != 0 {
rc = _sqlite3CorruptError(tls, int32(76252))
}
goto moveto_index_finish
}
}
if lwr > upr {
break
}
idx = (lwr + upr) >> int32(1) /* idx = (lwr+upr)/2 */
goto _5
_5:
}
if (*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0 {
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(idx)
*(*int32)(unsafe.Pointer(pRes)) = c1
rc = SQLITE_OK
goto moveto_index_finish
}
if lwr >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
chldPg = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(8)))
} else {
chldPg = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*lwr))))<<int32(8)|int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*lwr) + 1))))))
}
/* This block is similar to an in-lined version of:
**
**    pCur->ix = (u16)lwr;
**    rc = moveToChild(pCur, chldPg);
**    if( rc ) break;
*/
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
p9 = pCur + 1
*(*Tu8)(unsafe.Pointer(p9)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p9))) & ^(libc.Int32FromInt32(BTCF_ValidNKey) | libc.Int32FromInt32(BTCF_ValidOvfl)))
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) >= libc.Int32FromInt32(BTCURSOR_MAX_DEPTH)-libc.Int32FromInt32(1) {
return _sqlite3CorruptError(tls, int32(76283))
}
*(*Tu16)(unsafe.Pointer(pCur + 88 + uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)*2)) = uint16(lwr)
*(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)*8)) = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(0)
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage++
rc = _getAndInitPage(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpBt, chldPg, pCur+136, int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurPagerFlags))
if rc == SQLITE_OK && (int32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FnCell) < int32(1) || int32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FintKey) != int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurIntKey)) {
_releasePage(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpPage)
rc = _sqlite3CorruptError(tls, int32(76294))
}
if rc != 0 {
v11 = pCur + 84
*(*Ti8)(unsafe.Pointer(v11))--
v10 = *(*Ti8)(unsafe.Pointer(v11))
(*TBtCursor)(unsafe.Pointer(pCur)).FpPage = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(v10)*8))
break
}
/*
***** End of in-lined moveToChild() call */
goto _4
_4:
}
goto moveto_index_finish
moveto_index_finish:
;
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
return rc
}
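// Illustrative sketch (not generated code): _sqlite3BtreeTableMoveto and
// _sqlite3BtreeIndexMoveto above both run the same per-page binary search,
// bracketing candidate cell indexes with lwr/upr and probing the midpoint idx
// until a match is found or the bracket empties and the search descends into
// a child page. This is a plain restatement with a hypothetical comparator.
func exampleBinarySearchPage(nCell int, cmp func(idx int) int) (idx int, found bool) {
	lwr, upr := 0, nCell-1
	idx = upr >> 1 // same midpoint as idx = (lwr+upr)/2 with lwr==0
	for lwr <= upr {
		c := cmp(idx) // <0: cell key below target, 0: match, >0: above target
		if c == 0 {
			return idx, true
		}
		if c < 0 {
			lwr = idx + 1
		} else {
			upr = idx - 1
		}
		idx = (lwr + upr) >> 1
	}
	return idx, false
}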
// C documentation
//
// /*
// ** Return TRUE if the cursor is not pointing at an entry of the table.
// **
// ** TRUE will be returned after a call to sqlite3BtreeNext() moves
// ** past the last entry in the table or sqlite3BtreePrev() moves past
// ** the first entry. TRUE is also returned if the table is empty.
// */
func _sqlite3BtreeEof(tls *libc.TLS, pCur uintptr) (r int32) {
/* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries
** have been deleted? This API will need to change to return an error code
** as well as the boolean result value.
*/
return libc.BoolInt32(CURSOR_VALID != int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState))
}
// C documentation
//
// /*
// ** Return an estimate for the number of rows in the table that pCur is
// ** pointing to. Return a negative number if no estimate is currently
// ** available.
// */
func _sqlite3BtreeRowCountEst(tls *libc.TLS, pCur uintptr) (r Ti64) {
var i Tu8
var n Ti64
_, _ = i, n
/* Currently this interface is only called by the OP_IfSmaller
// ** opcode, and in that case the cursor will always be valid and
** will always point to a leaf node. */
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) != CURSOR_VALID {
return int64(-int32(1))
}
if int32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).Fleaf) == 0 {
return int64(-int32(1))
}
n = int64((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FnCell)
i = uint8(0)
for {
if !(int32(i) < int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)) {
break
}
n *= int64((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(i)*8)))).FnCell)
goto _1
_1:
;
i++
}
return n
}
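// Illustrative sketch (not generated code): _sqlite3BtreeRowCountEst above
// multiplies the leaf page's cell count by the cell count of every ancestor
// page on the cursor's stack, i.e. it assumes a roughly uniform fan-out. A
// plain restatement with hypothetical inputs:
func exampleRowCountEstimate(leafCells int64, ancestorCells []int64) int64 {
	n := leafCells
	for _, c := range ancestorCells {
		n *= c // each interior level multiplies the estimate by its fan-out
	}
	return n
}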
// C documentation
//
// /*
// ** Advance the cursor to the next entry in the database.
// ** Return value:
// **
// ** SQLITE_OK success
// ** SQLITE_DONE cursor is already pointing at the last element
// ** otherwise some kind of error occurred
// **
// ** The main entry point is sqlite3BtreeNext(). That routine is optimized
// ** for the common case of merely incrementing the cell counter BtCursor.aiIdx
// ** to the next cell on the current page. The (slower) btreeNext() helper
// ** routine is called when it is necessary to move to a different page or
// ** to restore the cursor.
// **
// ** If bit 0x01 of the F argument in sqlite3BtreeNext(C,F) is 1, then the
// ** cursor corresponds to an SQL index and this routine could have been
// ** skipped if the SQL index had been a unique index. The F argument
// ** is a hint to the implementation. The SQLite btree implementation does not use
// ** this hint, but COMDB2 does.
// */
func _btreeNext(tls *libc.TLS, pCur uintptr) (r int32) {
var idx, rc, v1 int32
var pPage, v3 uintptr
var v2 Tu16
_, _, _, _, _, _ = idx, pPage, rc, v1, v2, v3
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) != CURSOR_VALID {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) >= int32(CURSOR_REQUIRESEEK) {
v1 = _btreeRestoreCursorPosition(tls, pCur)
} else {
v1 = SQLITE_OK
}
rc = v1
if rc != SQLITE_OK {
return rc
}
if int32(CURSOR_INVALID) == int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) {
return int32(SQLITE_DONE)
}
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) == int32(CURSOR_SKIPNEXT) {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_VALID)
if (*TBtCursor)(unsafe.Pointer(pCur)).FskipNext > 0 {
return SQLITE_OK
}
}
}
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
v3 = pCur + 86
*(*Tu16)(unsafe.Pointer(v3))++
v2 = *(*Tu16)(unsafe.Pointer(v3))
idx = int32(v2)
if _sqlite3FaultSim(tls, int32(412)) != 0 {
(*TMemPage)(unsafe.Pointer(pPage)).FisInit = uint8(0)
}
if !((*TMemPage)(unsafe.Pointer(pPage)).FisInit != 0) {
return _sqlite3CorruptError(tls, int32(76395))
}
if idx >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
if !((*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0) {
rc = _moveToChild(tls, pCur, _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(8))))
if rc != 0 {
return rc
}
return _moveToLeftmost(tls, pCur)
}
for cond := true; cond; cond = int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) == 0 {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_INVALID)
return int32(SQLITE_DONE)
}
_moveToParent(tls, pCur)
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
}
if (*TMemPage)(unsafe.Pointer(pPage)).FintKey != 0 {
return _sqlite3BtreeNext(tls, pCur, 0)
} else {
return SQLITE_OK
}
}
if (*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0 {
return SQLITE_OK
} else {
return _moveToLeftmost(tls, pCur)
}
return r
}
func _sqlite3BtreeNext(tls *libc.TLS, pCur uintptr, flags int32) (r int32) {
var pPage, v3, p1 uintptr
var v2 Tu16
_, _, _, _ = pPage, v2, v3, p1
_ = flags /* Used in COMDB2 but not native SQLite */
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(BTCF_ValidNKey) | libc.Int32FromInt32(BTCF_ValidOvfl)))
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) != CURSOR_VALID {
return _btreeNext(tls, pCur)
}
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
v3 = pCur + 86
*(*Tu16)(unsafe.Pointer(v3))++
v2 = *(*Tu16)(unsafe.Pointer(v3))
if int32(v2) >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
(*TBtCursor)(unsafe.Pointer(pCur)).Fix--
return _btreeNext(tls, pCur)
}
if (*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0 {
return SQLITE_OK
} else {
return _moveToLeftmost(tls, pCur)
}
return r
}
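// Illustrative sketch (not generated code): as the comment before _btreeNext
// explains, _sqlite3BtreeNext first tries the cheap path of bumping the cell
// index on the current leaf page and only falls back to the slower helper
// when the cursor must be restored or the page is exhausted. A hypothetical
// restatement of that split:
func exampleAdvance(ix, nCell uint16, valid bool, slowPath func() int) int {
	if !valid {
		return slowPath() // cursor needs a seek/restore before advancing
	}
	if ix+1 >= nCell {
		return slowPath() // past the last cell: move to another page
	}
	// fast path: the next entry is simply the next cell on the same leaf page
	return SQLITE_OK
}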
// C documentation
//
// /*
// ** Step the cursor back to the previous entry in the database.
// ** Return values:
// **
// ** SQLITE_OK success
// ** SQLITE_DONE the cursor is already on the first element of the table
// ** otherwise some kind of error occurred
// **
// ** The main entry point is sqlite3BtreePrevious(). That routine is optimized
// ** for the common case of merely decrementing the cell counter BtCursor.aiIdx
// ** to the previous cell on the current page. The (slower) btreePrevious()
// ** helper routine is called when it is necessary to move to a different page
// ** or to restore the cursor.
// **
// ** If bit 0x01 of the F argument to sqlite3BtreePrevious(C,F) is 1, then
// ** the cursor corresponds to an SQL index and this routine could have been
// ** skipped if the SQL index had been a unique index. The F argument is a
// ** hint to the implementation. The native SQLite btree implementation does not
// ** use this hint, but COMDB2 does.
// */
func _btreePrevious(tls *libc.TLS, pCur uintptr) (r int32) {
var idx, rc, v1 int32
var pPage uintptr
_, _, _, _ = idx, pPage, rc, v1
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) != CURSOR_VALID {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) >= int32(CURSOR_REQUIRESEEK) {
v1 = _btreeRestoreCursorPosition(tls, pCur)
} else {
v1 = SQLITE_OK
}
rc = v1
if rc != SQLITE_OK {
return rc
}
if int32(CURSOR_INVALID) == int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) {
return int32(SQLITE_DONE)
}
if int32(CURSOR_SKIPNEXT) == int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_VALID)
if (*TBtCursor)(unsafe.Pointer(pCur)).FskipNext < 0 {
return SQLITE_OK
}
}
}
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
if _sqlite3FaultSim(tls, int32(412)) != 0 {
(*TMemPage)(unsafe.Pointer(pPage)).FisInit = uint8(0)
}
if !((*TMemPage)(unsafe.Pointer(pPage)).FisInit != 0) {
return _sqlite3CorruptError(tls, int32(76488))
}
if !((*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0) {
idx = int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix)
rc = _moveToChild(tls, pCur, _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx))))<<int32(8)|int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx) + 1)))))))
if n >= mxPage {
return _sqlite3CorruptError(tls, int32(76578))
}
if n > uint32(0) {
searchList = uint8(0) /* If the free-list must be searched for 'nearby' */
nSearch = uint32(0) /* Count of the number of search attempts */
/* If eMode==BTALLOC_EXACT and a query of the pointer-map
** shows that the page 'nearby' is somewhere on the free-list, then
** the entire-list will be searched for that page.
*/
if int32(eMode) == int32(BTALLOC_EXACT) {
if nearby <= mxPage {
rc = _ptrmapGet(tls, pBt, nearby, bp+8, uintptr(0))
if rc != 0 {
return rc
}
if int32(*(*Tu8)(unsafe.Pointer(bp + 8))) == int32(PTRMAP_FREEPAGE) {
searchList = uint8(1)
}
}
} else {
if int32(eMode) == int32(BTALLOC_LE) {
searchList = uint8(1)
}
}
/* Decrement the free-list count by 1. Set iTrunk to the index of the
** first free-list trunk page. iPrevTrunk is initially 1.
*/
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FpDbPage)
if rc != 0 {
return rc
}
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+36, n-uint32(1))
/* The code within this loop is run only once if the 'searchList' variable
** is not true. Otherwise, it runs once for each trunk-page on the
** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT)
** or until a page less than 'nearby' is located (eMode==BTALLOC_LT)
*/
for cond := true; cond; cond = searchList != 0 {
pPrevTrunk = *(*uintptr)(unsafe.Pointer(bp))
if pPrevTrunk != 0 {
/* EVIDENCE-OF: R-01506-11053 The first integer on a freelist trunk page
** is the page number of the next freelist trunk page in the list or
** zero if this is the last freelist trunk page. */
iTrunk = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPrevTrunk)).FaData)
} else {
/* EVIDENCE-OF: R-59841-13798 The 4-byte big-endian integer at offset 32
** stores the page number of the first page of the freelist, or zero if
** the freelist is empty. */
iTrunk = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+32)
}
if v2 = iTrunk > mxPage; !v2 {
v1 = nSearch
nSearch++
}
if v2 || v1 > n {
rc = _sqlite3CorruptError(tls, int32(76634))
} else {
rc = _btreeGetUnusedPage(tls, pBt, iTrunk, bp, 0)
}
if rc != 0 {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
goto end_allocate_page
}
/* EVIDENCE-OF: R-13523-04394 The second integer on a freelist trunk page
** is the number of leaf page pointers to follow. */
k = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData+4)
if k == uint32(0) && !(searchList != 0) {
/* The trunk has no leaves and the list is not being searched.
** So extract the trunk page itself and use it as the newly
** allocated page */
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
if rc != 0 {
goto end_allocate_page
}
*(*TPgno)(unsafe.Pointer(pPgno)) = iTrunk
libc.Xmemcpy(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+32, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData, uint64(4))
*(*uintptr)(unsafe.Pointer(ppPage)) = *(*uintptr)(unsafe.Pointer(bp))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
if k > (*TBtShared)(unsafe.Pointer(pBt)).FusableSize/libc.Uint32FromInt32(4)-libc.Uint32FromInt32(2) {
/* Value of k is out of range. Database corruption */
rc = _sqlite3CorruptError(tls, int32(76663))
goto end_allocate_page
} else {
if searchList != 0 && (nearby == iTrunk || iTrunk < nearby && int32(eMode) == int32(BTALLOC_LE)) {
/* The list is being searched and this trunk page is the page
** to allocate, regardless of whether it has leaves.
*/
*(*TPgno)(unsafe.Pointer(pPgno)) = iTrunk
*(*uintptr)(unsafe.Pointer(ppPage)) = *(*uintptr)(unsafe.Pointer(bp))
searchList = uint8(0)
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
if rc != 0 {
goto end_allocate_page
}
if k == uint32(0) {
if !(pPrevTrunk != 0) {
libc.Xmemcpy(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+32, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData, uint64(4))
} else {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPrevTrunk)).FpDbPage)
if rc != SQLITE_OK {
goto end_allocate_page
}
libc.Xmemcpy(tls, (*TMemPage)(unsafe.Pointer(pPrevTrunk)).FaData, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData, uint64(4))
}
} else {
iNewTrunk = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData+8)
if iNewTrunk > mxPage {
rc = _sqlite3CorruptError(tls, int32(76697))
goto end_allocate_page
}
rc = _btreeGetUnusedPage(tls, pBt, iNewTrunk, bp+16, 0)
if rc != SQLITE_OK {
goto end_allocate_page
}
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 16)))).FpDbPage)
if rc != SQLITE_OK {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 16)))
goto end_allocate_page
}
libc.Xmemcpy(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 16)))).FaData, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData, uint64(4))
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 16)))).FaData+4, k-uint32(1))
libc.Xmemcpy(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 16)))).FaData+8, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData+12, uint64((k-uint32(1))*uint32(4)))
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 16)))
if !(pPrevTrunk != 0) {
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+32, iNewTrunk)
} else {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPrevTrunk)).FpDbPage)
if rc != 0 {
goto end_allocate_page
}
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pPrevTrunk)).FaData, iNewTrunk)
}
}
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
if k > uint32(0) {
aData = (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData
if nearby > uint32(0) {
closest = uint32(0)
if int32(eMode) == int32(BTALLOC_LE) {
i = uint32(0)
for {
if !(i < k) {
break
}
iPage = _sqlite3Get4byte(tls, aData+uintptr(uint32(8)+i*uint32(4)))
if iPage <= nearby {
closest = i
break
}
goto _3
_3:
;
i++
}
} else {
dist = _sqlite3AbsInt32(tls, int32(_sqlite3Get4byte(tls, aData+8)-nearby))
i = uint32(1)
for {
if !(i < k) {
break
}
d2 = _sqlite3AbsInt32(tls, int32(_sqlite3Get4byte(tls, aData+uintptr(uint32(8)+i*uint32(4)))-nearby))
if d2 < dist {
closest = i
dist = d2
}
goto _4
_4:
;
i++
}
}
} else {
closest = uint32(0)
}
iPage = _sqlite3Get4byte(tls, aData+uintptr(uint32(8)+closest*uint32(4)))
if iPage > mxPage || iPage < uint32(2) {
rc = _sqlite3CorruptError(tls, int32(76762))
goto end_allocate_page
}
if !(searchList != 0) || (iPage == nearby || iPage < nearby && int32(eMode) == int32(BTALLOC_LE)) {
*(*TPgno)(unsafe.Pointer(pPgno)) = iPage
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
if rc != 0 {
goto end_allocate_page
}
if closest < k-uint32(1) {
libc.Xmemcpy(tls, aData+uintptr(uint32(8)+closest*uint32(4)), aData+uintptr(uint32(4)+k*uint32(4)), uint64(4))
}
_sqlite3Put4byte(tls, aData+4, k-uint32(1))
if !(_btreeGetHasContent(tls, pBt, *(*TPgno)(unsafe.Pointer(pPgno))) != 0) {
v5 = int32(PAGER_GET_NOCONTENT)
} else {
v5 = 0
}
noContent = v5
rc = _btreeGetUnusedPage(tls, pBt, *(*TPgno)(unsafe.Pointer(pPgno)), ppPage, noContent)
if rc == SQLITE_OK {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage)
if rc != SQLITE_OK {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage)))
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
}
}
searchList = uint8(0)
}
}
}
}
}
_releasePage(tls, pPrevTrunk)
pPrevTrunk = uintptr(0)
}
} else {
if 0 == int32((*TBtShared)(unsafe.Pointer(pBt)).FbDoTruncate) {
v6 = int32(PAGER_GET_NOCONTENT)
} else {
v6 = 0
}
/* There are no pages on the freelist, so append a new page to the
** database image.
**
** Normally, new pages allocated by this block can be requested from the
** pager layer with the 'no-content' flag set. This prevents the pager
** from trying to read the pages content from disk. However, if the
** current transaction has already run one or more incremental-vacuum
** steps, then the page we are about to allocate may contain content
** that is required in the event of a rollback. In this case, do
** not set the no-content flag. This causes the pager to load and journal
** the current page content before overwriting it.
**
** Note that the pager will not actually attempt to load or journal
** content for any page that really does lie past the end of the database
** file on disk. So the effects of disabling the no-content optimization
** here are confined to those pages that lie between the end of the
** database image and the end of the database file.
*/
bNoContent = v6
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FpDbPage)
if rc != 0 {
return rc
}
(*TBtShared)(unsafe.Pointer(pBt)).FnPage++
if (*TBtShared)(unsafe.Pointer(pBt)).FnPage == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
(*TBtShared)(unsafe.Pointer(pBt)).FnPage++
}
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 && _ptrmapPageno(tls, pBt, (*TBtShared)(unsafe.Pointer(pBt)).FnPage) == (*TBtShared)(unsafe.Pointer(pBt)).FnPage {
/* If *pPgno refers to a pointer-map page, allocate two new pages
** at the end of the file instead of one. The first allocated page
** becomes a new pointer-map page, the second is used by the caller.
*/
*(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0)
rc = _btreeGetUnusedPage(tls, pBt, (*TBtShared)(unsafe.Pointer(pBt)).FnPage, bp+24, bNoContent)
if rc == SQLITE_OK {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 24)))).FpDbPage)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24)))
}
if rc != 0 {
return rc
}
(*TBtShared)(unsafe.Pointer(pBt)).FnPage++
if (*TBtShared)(unsafe.Pointer(pBt)).FnPage == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
(*TBtShared)(unsafe.Pointer(pBt)).FnPage++
}
}
_sqlite3Put4byte(tls, uintptr(28)+(*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData, (*TBtShared)(unsafe.Pointer(pBt)).FnPage)
*(*TPgno)(unsafe.Pointer(pPgno)) = (*TBtShared)(unsafe.Pointer(pBt)).FnPage
rc = _btreeGetUnusedPage(tls, pBt, *(*TPgno)(unsafe.Pointer(pPgno)), ppPage, bNoContent)
if rc != 0 {
return rc
}
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppPage)))).FpDbPage)
if rc != SQLITE_OK {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(ppPage)))
*(*uintptr)(unsafe.Pointer(ppPage)) = uintptr(0)
}
}
goto end_allocate_page
end_allocate_page:
;
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
_releasePage(tls, pPrevTrunk)
return rc
}
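// Illustrative sketch (not generated code): the freelist walked by
// _allocateBtreePage above keeps, on every trunk page, the next trunk's page
// number at byte offset 0, the count of leaf page numbers at offset 4, and
// the leaf page numbers themselves from offset 8 on, all 4-byte big-endian
// (see the EVIDENCE-OF comments in the function). The hypothetical decoder
// below restates that layout.
func exampleDecodeTrunkPage(page []byte) (nextTrunk, nLeaf uint32, leaves []uint32) {
	be32 := func(b []byte) uint32 {
		return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
	}
	nextTrunk = be32(page[0:4]) // zero means this is the last trunk page
	nLeaf = be32(page[4:8])     // number of leaf page numbers that follow
	for i := uint32(0); i < nLeaf && 8+4*i+4 <= uint32(len(page)); i++ {
		leaves = append(leaves, be32(page[8+4*i:]))
	}
	return nextTrunk, nLeaf, leaves
}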
// C documentation
//
// /*
// ** This function is used to add page iPage to the database file free-list.
// ** It is assumed that the page is not already a part of the free-list.
// **
// ** The value passed as the second argument to this function is optional.
// ** If the caller happens to have a pointer to the MemPage object
// ** corresponding to page iPage handy, it may pass it as the second value.
// ** Otherwise, it may pass NULL.
// **
// ** If a pointer to a MemPage object is passed as the second argument,
// ** its reference count is not altered by this function.
// */
func _freePage2(tls *libc.TLS, pBt uintptr, pMemPage uintptr, iPage TPgno) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var iTrunk TPgno
var nFree, nLeaf Tu32
var pPage1 uintptr
var v1, v3, v5 int32
var v2, v4, v6 bool
var _ /* pPage at bp+8 */ uintptr
var _ /* pTrunk at bp+0 */ uintptr
var _ /* rc at bp+16 */ int32
_, _, _, _, _, _, _, _, _, _ = iTrunk, nFree, nLeaf, pPage1, v1, v2, v3, v4, v5, v6
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) /* Free-list trunk page */
iTrunk = uint32(0) /* Page number of free-list trunk page */
pPage1 = (*TBtShared)(unsafe.Pointer(pBt)).FpPage1 /* Initial number of pages on free-list */
if iPage < uint32(2) || iPage > (*TBtShared)(unsafe.Pointer(pBt)).FnPage {
return _sqlite3CorruptError(tls, int32(76889))
}
if pMemPage != 0 {
*(*uintptr)(unsafe.Pointer(bp + 8)) = pMemPage
_sqlite3PagerRef(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FpDbPage)
} else {
*(*uintptr)(unsafe.Pointer(bp + 8)) = _btreePageLookup(tls, pBt, iPage)
}
/* Increment the free page count on pPage1 */
*(*int32)(unsafe.Pointer(bp + 16)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FpDbPage)
if *(*int32)(unsafe.Pointer(bp + 16)) != 0 {
goto freepage_out
}
nFree = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+36)
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+36, nFree+uint32(1))
if int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_SECURE_DELETE) != 0 {
/* If the secure_delete option is enabled, then
** always fully overwrite deleted information with zeros.
*/
if v2 = !(*(*uintptr)(unsafe.Pointer(bp + 8)) != 0); v2 {
v1 = _btreeGetPage(tls, pBt, iPage, bp+8, 0)
*(*int32)(unsafe.Pointer(bp + 16)) = v1
}
if v4 = v2 && v1 != 0; !v4 {
v3 = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FpDbPage)
*(*int32)(unsafe.Pointer(bp + 16)) = v3
}
if v4 || v3 != 0 {
goto freepage_out
}
libc.Xmemset(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FaData, 0, uint64((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FpBt)).FpageSize))
}
/* If the database supports auto-vacuum, write an entry in the pointer-map
** to indicate that the page is free.
*/
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
_ptrmapPut(tls, pBt, iPage, uint8(PTRMAP_FREEPAGE), uint32(0), bp+16)
if *(*int32)(unsafe.Pointer(bp + 16)) != 0 {
goto freepage_out
}
}
/* Now manipulate the actual database free-list structure. There are two
** possibilities. If the free-list is currently empty, or if the first
** trunk page in the free-list is full, then this page will become a
** new free-list trunk page. Otherwise, it will become a leaf of the
** first trunk page in the current free-list. This block tests if it
** is possible to add the page as a new free-list leaf.
*/
if nFree != uint32(0) { /* Initial number of leaf cells on trunk page */
iTrunk = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+32)
if iTrunk > _btreePagecount(tls, pBt) {
*(*int32)(unsafe.Pointer(bp + 16)) = _sqlite3CorruptError(tls, int32(76936))
goto freepage_out
}
*(*int32)(unsafe.Pointer(bp + 16)) = _btreeGetPage(tls, pBt, iTrunk, bp, 0)
if *(*int32)(unsafe.Pointer(bp + 16)) != SQLITE_OK {
goto freepage_out
}
nLeaf = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData+4)
if nLeaf > (*TBtShared)(unsafe.Pointer(pBt)).FusableSize/uint32(4)-uint32(2) {
*(*int32)(unsafe.Pointer(bp + 16)) = _sqlite3CorruptError(tls, int32(76947))
goto freepage_out
}
if nLeaf < (*TBtShared)(unsafe.Pointer(pBt)).FusableSize/uint32(4)-uint32(8) {
/* In this case there is room on the trunk page to insert the page
** being freed as a new leaf.
**
** Note that the trunk page is not really full until it contains
** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have
** coded. But due to a coding error in versions of SQLite prior to
** 3.6.0, databases with freelist trunk pages holding more than
** usableSize/4 - 8 entries will be reported as corrupt. In order
** to maintain backwards compatibility with older versions of SQLite,
** we will continue to restrict the number of entries to usableSize/4 - 8
** for now. At some point in the future (once everyone has upgraded
** to 3.6.0 or later) we should consider fixing the conditional above
** to read "usableSize/4-2" instead of "usableSize/4-8".
**
** EVIDENCE-OF: R-19920-11576 However, newer versions of SQLite still
** avoid using the last six entries in the freelist trunk page array in
** order that database files created by newer versions of SQLite can be
** read by older versions of SQLite.
*/
*(*int32)(unsafe.Pointer(bp + 16)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
if *(*int32)(unsafe.Pointer(bp + 16)) == SQLITE_OK {
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData+4, nLeaf+uint32(1))
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData+uintptr(uint32(8)+nLeaf*uint32(4)), iPage)
if *(*uintptr)(unsafe.Pointer(bp + 8)) != 0 && int32((*TBtShared)(unsafe.Pointer(pBt)).FbtsFlags)&int32(BTS_SECURE_DELETE) == 0 {
_sqlite3PagerDontWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FpDbPage)
}
*(*int32)(unsafe.Pointer(bp + 16)) = _btreeSetHasContent(tls, pBt, iPage)
}
goto freepage_out
}
}
/* If control flows to this point, then it was not possible to add the
** page being freed as a leaf page of the first trunk in the free-list.
** Possibly because the free-list is empty, or possibly because the
** first trunk in the free-list is full. Either way, the page being freed
** will become the new first trunk page in the free-list.
*/
if v6 = *(*uintptr)(unsafe.Pointer(bp + 8)) == uintptr(0); v6 {
v5 = _btreeGetPage(tls, pBt, iPage, bp+8, 0)
*(*int32)(unsafe.Pointer(bp + 16)) = v5
}
if v6 && SQLITE_OK != v5 {
goto freepage_out
}
*(*int32)(unsafe.Pointer(bp + 16)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FpDbPage)
if *(*int32)(unsafe.Pointer(bp + 16)) != SQLITE_OK {
goto freepage_out
}
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FaData, iTrunk)
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FaData+4, uint32(0))
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pPage1)).FaData+32, iPage)
goto freepage_out
freepage_out:
;
if *(*uintptr)(unsafe.Pointer(bp + 8)) != 0 {
(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FisInit = uint8(0)
}
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
return *(*int32)(unsafe.Pointer(bp + 16))
}
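// Illustrative sketch (not generated code): _freePage2 above appends a freed
// page as a leaf of the first freelist trunk only while that trunk holds
// fewer than usableSize/4 - 8 leaf pointers (a deliberately conservative
// limit kept for compatibility with pre-3.6.0 files, per the comment in the
// function); otherwise the freed page becomes the new first trunk. A
// hypothetical restatement of that decision, assuming a sane page size:
func exampleFreedPageBecomesLeaf(freelistNonEmpty bool, nLeaf, usableSize uint32) bool {
	if !freelistNonEmpty {
		return false // empty freelist: the freed page must start a new trunk
	}
	return nLeaf < usableSize/4-8 // room left on the current trunk page
}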
func _freePage(tls *libc.TLS, pPage uintptr, pRC uintptr) {
if *(*int32)(unsafe.Pointer(pRC)) == SQLITE_OK {
*(*int32)(unsafe.Pointer(pRC)) = _freePage2(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpBt, pPage, (*TMemPage)(unsafe.Pointer(pPage)).Fpgno)
}
}
// C documentation
//
// /*
// ** Free the overflow pages associated with the given Cell.
// */
func _clearCellOverflow(tls *libc.TLS, pPage uintptr, pCell uintptr, pInfo uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nOvfl, rc, v1 int32
var ovflPageSize Tu32
var ovflPgno TPgno
var pBt, v2 uintptr
var v3 bool
var _ /* iNext at bp+0 */ TPgno
var _ /* pOvfl at bp+8 */ uintptr
_, _, _, _, _, _, _, _ = nOvfl, ovflPageSize, ovflPgno, pBt, rc, v1, v2, v3
if pCell+uintptr((*TCellInfo)(unsafe.Pointer(pInfo)).FnSize) > (*TMemPage)(unsafe.Pointer(pPage)).FaDataEnd {
/* Cell extends past end of page */
return _sqlite3CorruptError(tls, int32(77036))
}
ovflPgno = _sqlite3Get4byte(tls, pCell+uintptr((*TCellInfo)(unsafe.Pointer(pInfo)).FnSize)-uintptr(4))
pBt = (*TMemPage)(unsafe.Pointer(pPage)).FpBt
ovflPageSize = (*TBtShared)(unsafe.Pointer(pBt)).FusableSize - uint32(4)
nOvfl = int32(((*TCellInfo)(unsafe.Pointer(pInfo)).FnPayload - uint32((*TCellInfo)(unsafe.Pointer(pInfo)).FnLocal) + ovflPageSize - uint32(1)) / ovflPageSize)
for {
v1 = nOvfl
nOvfl--
if !(v1 != 0) {
break
}
*(*TPgno)(unsafe.Pointer(bp)) = uint32(0)
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0)
if ovflPgno < uint32(2) || ovflPgno > _btreePagecount(tls, pBt) {
/* 0 is not a legal page number and page 1 cannot be an
** overflow page. Therefore if ovflPgno<2 or past the end of the
** file the database must be corrupt. */
return _sqlite3CorruptError(tls, int32(77053))
}
if nOvfl != 0 {
rc = _getOverflowPage(tls, pBt, ovflPgno, bp+8, bp)
if rc != 0 {
return rc
}
}
if v3 = *(*uintptr)(unsafe.Pointer(bp + 8)) != 0; !v3 {
v2 = _btreePageLookup(tls, pBt, ovflPgno)
*(*uintptr)(unsafe.Pointer(bp + 8)) = v2
}
if (v3 || v2 != uintptr(0)) && _sqlite3PagerPageRefcount(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FpDbPage) != int32(1) {
/* There is no reason any cursor should have an outstanding reference
** to an overflow page belonging to a cell that is being deleted/updated.
** So if there exists more than one reference to this page, then it
** must not really be an overflow page and the database must be corrupt.
** It is helpful to detect this before calling freePage2(), as
** freePage2() may zero the page contents if secure-delete mode is
** enabled. If this 'overflow' page happens to be a page that the
** caller is iterating through or using in some other way, this
** can be problematic.
*/
rc = _sqlite3CorruptError(tls, int32(77073))
} else {
rc = _freePage2(tls, pBt, *(*uintptr)(unsafe.Pointer(bp + 8)), ovflPgno)
}
if *(*uintptr)(unsafe.Pointer(bp + 8)) != 0 {
_sqlite3PagerUnref(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FpDbPage)
}
if rc != 0 {
return rc
}
ovflPgno = *(*TPgno)(unsafe.Pointer(bp))
}
return SQLITE_OK
}
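// Illustrative sketch (not generated code): _clearCellOverflow above derives
// the length of a cell's overflow chain from its payload size. The bytes that
// do not fit locally are split into chunks of usableSize-4 (each overflow
// page spends 4 bytes on the next-page pointer), rounded up. A hypothetical
// restatement, assuming nPayload > nLocal and usableSize > 4:
func exampleOverflowPageCount(nPayload, nLocal, usableSize uint32) uint32 {
	overflowBytes := nPayload - nLocal // bytes that spill off the b-tree page
	chunk := usableSize - 4            // payload bytes carried per overflow page
	return (overflowBytes + chunk - 1) / chunk // ceiling division
}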
/* Call xParseCell to compute the size of a cell. If the cell contains
** overflow, then invoke cellClearOverflow to clear out that overflow.
** Store the result code (SQLITE_OK or some error code) in rc.
**
** Implemented as macro to force inlining for performance.
*/
// C documentation
//
// /*
// ** Create the byte sequence used to represent a cell on page pPage
// ** and write that byte sequence into pCell[]. Overflow pages are
// ** allocated and filled in as necessary. The calling procedure
// ** is responsible for making sure sufficient space has been allocated
// ** for pCell[].
// **
// ** Note that pCell does not necessarily need to point to the pPage->aData
// ** area. pCell might point to some temporary storage. The cell will
// ** be constructed in this temporary area then copied into pPage->aData
// ** later.
// */
func _fillInCell(tls *libc.TLS, pPage uintptr, pCell uintptr, pX uintptr, pnSize uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var eType Tu8
var mn, n, nHeader, nPayload, nSrc, spaceLeft, v1, v2, v3, v4 int32
var pBt, pPayload, pPrior, pSrc, pToRelease uintptr
var pgnoPtrmap TPgno
var _ /* pOvfl at bp+8 */ uintptr
var _ /* pgnoOvfl at bp+4 */ TPgno
var _ /* rc at bp+0 */ int32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = eType, mn, n, nHeader, nPayload, nSrc, pBt, pPayload, pPrior, pSrc, pToRelease, pgnoPtrmap, spaceLeft, v1, v2, v3, v4
/* pPage is not necessarily writeable since pCell might be auxiliary
** buffer space that is separate from the pPage buffer area */
/* Fill in the header. */
nHeader = int32((*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize)
if (*TMemPage)(unsafe.Pointer(pPage)).FintKey != 0 {
nPayload = (*TBtreePayload)(unsafe.Pointer(pX)).FnData + (*TBtreePayload)(unsafe.Pointer(pX)).FnZero
pSrc = (*TBtreePayload)(unsafe.Pointer(pX)).FpData
nSrc = (*TBtreePayload)(unsafe.Pointer(pX)).FnData
/* fillInCell() only called for leaves */
if uint32(nPayload) < libc.Uint32FromInt32(0x80) {
*(*uint8)(unsafe.Pointer(pCell + uintptr(nHeader))) = uint8(nPayload)
v1 = libc.Int32FromInt32(1)
} else {
v1 = _sqlite3PutVarint(tls, pCell+uintptr(nHeader), uint64(nPayload))
}
nHeader += int32(uint8(v1))
nHeader += _sqlite3PutVarint(tls, pCell+uintptr(nHeader), *(*Tu64)(unsafe.Pointer(pX + 8)))
} else {
v2 = int32((*TBtreePayload)(unsafe.Pointer(pX)).FnKey)
nPayload = v2
nSrc = v2
pSrc = (*TBtreePayload)(unsafe.Pointer(pX)).FpKey
if uint32(nPayload) < libc.Uint32FromInt32(0x80) {
*(*uint8)(unsafe.Pointer(pCell + uintptr(nHeader))) = uint8(nPayload)
v3 = libc.Int32FromInt32(1)
} else {
v3 = _sqlite3PutVarint(tls, pCell+uintptr(nHeader), uint64(nPayload))
}
nHeader += int32(uint8(v3))
}
/* Fill in the payload */
pPayload = pCell + uintptr(nHeader)
if nPayload <= int32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal) {
/* This is the common case where everything fits on the btree page
** and no overflow pages are required. */
n = nHeader + nPayload
if n < int32(4) {
n = int32(4)
*(*uint8)(unsafe.Pointer(pPayload + uintptr(nPayload))) = uint8(0)
}
*(*int32)(unsafe.Pointer(pnSize)) = n
libc.Xmemcpy(tls, pPayload, pSrc, uint64(nSrc))
libc.Xmemset(tls, pPayload+uintptr(nSrc), 0, uint64(nPayload-nSrc))
return SQLITE_OK
}
/* If we reach this point, it means that some of the content will need
** to spill onto overflow pages.
*/
mn = int32((*TMemPage)(unsafe.Pointer(pPage)).FminLocal)
n = int32(uint32(mn) + uint32(nPayload-mn)%((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize-uint32(4)))
if n > int32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal) {
n = mn
}
spaceLeft = n
*(*int32)(unsafe.Pointer(pnSize)) = n + nHeader + int32(4)
pPrior = pCell + uintptr(nHeader+n)
pToRelease = uintptr(0)
*(*TPgno)(unsafe.Pointer(bp + 4)) = uint32(0)
pBt = (*TMemPage)(unsafe.Pointer(pPage)).FpBt
/* At this point variables should be set as follows:
**
** nPayload Total payload size in bytes
** pPayload Begin writing payload here
** spaceLeft Space available at pPayload. If nPayload>spaceLeft,
** that means content must spill into overflow pages.
** *pnSize Size of the local cell (not counting overflow pages)
** pPrior Where to write the pgno of the first overflow page
**
** Use a call to btreeParseCellPtr() to verify that the values above
** were computed correctly.
*/
/* Write the payload into the local Cell and any extra into overflow pages */
for int32(1) != 0 {
n = nPayload
if n > spaceLeft {
n = spaceLeft
}
/* If pToRelease is not zero then pPayload points into the data area
** of pToRelease. Make sure pToRelease is still writeable. */
/* If pPayload is part of the data area of pPage, then make sure pPage
** is still writeable */
if nSrc >= n {
libc.Xmemcpy(tls, pPayload, pSrc, uint64(n))
} else {
if nSrc > 0 {
n = nSrc
libc.Xmemcpy(tls, pPayload, pSrc, uint64(n))
} else {
libc.Xmemset(tls, pPayload, 0, uint64(n))
}
}
nPayload -= n
if nPayload <= 0 {
break
}
pPayload += uintptr(n)
pSrc += uintptr(n)
nSrc -= n
spaceLeft -= n
if spaceLeft == 0 {
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0)
pgnoPtrmap = *(*TPgno)(unsafe.Pointer(bp + 4)) /* Overflow page pointer-map entry page */
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
for cond := true; cond; cond = _ptrmapPageno(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 4))) == *(*TPgno)(unsafe.Pointer(bp + 4)) || *(*TPgno)(unsafe.Pointer(bp + 4)) == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
*(*TPgno)(unsafe.Pointer(bp + 4))++
}
}
*(*int32)(unsafe.Pointer(bp)) = _allocateBtreePage(tls, pBt, bp+8, bp+4, *(*TPgno)(unsafe.Pointer(bp + 4)), uint8(0))
/* If the database supports auto-vacuum, and the second or subsequent
** overflow page is being allocated, add an entry to the pointer-map
** for that page now.
**
** If this is the first overflow page, then write a partial entry
** to the pointer-map. If we write nothing to this pointer-map slot,
** then the optimistic overflow chain processing in clearCell()
** may misinterpret the uninitialized values and delete the
** wrong pages from the database.
*/
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 && *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
if pgnoPtrmap != 0 {
v4 = int32(PTRMAP_OVERFLOW2)
} else {
v4 = int32(PTRMAP_OVERFLOW1)
}
eType = uint8(v4)
_ptrmapPut(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 4)), eType, pgnoPtrmap, bp)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
}
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
_releasePage(tls, pToRelease)
return *(*int32)(unsafe.Pointer(bp))
}
/* If pToRelease is not zero then pPrior points into the data area
** of pToRelease. Make sure pToRelease is still writeable. */
/* If pPrior is part of the data area of pPage, then make sure pPage
** is still writeable */
_sqlite3Put4byte(tls, pPrior, *(*TPgno)(unsafe.Pointer(bp + 4)))
_releasePage(tls, pToRelease)
pToRelease = *(*uintptr)(unsafe.Pointer(bp + 8))
pPrior = (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FaData
_sqlite3Put4byte(tls, pPrior, uint32(0))
pPayload = (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FaData + 4
spaceLeft = int32((*TBtShared)(unsafe.Pointer(pBt)).FusableSize - uint32(4))
}
}
_releasePage(tls, pToRelease)
return SQLITE_OK
}
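// Illustrative sketch (not generated code): when a payload is too large for a
// single cell, _fillInCell above keeps n = minLocal + (nPayload-minLocal) %
// (usableSize-4) bytes on the b-tree page, falling back to minLocal whenever
// that value exceeds maxLocal, and spills the rest onto overflow pages. A
// hypothetical restatement of that split:
func exampleLocalPayloadSize(nPayload, minLocal, maxLocal, usableSize uint32) uint32 {
	if nPayload <= maxLocal {
		return nPayload // everything fits locally; no overflow chain needed
	}
	n := minLocal + (nPayload-minLocal)%(usableSize-4)
	if n > maxLocal {
		n = minLocal // keep the local part small so overflow pages stay full
	}
	return n
}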
// C documentation
//
// /*
// ** Remove the i-th cell from pPage. This routine affects pPage only.
// ** The cell content is not freed or deallocated. It is assumed that
// ** the cell content has been copied someplace else. This routine just
// ** removes the reference to the cell from pPage.
// **
// ** "sz" must be the number of bytes in the cell.
// */
func _dropCell(tls *libc.TLS, pPage uintptr, idx int32, sz int32, pRC uintptr) {
var data, ptr uintptr
var hdr, rc int32
var pc Tu32
_, _, _, _, _ = data, hdr, pc, ptr, rc /* Beginning of the header. 0 most pages. 100 page 1 */
if *(*int32)(unsafe.Pointer(pRC)) != 0 {
return
}
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData
ptr = (*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx)
pc = uint32(int32(*(*Tu8)(unsafe.Pointer(ptr)))<<int32(8)|int32(*(*Tu8)(unsafe.Pointer(ptr + 1))))
hdr = int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)
if pc+uint32(sz) > (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize {
*(*int32)(unsafe.Pointer(pRC)) = _sqlite3CorruptError(tls, int32(77329))
return
}
rc = _freeSpace(tls, pPage, uint16(pc), uint16(sz))
if rc != 0 {
*(*int32)(unsafe.Pointer(pRC)) = rc
return
}
(*TMemPage)(unsafe.Pointer(pPage)).FnCell--
if int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) == 0 {
libc.Xmemset(tls, data+uintptr(hdr+int32(1)), 0, uint64(4))
*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(7)))) = uint8(0)
*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(5)))) = uint8((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(5)) + 1)) = uint8((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize)
(*TMemPage)(unsafe.Pointer(pPage)).FnFree = int32((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FusableSize - uint32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset) - uint32((*TMemPage)(unsafe.Pointer(pPage)).FchildPtrSize) - uint32(8))
} else {
libc.Xmemmove(tls, ptr, ptr+uintptr(2), uint64(int32(2)*(int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)-idx)))
*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(3)))) = uint8(int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(3)) + 1)) = uint8((*TMemPage)(unsafe.Pointer(pPage)).FnCell)
*(*int32)(unsafe.Pointer(pPage + 20)) += int32(2)
}
}
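// Illustrative sketch (not generated code): _dropCell above removes one entry
// from the page's cell-pointer array by shifting the remaining 2-byte
// big-endian pointers down over it and decrementing the header's cell count.
// A hypothetical restatement on a plain byte slice:
func exampleDropCellPointer(cellIdx []byte, nCell, idx int) []byte {
	// each cell pointer occupies two bytes; close the gap left by entry idx
	copy(cellIdx[2*idx:], cellIdx[2*idx+2:2*nCell])
	return cellIdx[:2*(nCell-1)]
}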
// C documentation
//
// /*
// ** Insert a new cell on pPage at cell index "i". pCell points to the
// ** content of the cell.
// **
// ** If the cell content will fit on the page, then put it there. If it
// ** will not fit, then make a copy of the cell content into pTemp if
// ** pTemp is not null. Regardless of pTemp, allocate a new entry
// ** in pPage->apOvfl[] and make it point to the cell content (either
// ** in pTemp or the original pCell) and also record its index.
// ** Allocating a new entry in pPage->aCell[] implies that
// ** pPage->nOverflow is incremented.
// **
// ** The insertCellFast() routine below works exactly the same as
// ** insertCell() except that it lacks the pTemp and iChild parameters
// ** which are assumed zero. Other than that, the two routines are the
// ** same.
// **
// ** Fixes or enhancements to this routine should be reflected in
// ** insertCellFast()!
// */
func _insertCell(tls *libc.TLS, pPage uintptr, i int32, pCell uintptr, sz int32, pTemp uintptr, iChild TPgno) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var data, pIns, v2, v4 uintptr
var j, rc int32
var v1, v3 Tu8
var _ /* idx at bp+0 */ int32
var _ /* rc2 at bp+4 */ int32
_, _, _, _, _, _, _, _ = data, j, pIns, rc, v1, v2, v3, v4
*(*int32)(unsafe.Pointer(bp)) = 0 /* The point in pPage->aCellIdx[] where no cell inserted */
if (*TMemPage)(unsafe.Pointer(pPage)).FnOverflow != 0 || sz+int32(2) > (*TMemPage)(unsafe.Pointer(pPage)).FnFree {
if pTemp != 0 {
libc.Xmemcpy(tls, pTemp, pCell, uint64(sz))
pCell = pTemp
}
_sqlite3Put4byte(tls, pCell, iChild)
v2 = pPage + 12
v1 = *(*Tu8)(unsafe.Pointer(v2))
*(*Tu8)(unsafe.Pointer(v2))++
j = int32(v1)
/* Comparison against ArraySize-1 since we hold back one extra slot
** as a contingency. In other words, never need more than 3 overflow
** slots but 4 are allocated, just to be safe. */
*(*uintptr)(unsafe.Pointer(pPage + 40 + uintptr(j)*8)) = pCell
*(*Tu16)(unsafe.Pointer(pPage + 28 + uintptr(j)*2)) = uint16(i)
/* When multiple overflows occur, they are always sequential and in
// ** sorted order. These invariants arise because multiple overflows can
** only occur when inserting divider cells into the parent page during
** balancing, and the dividers are adjacent and sorted.
*/
/* Overflows in sorted order */
/* Overflows are sequential */
} else {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
if rc != SQLITE_OK {
return rc
}
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData
rc = _allocateSpace(tls, pPage, sz, bp)
if rc != 0 {
return rc
}
/* The allocateSpace() routine guarantees the following properties
** if it returns successfully */
*(*int32)(unsafe.Pointer(pPage + 20)) -= int32(uint16(libc.Int32FromInt32(2) + sz))
/* In a corrupt database where an entry in the cell index section of
** a btree page has a value of 3 or less, the pCell value might point
** as many as 4 bytes in front of the start of the aData buffer for
** the source page. Make sure this does not cause problems by not
** reading the first 4 bytes */
libc.Xmemcpy(tls, data+uintptr(*(*int32)(unsafe.Pointer(bp))+int32(4)), pCell+uintptr(4), uint64(sz-int32(4)))
_sqlite3Put4byte(tls, data+uintptr(*(*int32)(unsafe.Pointer(bp))), iChild)
pIns = (*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(i*int32(2))
libc.Xmemmove(tls, pIns+uintptr(2), pIns, uint64(int32(2)*(int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)-i)))
*(*Tu8)(unsafe.Pointer(pIns)) = uint8(*(*int32)(unsafe.Pointer(bp)) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(pIns + 1)) = uint8(*(*int32)(unsafe.Pointer(bp)))
(*TMemPage)(unsafe.Pointer(pPage)).FnCell++
/* increment the cell count */
v4 = data + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(4))
*(*Tu8)(unsafe.Pointer(v4))++
v3 = *(*Tu8)(unsafe.Pointer(v4))
if int32(v3) == 0 {
*(*Tu8)(unsafe.Pointer(data + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(3))))++
}
if (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FautoVacuum != 0 {
*(*int32)(unsafe.Pointer(bp + 4)) = SQLITE_OK
/* The cell may contain a pointer to an overflow page. If so, write
** the entry for the overflow page into the pointer map.
*/
_ptrmapPutOvflPtr(tls, pPage, pPage, pCell, bp+4)
if *(*int32)(unsafe.Pointer(bp + 4)) != 0 {
return *(*int32)(unsafe.Pointer(bp + 4))
}
}
}
return SQLITE_OK
}
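// Illustrative sketch, not part of the generated translation: the paired
// single-byte stores used throughout insertCell() above (for example when the
// new entry is written into pPage->aCellIdx) are the translation of the C
// put2byte/get2byte macros, which store a 16-bit value in big-endian order.
// The helper names below are hypothetical.
func examplePut2(b []byte, off int, v int) {
	// write v as a 2-byte big-endian value at b[off]
	b[off] = byte(v >> 8)
	b[off+1] = byte(v)
}

func exampleGet2(b []byte, off int) int {
	// read a 2-byte big-endian value from b[off]
	return int(b[off])<<8 | int(b[off+1])
}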
// C documentation
//
// /*
// ** This variant of insertCell() assumes that the pTemp and iChild
// ** parameters are both zero. Use this variant in sqlite3BtreeInsert()
// ** for performance improvement, and also so that this variant is only
// ** called from that one place, and is thus inlined, and thus runs much
// ** faster.
// **
// ** Fixes or enhancements to this routine should be reflected into
// ** the insertCell() routine.
// */
func _insertCellFast(tls *libc.TLS, pPage uintptr, i int32, pCell uintptr, sz int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var data, pIns, v2, v4 uintptr
var j, rc int32
var v1, v3 Tu8
var _ /* idx at bp+0 */ int32
var _ /* rc2 at bp+4 */ int32
_, _, _, _, _, _, _, _ = data, j, pIns, rc, v1, v2, v3, v4
*(*int32)(unsafe.Pointer(bp)) = 0 /* The point in pPage->aCellIdx[] where no cell inserted */
if sz+int32(2) > (*TMemPage)(unsafe.Pointer(pPage)).FnFree {
v2 = pPage + 12
v1 = *(*Tu8)(unsafe.Pointer(v2))
*(*Tu8)(unsafe.Pointer(v2))++
j = int32(v1)
/* Comparison against ArraySize-1 since we hold back one extra slot
** as a contingency. In other words, never need more than 3 overflow
** slots but 4 are allocated, just to be safe. */
*(*uintptr)(unsafe.Pointer(pPage + 40 + uintptr(j)*8)) = pCell
*(*Tu16)(unsafe.Pointer(pPage + 28 + uintptr(j)*2)) = uint16(i)
/* When multiple overflows occur, they are always sequential and in
** sorted order. This invariant arises because multiple overflows can
** only occur when inserting divider cells into the parent page during
** balancing, and the dividers are adjacent and sorted.
*/
/* Overflows in sorted order */
/* Overflows are sequential */
} else {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
if rc != SQLITE_OK {
return rc
}
data = (*TMemPage)(unsafe.Pointer(pPage)).FaData
rc = _allocateSpace(tls, pPage, sz, bp)
if rc != 0 {
return rc
}
/* The allocateSpace() routine guarantees the following properties
** if it returns successfully */
*(*int32)(unsafe.Pointer(pPage + 20)) -= int32(uint16(libc.Int32FromInt32(2) + sz))
libc.Xmemcpy(tls, data+uintptr(*(*int32)(unsafe.Pointer(bp))), pCell, uint64(sz))
pIns = (*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(i*int32(2))
libc.Xmemmove(tls, pIns+uintptr(2), pIns, uint64(int32(2)*(int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)-i)))
*(*Tu8)(unsafe.Pointer(pIns)) = uint8(*(*int32)(unsafe.Pointer(bp)) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(pIns + 1)) = uint8(*(*int32)(unsafe.Pointer(bp)))
(*TMemPage)(unsafe.Pointer(pPage)).FnCell++
/* increment the cell count */
v4 = data + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(4))
*(*Tu8)(unsafe.Pointer(v4))++
v3 = *(*Tu8)(unsafe.Pointer(v4))
if int32(v3) == 0 {
*(*Tu8)(unsafe.Pointer(data + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(3))))++
}
if (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FpBt)).FautoVacuum != 0 {
*(*int32)(unsafe.Pointer(bp + 4)) = SQLITE_OK
/* The cell may contain a pointer to an overflow page. If so, write
** the entry for the overflow page into the pointer map.
*/
_ptrmapPutOvflPtr(tls, pPage, pPage, pCell, bp+4)
if *(*int32)(unsafe.Pointer(bp + 4)) != 0 {
return *(*int32)(unsafe.Pointer(bp + 4))
}
}
}
return SQLITE_OK
}
/*
** The following parameters determine how many adjacent pages get involved
** in a balancing operation. NN is the number of neighbors on either side
** of the page that participate in the balancing operation. NB is the
** total number of pages that participate, including the target page and
** NN neighbors on either side.
**
** The minimum value of NN is 1 (of course). Increasing NN above 1
** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance
** in exchange for a larger degradation in INSERT and UPDATE performance.
** The value of NN=1 appears to give the best results overall.
**
** (Later:) The description above makes it seem as if these values are
** tunable - as if you could change them and recompile and it would all work.
** But that is unlikely. NB has been 3 since the inception of SQLite and
** we have never tested any other value.
*/
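// Illustrative note, not part of the generated translation: in the SQLite C
// source the parameters described above are NN = 1 and NB = 2*NN+1 = 3, which
// is why expressions such as libc.Int32FromInt32(NB)*libc.Int32FromInt32(2)
// appear in the balancing code below. The constants here are hypothetical
// stand-ins for illustration only.
const (
	exampleNN = 1               // sibling pages on either side of the target page
	exampleNB = 2*exampleNN + 1 // total pages taking part in a balance operation
)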
// C documentation
//
// /*
// ** A CellArray object contains a cache of pointers and sizes for a
// ** consecutive sequence of cells that might be held on multiple pages.
// **
// ** The cells in this array are the divider cell or cells from the pParent
// ** page plus up to three child pages. There are a total of nCell cells.
// **
// ** pRef is a pointer to one of the pages that contributes cells. This is
// ** used to access information such as MemPage.intKey and MemPage.pBt->pageSize
// ** which should be common to all pages that contribute cells to this array.
// **
// ** apCell[] and szCell[] hold, respectively, pointers to the start of each
// ** cell and the size of each cell. Some of the apCell[] pointers might refer
// ** to overflow cells. In other words, some apCell[] pointers might not point
// ** to the content area of the pages.
// **
// ** A szCell[] of zero means the size of that cell has not yet been computed.
// **
// ** The cells come from as many as four different pages:
// **
// **                -----------
// **                | Parent  |
// **                -----------
// **             /     |      \
// **            /      |       \
// **  ---------   ---------   ---------
// **  |Child-1|   |Child-2|   |Child-3|
// **  ---------   ---------   ---------
// **
// ** The order of the cells in the array, for an index btree, is:
// **
// ** 1. All cells from Child-1 in order
// ** 2. The first divider cell from Parent
// ** 3. All cells from Child-2 in order
// ** 4. The second divider cell from Parent
// ** 5. All cells from Child-3 in order
// **
// ** For a table-btree (with rowids) the items 2 and 4 are empty because
// ** content exists only in leaves and there are no divider cells.
// **
// ** For an index btree, the apEnd[] array holds pointers to the end of the page
// ** for Child-1, the Parent, Child-2, the Parent (again), and Child-3,
// ** respectively. The ixNx[] array holds the number of cells contained in
// ** each of these 5 stages, and all stages to the left. Hence:
// **
// ** ixNx[0] = Number of cells in Child-1.
// ** ixNx[1] = Number of cells in Child-1 plus 1 for first divider.
// ** ixNx[2] = Number of cells in Child-1 and Child-2 + 1 for 1st divider.
// ** ixNx[3] = Number of cells in Child-1 and Child-2 + both divider cells
// ** ixNx[4] = Total number of cells.
// **
// ** For a table-btree, the concept is similar, except only apEnd[0]..apEnd[2]
// ** are used and they point to the leaf pages only, and the ixNx values are:
// **
// ** ixNx[0] = Number of cells in Child-1.
// ** ixNx[1] = Number of cells in Child-1 and Child-2.
// ** ixNx[2] = Total number of cells.
// **
// ** Sometimes when deleting, a child page can have zero cells. In those
// ** cases, ixNx[] entries with higher indexes, and the corresponding apEnd[]
// ** entries, shift down. The end result is that each ixNx[] entry should
// ** be larger than the previous one.
// */
type TCellArray = struct {
FnCell int32
FpRef uintptr
FapCell uintptr
FszCell uintptr
FapEnd [6]uintptr
FixNx [6]int32
}
type CellArray = TCellArray
type TCellArray1 = struct {
FnCell int32
FpRef uintptr
FapCell uintptr
FszCell uintptr
FapEnd [6]uintptr
FixNx [6]int32
}
type CellArray1 = TCellArray1
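// Illustrative sketch, not part of the generated translation: the ixNx[]
// entries described above are running totals over the stages of the cell
// array (Child-1, first divider, Child-2, second divider, Child-3 for an
// index b-tree). A hypothetical helper that builds them from per-stage cell
// counts could look like this.
func exampleCumulativeIxNx(perStage []int) []int {
	ixNx := make([]int, len(perStage))
	total := 0
	for i, n := range perStage {
		total += n
		ixNx[i] = total // cells in this stage plus all stages to its left
	}
	return ixNx
}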
// C documentation
//
// /*
// ** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been
// ** computed.
// */
func _populateCellCache(tls *libc.TLS, p uintptr, idx int32, N int32) {
var pRef, szCell uintptr
_, _ = pRef, szCell
pRef = (*TCellArray)(unsafe.Pointer(p)).FpRef
szCell = (*TCellArray)(unsafe.Pointer(p)).FszCell
for N > 0 {
if int32(*(*Tu16)(unsafe.Pointer(szCell + uintptr(idx)*2))) == 0 {
*(*Tu16)(unsafe.Pointer(szCell + uintptr(idx)*2)) = (*(*func(*libc.TLS, uintptr, uintptr) Tu16)(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pRef)).FxCellSize})))(tls, pRef, *(*uintptr)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(p)).FapCell + uintptr(idx)*8)))
} else {
}
idx++
N--
}
}
// C documentation
//
// /*
// ** Return the size of the Nth element of the cell array
// */
func _computeCellSize(tls *libc.TLS, p uintptr, N int32) (r Tu16) {
*(*Tu16)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(p)).FszCell + uintptr(N)*2)) = (*(*func(*libc.TLS, uintptr, uintptr) Tu16)(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(p)).FpRef)).FxCellSize})))(tls, (*TCellArray)(unsafe.Pointer(p)).FpRef, *(*uintptr)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(p)).FapCell + uintptr(N)*8)))
return *(*Tu16)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(p)).FszCell + uintptr(N)*2))
}
func _cachedCellSize(tls *libc.TLS, p uintptr, N int32) (r Tu16) {
if *(*Tu16)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(p)).FszCell + uintptr(N)*2)) != 0 {
return *(*Tu16)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(p)).FszCell + uintptr(N)*2))
}
return _computeCellSize(tls, p, N)
}
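// Illustrative sketch, not part of the generated translation: the two
// routines above implement a lazy size cache in which a stored size of zero
// means "not yet computed". The helper below is hypothetical.
func exampleCachedSize(szCell []uint16, n int, measure func(int) uint16) uint16 {
	if szCell[n] != 0 {
		return szCell[n] // size already known
	}
	szCell[n] = measure(n) // compute once, then reuse
	return szCell[n]
}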
// C documentation
//
// /*
// ** Array apCell[] contains pointers to nCell b-tree page cells. The
// ** szCell[] array contains the size in bytes of each cell. This function
// ** replaces the current contents of page pPg with the contents of the cell
// ** array.
// **
// ** Some of the cells in apCell[] may currently be stored in pPg. This
// ** function works around problems caused by this by making a copy of any
// ** such cells before overwriting the page data.
// **
// ** The MemPage.nFree field is invalidated by this function. It is the
// ** responsibility of the caller to set it correctly.
// */
func _rebuildPage(tls *libc.TLS, pCArray uintptr, iFirst int32, nCell int32, pPg uintptr) (r int32) {
var aData, pCell, pCellptr, pData, pEnd, pSrcEnd, pTmp uintptr
var hdr, i, iEnd, k, usableSize int32
var j Tu32
var sz Tu16
_, _, _, _, _, _, _, _, _, _, _, _, _, _ = aData, hdr, i, iEnd, j, k, pCell, pCellptr, pData, pEnd, pSrcEnd, pTmp, sz, usableSize
hdr = int32((*TMemPage)(unsafe.Pointer(pPg)).FhdrOffset) /* Offset of header on pPg */
aData = (*TMemPage)(unsafe.Pointer(pPg)).FaData /* Pointer to data for pPg */
usableSize = int32((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPg)).FpBt)).FusableSize)
pEnd = aData + uintptr(usableSize)
i = iFirst /* Start of cell content area */
iEnd = i + nCell /* Loop terminator */
pCellptr = (*TMemPage)(unsafe.Pointer(pPg)).FaCellIdx
pTmp = _sqlite3PagerTempSpace(tls, (*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPg)).FpBt)).FpPager) /* Current pCArray->apEnd[k] value */
j = uint32(int32(*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)))))<<libc.Int32FromInt32(8) | int32(*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)) + 1))))
if j > uint32(usableSize) {
j = uint32(0)
}
libc.Xmemcpy(tls, pTmp+uintptr(j), aData+uintptr(j), uint64(uint32(usableSize)-j))
k = 0
for {
if !(k < libc.Int32FromInt32(NB)*libc.Int32FromInt32(2) && *(*int32)(unsafe.Pointer(pCArray + 80 + uintptr(k)*4)) <= i) {
break
}
goto _1
_1:
;
k++
}
pSrcEnd = *(*uintptr)(unsafe.Pointer(pCArray + 32 + uintptr(k)*8))
pData = pEnd
for int32(1) != 0 {
pCell = *(*uintptr)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))
sz = *(*Tu16)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FszCell + uintptr(i)*2))
if uint64(pCell) >= uint64(aData+uintptr(j)) && uint64(pCell) < uint64(pEnd) {
if uint64(pCell+uintptr(sz)) > uint64(pEnd) {
return _sqlite3CorruptError(tls, int32(77718))
}
pCell = pTmp + uintptr(int64(pCell)-int64(aData))
} else {
if uint64(pCell+uintptr(sz)) > uint64(pSrcEnd) && uint64(pCell) < uint64(pSrcEnd) {
return _sqlite3CorruptError(tls, int32(77723))
}
}
pData -= uintptr(sz)
*(*Tu8)(unsafe.Pointer(pCellptr)) = uint8((int64(pData) - int64(aData)) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(pCellptr + 1)) = uint8(int64(pData) - int64(aData))
pCellptr += uintptr(2)
if pData < pCellptr {
return _sqlite3CorruptError(tls, int32(77729))
}
libc.Xmemmove(tls, pData, pCell, uint64(sz))
i++
if i >= iEnd {
break
}
if *(*int32)(unsafe.Pointer(pCArray + 80 + uintptr(k)*4)) <= i {
k++
pSrcEnd = *(*uintptr)(unsafe.Pointer(pCArray + 32 + uintptr(k)*8))
}
}
/* The pPg->nFree field is now set incorrectly. The caller will fix it. */
(*TMemPage)(unsafe.Pointer(pPg)).FnCell = uint16(nCell)
(*TMemPage)(unsafe.Pointer(pPg)).FnOverflow = uint8(0)
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(1)))) = uint8(libc.Int32FromInt32(0) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(1)) + 1)) = uint8(libc.Int32FromInt32(0))
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(3)))) = uint8(int32((*TMemPage)(unsafe.Pointer(pPg)).FnCell) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(3)) + 1)) = uint8((*TMemPage)(unsafe.Pointer(pPg)).FnCell)
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)))) = uint8((int64(pData) - int64(aData)) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)) + 1)) = uint8(int64(pData) - int64(aData))
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(7)))) = uint8(0x00)
return SQLITE_OK
}
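// Illustrative sketch, not part of the generated translation: rebuildPage()
// above packs cell bodies from the end of the page downward while the 2-byte
// cell-pointer entries grow upward from the header; the rebuild fails if the
// two regions would meet. The helper below is a hypothetical, simplified
// rendering of that layout rule.
func examplePackCells(page []byte, cellPtrStart int, cells [][]byte) bool {
	pData := len(page)       // content area grows downward from the end of the page
	pCellptr := cellPtrStart // cell-pointer array grows upward
	for _, c := range cells {
		pData -= len(c)
		if pData < pCellptr+2 {
			return false // content area would overlap the cell-pointer array
		}
		copy(page[pData:], c)
		page[pCellptr] = byte(pData >> 8) // 2-byte big-endian offset of the cell body
		page[pCellptr+1] = byte(pData)
		pCellptr += 2
	}
	return true
}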
// C documentation
//
// /*
// ** The pCArray object contains pointers to b-tree cells and the cell sizes.
// ** This function attempts to add the cells stored in the array to page pPg.
// ** If it cannot (because the page needs to be defragmented before the cells
// ** will fit), non-zero is returned. Otherwise, if the cells are added
// ** successfully, zero is returned.
// **
// ** Argument pCellptr points to the first entry in the cell-pointer array
// ** (part of page pPg) to populate. After cell apCell[0] is written to the
// ** page body, a 16-bit offset is written to pCellptr. And so on, for each
// ** cell in the array. It is the responsibility of the caller to ensure
// ** that it is safe to overwrite this part of the cell-pointer array.
// **
// ** When this function is called, *ppData points to the start of the
// ** content area on page pPg. If the size of the content area is extended,
// ** *ppData is updated to point to the new start of the content area
// ** before returning.
// **
// ** Finally, argument pBegin points to the byte immediately following the
// ** end of the space required by this page for the cell-pointer area (for
// ** all cells - not just those inserted by the current call). If the content
// ** area must be extended to before this point in order to accommodate all
// ** cells in apCell[], then the cells do not fit and non-zero is returned.
// */
func _pageInsertArray(tls *libc.TLS, pPg uintptr, pBegin uintptr, ppData uintptr, pCellptr uintptr, iFirst int32, nCell int32, pCArray uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var aData, pData, pEnd, pSlot, v2 uintptr
var i, iEnd, k, sz int32
var v3 bool
var _ /* rc at bp+0 */ int32
_, _, _, _, _, _, _, _, _, _ = aData, i, iEnd, k, pData, pEnd, pSlot, sz, v2, v3
i = iFirst /* Loop counter - cell index to insert */
aData = (*TMemPage)(unsafe.Pointer(pPg)).FaData /* Complete page */
pData = *(*uintptr)(unsafe.Pointer(ppData)) /* Content area. A subset of aData[] */
iEnd = iFirst + nCell /* Maximum extent of cell data */
/* Never called on page 1 */
if iEnd <= iFirst {
return 0
}
k = 0
for {
if !(k < libc.Int32FromInt32(NB)*libc.Int32FromInt32(2) && *(*int32)(unsafe.Pointer(pCArray + 80 + uintptr(k)*4)) <= i) {
break
}
goto _1
_1:
;
k++
}
pEnd = *(*uintptr)(unsafe.Pointer(pCArray + 32 + uintptr(k)*8))
for int32(1) != 0 {
sz = int32(*(*Tu16)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FszCell + uintptr(i)*2)))
if v3 = int32(*(*Tu8)(unsafe.Pointer(aData + 1))) == 0 && int32(*(*Tu8)(unsafe.Pointer(aData + 2))) == 0; !v3 {
v2 = _pageFindSlot(tls, pPg, sz, bp)
pSlot = v2
}
if v3 || v2 == uintptr(0) {
if int64(pData)-int64(pBegin) < int64(sz) {
return int32(1)
}
pData -= uintptr(sz)
pSlot = pData
}
/* pSlot and pCArray->apCell[i] will never overlap on a well-formed
** database. But they might for a corrupt database. Hence use memmove()
** since memcpy() sends SIGABORT with overlapping buffers on OpenBSD */
if uint64(*(*uintptr)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))+uintptr(sz)) > uint64(pEnd) && uint64(*(*uintptr)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))) < uint64(pEnd) {
_sqlite3CorruptError(tls, int32(77814))
return int32(1)
}
libc.Xmemmove(tls, pSlot, *(*uintptr)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8)), uint64(sz))
*(*Tu8)(unsafe.Pointer(pCellptr)) = uint8((int64(pSlot) - int64(aData)) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(pCellptr + 1)) = uint8(int64(pSlot) - int64(aData))
pCellptr += uintptr(2)
i++
if i >= iEnd {
break
}
if *(*int32)(unsafe.Pointer(pCArray + 80 + uintptr(k)*4)) <= i {
k++
pEnd = *(*uintptr)(unsafe.Pointer(pCArray + 32 + uintptr(k)*8))
}
}
*(*uintptr)(unsafe.Pointer(ppData)) = pData
return 0
}
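// Illustrative sketch, not part of the generated translation: the placement
// policy of pageInsertArray() above is "take a slot from the free-list when
// one is available, otherwise extend the content area downward, and give up
// if that extension would reach the cell-pointer area (pBegin)". The helper
// and its findSlot callback are hypothetical.
func examplePlaceCell(pData, pBegin, sz int, findSlot func(int) (int, bool)) (slot, newPData int, ok bool) {
	if off, found := findSlot(sz); found {
		return off, pData, true // reuse freed space inside the page
	}
	if pData-pBegin < sz {
		return 0, pData, false // does not fit; caller falls back to rebuilding the page
	}
	return pData - sz, pData - sz, true // extend the content area downward
}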
// C documentation
//
// /*
// ** The pCArray object contains pointers to b-tree cells and their sizes.
// **
// ** This function adds the space associated with each cell in the array
// ** that is currently stored within the body of pPg to the pPg free-list.
// ** The cell-pointers and other fields of the page are not updated.
// **
// ** This function returns the total number of cells added to the free-list.
// */
func _pageFreeArray(tls *libc.TLS, pPg uintptr, iFirst int32, nCell int32, pCArray uintptr) (r int32) {
var aAfter, aOfst [10]int32
var aData, pCell, pEnd, pStart uintptr
var i, iAfter, iEnd, iOfst, j, nFree, nRet, sz int32
_, _, _, _, _, _, _, _, _, _, _, _, _, _ = aAfter, aData, aOfst, i, iAfter, iEnd, iOfst, j, nFree, nRet, pCell, pEnd, pStart, sz
aData = (*TMemPage)(unsafe.Pointer(pPg)).FaData
pEnd = aData + uintptr((*TBtShared)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPg)).FpBt)).FusableSize)
pStart = aData + uintptr(int32((*TMemPage)(unsafe.Pointer(pPg)).FhdrOffset)+int32(8)+int32((*TMemPage)(unsafe.Pointer(pPg)).FchildPtrSize))
nRet = 0
iEnd = iFirst + nCell
nFree = 0
i = iFirst
for {
if !(i < iEnd) {
break
}
pCell = *(*uintptr)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FapCell + uintptr(i)*8))
if uint64(pCell) >= uint64(pStart) && uint64(pCell) < uint64(pEnd) {
/* No need to use cachedCellSize() here. The sizes of all cells that
** are to be freed have already been computed while deciding which
** cells need freeing */
sz = int32(*(*Tu16)(unsafe.Pointer((*TCellArray)(unsafe.Pointer(pCArray)).FszCell + uintptr(i)*2)))
iOfst = int32(uint16(int64(pCell) - int64(aData)))
iAfter = iOfst + sz
j = 0
for {
if !(j < nFree) {
break
}
if aOfst[j] == iAfter {
aOfst[j] = iOfst
break
} else {
if aAfter[j] == iOfst {
aAfter[j] = iAfter
break
}
}
goto _2
_2:
;
j++
}
if j >= nFree {
if nFree >= int32(libc.Uint64FromInt64(40)/libc.Uint64FromInt64(4)) {
j = 0
for {
if !(j < nFree) {
break
}
_freeSpace(tls, pPg, uint16(aOfst[j]), uint16(aAfter[j]-aOfst[j]))
goto _3
_3:
;
j++
}
nFree = 0
}
aOfst[nFree] = iOfst
aAfter[nFree] = iAfter
if aData+uintptr(iAfter) > pEnd {
return 0
}
nFree++
}
nRet++
}
goto _1
_1:
;
i++
}
j = 0
for {
if !(j < nFree) {
break
}
_freeSpace(tls, pPg, uint16(aOfst[j]), uint16(aAfter[j]-aOfst[j]))
goto _4
_4:
;
j++
}
return nRet
}
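// Illustrative sketch, not part of the generated translation: before calling
// freeSpace(), pageFreeArray() above coalesces freed byte ranges, merging a
// new range into an existing one whenever it ends where that range starts or
// starts where it ends. The helper below is hypothetical.
func exampleCoalesce(starts, ends []int, iOfst, iAfter int) ([]int, []int) {
	for j := range starts {
		if starts[j] == iAfter { // new range sits just before range j: extend j downward
			starts[j] = iOfst
			return starts, ends
		}
		if ends[j] == iOfst { // new range sits just after range j: extend j upward
			ends[j] = iAfter
			return starts, ends
		}
	}
	return append(starts, iOfst), append(ends, iAfter) // no adjacent range found
}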
// C documentation
//
// /*
// ** pCArray contains pointers to and sizes of all cells in the page being
// ** balanced. The current page, pPg, has pPg->nCell cells starting with
// ** pCArray->apCell[iOld]. After balancing, this page should hold nNew cells
// ** starting at apCell[iNew].
// **
// ** This routine makes the necessary adjustments to pPg so that it contains
// ** the correct cells after being balanced.
// **
// ** The pPg->nFree field is invalid when this function returns. It is the
// ** responsibility of the caller to set it correctly.
// */
func _editPage(tls *libc.TLS, pPg uintptr, iOld int32, iNew int32, nNew int32, pCArray uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var aData, pBegin, pCellptr uintptr
var hdr, i, iCell, iNewEnd, iOldEnd, nAdd, nCell, nShift, nTail, v1 int32
var _ /* pData at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _ = aData, hdr, i, iCell, iNewEnd, iOldEnd, nAdd, nCell, nShift, nTail, pBegin, pCellptr, v1
aData = (*TMemPage)(unsafe.Pointer(pPg)).FaData
hdr = int32((*TMemPage)(unsafe.Pointer(pPg)).FhdrOffset)
pBegin = (*TMemPage)(unsafe.Pointer(pPg)).FaCellIdx + uintptr(nNew*int32(2))
nCell = int32((*TMemPage)(unsafe.Pointer(pPg)).FnCell)
iOldEnd = iOld + int32((*TMemPage)(unsafe.Pointer(pPg)).FnCell) + int32((*TMemPage)(unsafe.Pointer(pPg)).FnOverflow)
iNewEnd = iNew + nNew
/* Remove cells from the start and end of the page */
if iOld < iNew {
nShift = _pageFreeArray(tls, pPg, iOld, iNew-iOld, pCArray)
if nShift > nCell {
return _sqlite3CorruptError(tls, int32(77936))
}
libc.Xmemmove(tls, (*TMemPage)(unsafe.Pointer(pPg)).FaCellIdx, (*TMemPage)(unsafe.Pointer(pPg)).FaCellIdx+uintptr(nShift*int32(2)), uint64(nCell*int32(2)))
nCell -= nShift
}
if iNewEnd < iOldEnd {
nTail = _pageFreeArray(tls, pPg, iNewEnd, iOldEnd-iNewEnd, pCArray)
nCell -= nTail
}
*(*uintptr)(unsafe.Pointer(bp)) = aData + uintptr((int32(*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)))))<<libc.Int32FromInt32(8)|int32(*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)) + 1)))-libc.Int32FromInt32(1))&libc.Int32FromInt32(0xffff)+libc.Int32FromInt32(1))
if *(*uintptr)(unsafe.Pointer(bp)) > (*TMemPage)(unsafe.Pointer(pPg)).FaDataEnd {
goto editpage_fail
}
/* Add cells to the start of the page */
if iNew < iOld {
if nNew < iOld-iNew {
v1 = nNew
} else {
v1 = iOld - iNew
}
nAdd = v1
pCellptr = (*TMemPage)(unsafe.Pointer(pPg)).FaCellIdx
libc.Xmemmove(tls, pCellptr+uintptr(nAdd*int32(2)), pCellptr, uint64(nCell*int32(2)))
if _pageInsertArray(tls, pPg, pBegin, bp, pCellptr, iNew, nAdd, pCArray) != 0 {
goto editpage_fail
}
nCell += nAdd
}
/* Add any overflow cells */
i = 0
for {
if !(i < int32((*TMemPage)(unsafe.Pointer(pPg)).FnOverflow)) {
break
}
iCell = iOld + int32(*(*Tu16)(unsafe.Pointer(pPg + 28 + uintptr(i)*2))) - iNew
if iCell >= 0 && iCell < nNew {
pCellptr = (*TMemPage)(unsafe.Pointer(pPg)).FaCellIdx + uintptr(iCell*int32(2))
if nCell > iCell {
libc.Xmemmove(tls, pCellptr+2, pCellptr, uint64((nCell-iCell)*int32(2)))
}
nCell++
_cachedCellSize(tls, pCArray, iCell+iNew)
if _pageInsertArray(tls, pPg, pBegin, bp, pCellptr, iCell+iNew, int32(1), pCArray) != 0 {
goto editpage_fail
}
}
goto _2
_2:
;
i++
}
/* Append cells to the end of the page */
pCellptr = (*TMemPage)(unsafe.Pointer(pPg)).FaCellIdx + uintptr(nCell*int32(2))
if _pageInsertArray(tls, pPg, pBegin, bp, pCellptr, iNew+nCell, nNew-nCell, pCArray) != 0 {
goto editpage_fail
}
(*TMemPage)(unsafe.Pointer(pPg)).FnCell = uint16(nNew)
(*TMemPage)(unsafe.Pointer(pPg)).FnOverflow = uint8(0)
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(3)))) = uint8(int32((*TMemPage)(unsafe.Pointer(pPg)).FnCell) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(3)) + 1)) = uint8((*TMemPage)(unsafe.Pointer(pPg)).FnCell)
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)))) = uint8((int64(*(*uintptr)(unsafe.Pointer(bp))) - int64(aData)) >> libc.Int32FromInt32(8))
*(*Tu8)(unsafe.Pointer(aData + uintptr(hdr+int32(5)) + 1)) = uint8(int64(*(*uintptr)(unsafe.Pointer(bp))) - int64(aData))
return SQLITE_OK
goto editpage_fail
editpage_fail:
;
/* Unable to edit this page. Rebuild it from scratch instead. */
if nNew < int32(1) {
return _sqlite3CorruptError(tls, int32(78010))
}
_populateCellCache(tls, pCArray, iNew, nNew)
return _rebuildPage(tls, pCArray, iNew, nNew, pPg)
}
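// Illustrative sketch, not part of the generated translation: editPage()
// above is a sequence of incremental steps (trim cells from the front and
// back, prepend, inject overflow cells, append), any of which may fail, in
// which case the whole page is rebuilt from the cell array instead. The
// helper below is a hypothetical rendering of that control flow.
func exampleEditOrRebuild(steps []func() bool, rebuild func() int) int {
	for _, step := range steps {
		if !step() {
			return rebuild() // in-place edit failed; rebuild the page from scratch
		}
	}
	return 0 // SQLITE_OK
}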
// C documentation
//
// /*
// ** This version of balance() handles the common special case where
// ** a new entry is being inserted on the extreme right-end of the
// ** tree, in other words, when the new entry will become the largest
// ** entry in the tree.
// **
// ** Instead of trying to balance the 3 right-most leaf pages, just add
// ** a new page to the right-hand side and put the one new entry in
// ** that page. This leaves the right side of the tree somewhat
// ** unbalanced. But odds are that we will be inserting new entries
// ** at the end soon afterwards so the nearly empty page will quickly
// ** fill up. On average.
// **
// ** pPage is the leaf page which is the right-most page in the tree.
// ** pParent is its parent. pPage must have a single overflow entry
// ** which is also the right-most entry on the page.
// **
// ** The pSpace buffer is used to store a temporary copy of the divider
// ** cell that will be inserted into pParent. Such a cell consists of a 4
// ** byte page number followed by a variable length integer. In other
// ** words, at most 13 bytes. Hence the pSpace buffer must be at
// ** least 13 bytes in size.
// */
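// Illustrative sketch, not part of the generated translation: the divider
// cell described above is a 4-byte big-endian page number followed by the key
// encoded as a SQLite varint (7 bits per byte, high bit set on every byte but
// the last), hence the 13-byte bound on pSpace. The helpers below are
// hypothetical, and the varint encoder is simplified to values below 1<<56
// (the full format adds a 9th byte carrying 8 bits).
func examplePutVarint(p []byte, v uint64) int {
	var groups [8]byte
	n := 0
	for {
		groups[n] = byte(v & 0x7f) // collect 7-bit groups, least significant first
		v >>= 7
		n++
		if v == 0 {
			break
		}
	}
	for i := 0; i < n; i++ {
		b := groups[n-1-i] // emit the most significant group first
		if i != n-1 {
			b |= 0x80 // continuation bit on every byte except the last
		}
		p[i] = b
	}
	return n
}

func exampleDividerCell(pgno uint32, key uint64) []byte {
	cell := make([]byte, 13) // 4-byte page number plus at most 9 varint bytes
	cell[0] = byte(pgno >> 24)
	cell[1] = byte(pgno >> 16)
	cell[2] = byte(pgno >> 8)
	cell[3] = byte(pgno)
	n := examplePutVarint(cell[4:], key)
	return cell[:4+n]
}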
func _balance_quick(tls *libc.TLS, pParent uintptr, pPage uintptr, pSpace uintptr) (r int32) {
bp := tls.Alloc(144)
defer tls.Free(144)
var pBt, pOut, pStop, v1, v3, v4 uintptr
var v2 Tu8
var _ /* b at bp+32 */ TCellArray
var _ /* pCell at bp+16 */ uintptr
var _ /* pNew at bp+0 */ uintptr
var _ /* pgnoNew at bp+12 */ TPgno
var _ /* rc at bp+8 */ int32
var _ /* szCell at bp+24 */ Tu16
_, _, _, _, _, _, _ = pBt, pOut, pStop, v1, v2, v3, v4
pBt = (*TMemPage)(unsafe.Pointer(pPage)).FpBt /* Page number of pNew */
if int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) == 0 {
return _sqlite3CorruptError(tls, int32(78050))
} /* dbfuzz001.test */
/* Allocate a new page. This page will become the right-sibling of
** pPage. Make the parent page writable, so that the new divider cell
** may be inserted. If both these operations are successful, proceed.
*/
*(*int32)(unsafe.Pointer(bp + 8)) = _allocateBtreePage(tls, pBt, bp, bp+12, uint32(0), uint8(0))
if *(*int32)(unsafe.Pointer(bp + 8)) == SQLITE_OK {
pOut = pSpace + 4
*(*uintptr)(unsafe.Pointer(bp + 16)) = *(*uintptr)(unsafe.Pointer(pPage + 40))
*(*Tu16)(unsafe.Pointer(bp + 24)) = (*(*func(*libc.TLS, uintptr, uintptr) Tu16)(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pPage)).FxCellSize})))(tls, pPage, *(*uintptr)(unsafe.Pointer(bp + 16)))
_zeroPage(tls, *(*uintptr)(unsafe.Pointer(bp)), libc.Int32FromInt32(PTF_INTKEY)|libc.Int32FromInt32(PTF_LEAFDATA)|libc.Int32FromInt32(PTF_LEAF))
(*(*TCellArray)(unsafe.Pointer(bp + 32))).FnCell = int32(1)
(*(*TCellArray)(unsafe.Pointer(bp + 32))).FpRef = pPage
(*(*TCellArray)(unsafe.Pointer(bp + 32))).FapCell = bp + 16
(*(*TCellArray)(unsafe.Pointer(bp + 32))).FszCell = bp + 24
*(*uintptr)(unsafe.Pointer(bp + 32 + 32)) = (*TMemPage)(unsafe.Pointer(pPage)).FaDataEnd
*(*int32)(unsafe.Pointer(bp + 32 + 80)) = int32(2)
*(*int32)(unsafe.Pointer(bp + 8)) = _rebuildPage(tls, bp+32, 0, int32(1), *(*uintptr)(unsafe.Pointer(bp)))
if *(*int32)(unsafe.Pointer(bp + 8)) != 0 {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
return *(*int32)(unsafe.Pointer(bp + 8))
}
(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FnFree = int32((*TBtShared)(unsafe.Pointer(pBt)).FusableSize - uint32((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FcellOffset) - uint32(2) - uint32(*(*Tu16)(unsafe.Pointer(bp + 24))))
/* If this is an auto-vacuum database, update the pointer map
** with entries for the new page, and any pointer from the
** cell on the page to an overflow page. If either of these
** operations fails, the return code is set, but the contents
** of the parent page are still manipulated by the code below.
** That is Ok, at this point the parent page is guaranteed to
** be marked as dirty. Returning an error code will cause a
** rollback, undoing any changes made to the parent page.
*/
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
_ptrmapPut(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 12)), uint8(PTRMAP_BTREE), (*TMemPage)(unsafe.Pointer(pParent)).Fpgno, bp+8)
if int32(*(*Tu16)(unsafe.Pointer(bp + 24))) > int32((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FminLocal) {
_ptrmapPutOvflPtr(tls, *(*uintptr)(unsafe.Pointer(bp)), *(*uintptr)(unsafe.Pointer(bp)), *(*uintptr)(unsafe.Pointer(bp + 16)), bp+8)
}
}
/* Create a divider cell to insert into pParent. The divider cell
** consists of a 4-byte page number (the page number of pPage) and
** a variable length key value (which must be the same value as the
** largest key on pPage).
**
** To find the largest key value on pPage, first find the right-most
** cell on pPage. The first two fields of this cell are the
** record-length (a variable length integer at most 32-bits in size)
** and the key value (a variable length integer, may have any value).
** The first of the while(...) loops below skips over the record-length
** field. The second while(...) loop copies the key value from the
** cell on pPage into the pSpace buffer.
*/
*(*uintptr)(unsafe.Pointer(bp + 16)) = (*TMemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*(int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell)-int32(1))))))<aCell[] */
*(*int32)(unsafe.Pointer(bp)) = SQLITE_OK /* Value of pPage->aData[0] */
iSpace1 = 0 /* First unused byte of aSpace1[] */
iOvflSpace = 0 /* Parsed information on cells being balanced */
libc.Xmemset(tls, bp+100, 0, uint64(5))
libc.Xmemset(tls, bp+112, 0, uint64(104))
pBt = (*TMemPage)(unsafe.Pointer(pParent)).FpBt
/* At this point pParent may have at most one overflow cell. And if
** this overflow cell is present, it must be the cell with
** index iParentIdx. This scenario comes about when this function
** is called (indirectly) from sqlite3BtreeDelete().
*/
if !(aOvflSpace != 0) {
return int32(SQLITE_NOMEM)
}
/* Find the sibling pages to balance. Also locate the cells in pParent
** that divide the siblings. An attempt is made to find NN siblings on
** either side of pPage. More siblings are taken from one side, however,
** if there are fewer than NN siblings on the other side. If pParent
** has NB or fewer children then all children of pParent are taken.
**
** This loop also drops the divider cells from the parent page. This
** way, the remainder of the function does not have to deal with any
** overflow cells in the parent page, since if any existed they will
** have already been removed.
*/
i = int32((*TMemPage)(unsafe.Pointer(pParent)).FnOverflow) + int32((*TMemPage)(unsafe.Pointer(pParent)).FnCell)
if i < int32(2) {
nxDiv = 0
} else {
if iParentIdx == 0 {
nxDiv = 0
} else {
if iParentIdx == i {
nxDiv = i - int32(2) + bBulk
} else {
nxDiv = iParentIdx - int32(1)
}
}
i = int32(2) - bBulk
}
nOld = i + int32(1)
if i+nxDiv-int32((*TMemPage)(unsafe.Pointer(pParent)).FnOverflow) == int32((*TMemPage)(unsafe.Pointer(pParent)).FnCell) {
pRight = (*TMemPage)(unsafe.Pointer(pParent)).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(pParent)).FhdrOffset)+int32(8))
} else {
pRight = (*TMemPage)(unsafe.Pointer(pParent)).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(pParent)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pParent)).FaCellIdx + uintptr(int32(2)*(i+nxDiv-int32((*TMemPage)(unsafe.Pointer(pParent)).FnOverflow))))))< 0 {
if limit < int32(*(*Tu16)(unsafe.Pointer(pOld + 28))) {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3CorruptError(tls, int32(78495))
goto balance_cleanup
}
limit = int32(*(*Tu16)(unsafe.Pointer(pOld + 28)))
j = 0
for {
if !(j < limit) {
break
}
*(*uintptr)(unsafe.Pointer((*(*TCellArray)(unsafe.Pointer(bp + 112))).FapCell + uintptr((*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell)*8)) = aData + uintptr(int32(maskPage)&(int32(*(*Tu8)(unsafe.Pointer(piCell)))< usableSpace {
if i+int32(1) >= k {
k = i + int32(2)
if k > libc.Int32FromInt32(NB)+libc.Int32FromInt32(2) {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3CorruptError(tls, int32(78596))
goto balance_cleanup
}
(*(*[5]int32)(unsafe.Pointer(bp + 72)))[k-int32(1)] = 0
cntNew[k-int32(1)] = (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell
}
sz1 = int32(2) + int32(_cachedCellSize(tls, bp+112, cntNew[i]-int32(1)))
*(*int32)(unsafe.Pointer(bp + 72 + uintptr(i)*4)) -= sz1
if !(leafData != 0) {
if cntNew[i] < (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell {
sz1 = int32(2) + int32(_cachedCellSize(tls, bp+112, cntNew[i]))
} else {
sz1 = 0
}
}
*(*int32)(unsafe.Pointer(bp + 72 + uintptr(i+int32(1))*4)) += sz1
cntNew[i]--
}
for cntNew[i] < (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell {
sz1 = int32(2) + int32(_cachedCellSize(tls, bp+112, cntNew[i]))
if (*(*[5]int32)(unsafe.Pointer(bp + 72)))[i]+sz1 > usableSpace {
break
}
*(*int32)(unsafe.Pointer(bp + 72 + uintptr(i)*4)) += sz1
cntNew[i]++
if !(leafData != 0) {
if cntNew[i] < (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell {
sz1 = int32(2) + int32(_cachedCellSize(tls, bp+112, cntNew[i]))
} else {
sz1 = 0
}
}
*(*int32)(unsafe.Pointer(bp + 72 + uintptr(i+int32(1))*4)) -= sz1
}
if cntNew[i] >= (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell {
k = i + int32(1)
} else {
if i > 0 {
v10 = cntNew[i-int32(1)]
} else {
v10 = 0
}
if cntNew[i] <= v10 {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3CorruptError(tls, int32(78629))
goto balance_cleanup
}
}
goto _9
_9:
;
i++
}
/*
** The packing computed by the previous block is biased toward the siblings
** on the left side (siblings with smaller keys). The left siblings are
** always nearly full, while the right-most sibling might be nearly empty.
** The next block of code attempts to adjust the packing of siblings to
** get a better balance.
**
** This adjustment is more than an optimization. The packing above might
** be so out of balance as to be illegal. For example, the right-most
** sibling might be completely empty. This adjustment is not optional.
*/
i = k - int32(1)
for {
if !(i > 0) {
break
}
szRight = (*(*[5]int32)(unsafe.Pointer(bp + 72)))[i] /* Size of sibling on the right */
szLeft = (*(*[5]int32)(unsafe.Pointer(bp + 72)))[i-int32(1)] /* Index of first cell to the left of right sibling */
r = cntNew[i-int32(1)] - int32(1)
d = r + int32(1) - leafData
_cachedCellSize(tls, bp+112, d)
for cond := true; cond; cond = r >= 0 {
szR = int32(_cachedCellSize(tls, bp+112, r))
szD = int32(*(*Tu16)(unsafe.Pointer((*(*TCellArray)(unsafe.Pointer(bp + 112))).FszCell + uintptr(d)*2)))
if v14 = szRight != 0; v14 {
if v13 = bBulk != 0; !v13 {
if i == k-int32(1) {
v12 = 0
} else {
v12 = int32(2)
}
}
}
if v14 && (v13 || szRight+szD+int32(2) > szLeft-(szR+v12)) {
break
}
szRight += szD + int32(2)
szLeft -= szR + int32(2)
cntNew[i-int32(1)] = r
r--
d--
}
(*(*[5]int32)(unsafe.Pointer(bp + 72)))[i] = szRight
(*(*[5]int32)(unsafe.Pointer(bp + 72)))[i-int32(1)] = szLeft
if i > int32(1) {
v15 = cntNew[i-int32(2)]
} else {
v15 = 0
}
if cntNew[i-int32(1)] <= v15 {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3CorruptError(tls, int32(78673))
goto balance_cleanup
}
goto _11
_11:
;
i--
}
/* Sanity check: For a non-corrupt database file one of the following
** must be true:
** (1) We found one or more cells (cntNew[0]>0), or
** (2) pPage is a virtual root page. A virtual root page is when
** the real root page is page 1 and we are the only child of
** that page.
*/
/*
** Allocate k new pages. Reuse old pages where possible.
*/
pageFlags = int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer((*(*[3]uintptr)(unsafe.Pointer(bp + 8)))[0])).FaData)))
i = 0
for {
if !(i < k) {
break
}
if i < nOld {
v17 = (*(*[3]uintptr)(unsafe.Pointer(bp + 8)))[i]
(*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i] = v17
*(*uintptr)(unsafe.Pointer(bp + 216)) = v17
(*(*[3]uintptr)(unsafe.Pointer(bp + 8)))[i] = uintptr(0)
*(*int32)(unsafe.Pointer(bp)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 216)))).FpDbPage)
nNew++
if _sqlite3PagerPageRefcount(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 216)))).FpDbPage) != int32(1)+libc.BoolInt32(i == iParentIdx-nxDiv) && *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3CorruptError(tls, int32(78706))
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto balance_cleanup
}
} else {
if bBulk != 0 {
v18 = uint32(1)
} else {
v18 = *(*TPgno)(unsafe.Pointer(bp + 92))
}
*(*int32)(unsafe.Pointer(bp)) = _allocateBtreePage(tls, pBt, bp+216, bp+92, v18, uint8(0))
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto balance_cleanup
}
_zeroPage(tls, *(*uintptr)(unsafe.Pointer(bp + 216)), pageFlags)
(*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i] = *(*uintptr)(unsafe.Pointer(bp + 216))
nNew++
cntOld[i] = (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell
/* Set the pointer-map entry for the new sibling page. */
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
_ptrmapPut(tls, pBt, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 216)))).Fpgno, uint8(PTRMAP_BTREE), (*TMemPage)(unsafe.Pointer(pParent)).Fpgno, bp)
if *(*int32)(unsafe.Pointer(bp)) != SQLITE_OK {
goto balance_cleanup
}
}
}
goto _16
_16:
;
i++
}
/*
** Reassign page numbers so that the new pages are in ascending order.
** This helps to keep entries in the disk file in order so that a scan
** of the table is closer to a linear scan through the file. That in turn
** helps the operating system to deliver pages from the disk more rapidly.
**
** An O(N*N) sort algorithm is used, but since N is never more than NB+2
** (5), that is not a performance concern.
**
** When NB==3, this one optimization makes the database about 25% faster
** for large insertions and deletions.
*/
i = 0
for {
if !(i < nNew) {
break
}
aPgno[i] = (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).Fpgno
goto _19
_19:
;
i++
}
i = 0
for {
if !(i < nNew-int32(1)) {
break
}
iB = i
j = i + int32(1)
for {
if !(j < nNew) {
break
}
if (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[j])).Fpgno < (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iB])).Fpgno {
iB = j
}
goto _21
_21:
;
j++
}
/* If apNew[i] has a page number that is bigger than any of the
** subsequent apNew[] entries, then swap apNew[i] with the subsequent
** entry that has the smallest page number (which we know to be
** entry apNew[iB]).
*/
if iB != i {
pgnoA = (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).Fpgno
pgnoB = (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iB])).Fpgno
pgnoTemp = uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize + uint32(1)
fgA = (*TDbPage)(unsafe.Pointer((*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).FpDbPage)).Fflags
fgB = (*TDbPage)(unsafe.Pointer((*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iB])).FpDbPage)).Fflags
_sqlite3PagerRekey(tls, (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).FpDbPage, pgnoTemp, fgB)
_sqlite3PagerRekey(tls, (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iB])).FpDbPage, pgnoA, fgA)
_sqlite3PagerRekey(tls, (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).FpDbPage, pgnoB, fgB)
(*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).Fpgno = pgnoB
(*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iB])).Fpgno = pgnoA
}
goto _20
_20:
;
i++
}
_sqlite3Put4byte(tls, pRight, (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[nNew-int32(1)])).Fpgno)
/* If the sibling pages are not leaves, ensure that the right-child pointer
** of the right-most new sibling page is set to the value that was
** originally in the same field of the right-most old sibling page. */
if pageFlags&int32(PTF_LEAF) == 0 && nOld != nNew {
if nNew > nOld {
v22 = bp + 32
} else {
v22 = bp + 8
}
pOld1 = *(*uintptr)(unsafe.Pointer(v22 + uintptr(nOld-int32(1))*8))
libc.Xmemcpy(tls, (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[nNew-int32(1)])).FaData+8, (*TMemPage)(unsafe.Pointer(pOld1)).FaData+8, uint64(4))
}
/* Make any required updates to pointer map entries associated with
** cells stored on sibling pages following the balance operation. Pointer
** map entries associated with divider cells are set by the insertCell()
** routine. The associated pointer map entries are:
**
** a) if the cell contains a reference to an overflow chain, the
** entry associated with the first page in the overflow chain, and
**
** b) if the sibling pages are not leaves, the child page associated
** with the cell.
**
** If the sibling pages are not leaves, then the pointer map entry
** associated with the right-child of each sibling may also need to be
** updated. This happens below, after the sibling pages have been
** populated, not here.
*/
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
v23 = (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[0]
pOld2 = v23
pNew1 = v23
cntOldNext = int32((*TMemPage)(unsafe.Pointer(pNew1)).FnCell) + int32((*TMemPage)(unsafe.Pointer(pNew1)).FnOverflow)
iNew = 0
iOld = 0
i = 0
for {
if !(i < (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell) {
break
}
pCell = *(*uintptr)(unsafe.Pointer((*(*TCellArray)(unsafe.Pointer(bp + 112))).FapCell + uintptr(i)*8))
for i == cntOldNext {
iOld++
if iOld < nNew {
v25 = (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iOld]
} else {
v25 = (*(*[3]uintptr)(unsafe.Pointer(bp + 8)))[iOld]
}
pOld2 = v25
cntOldNext += int32((*TMemPage)(unsafe.Pointer(pOld2)).FnCell) + int32((*TMemPage)(unsafe.Pointer(pOld2)).FnOverflow) + libc.BoolInt32(!(leafData != 0))
}
if i == cntNew[iNew] {
iNew++
v26 = iNew
pNew1 = (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[v26]
if !(leafData != 0) {
goto _24
}
}
/* Cell pCell is destined for new sibling page pNew. Originally, it
** was either part of sibling page iOld (possibly an overflow cell),
** or else the divider cell to the left of sibling page iOld. So,
** if sibling page iOld had the same page number as pNew, and if
** pCell really was a part of sibling page iOld (not a divider or
** overflow cell), we can skip updating the pointer map entries. */
if iOld >= nNew || (*TMemPage)(unsafe.Pointer(pNew1)).Fpgno != aPgno[iOld] || !(uint64(pCell) >= uint64((*TMemPage)(unsafe.Pointer(pOld2)).FaData) && uint64(pCell) < uint64((*TMemPage)(unsafe.Pointer(pOld2)).FaDataEnd)) {
if !(leafCorrection != 0) {
_ptrmapPut(tls, pBt, _sqlite3Get4byte(tls, pCell), uint8(PTRMAP_BTREE), (*TMemPage)(unsafe.Pointer(pNew1)).Fpgno, bp)
}
if int32(_cachedCellSize(tls, bp+112, i)) > int32((*TMemPage)(unsafe.Pointer(pNew1)).FminLocal) {
_ptrmapPutOvflPtr(tls, pNew1, pOld2, pCell, bp)
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto balance_cleanup
}
}
goto _24
_24:
;
i++
}
}
/* Insert new divider cells into pParent. */
i = 0
for {
if !(i < nNew-int32(1)) {
break
}
pNew2 = (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i]
j = cntNew[i]
pCell1 = *(*uintptr)(unsafe.Pointer((*(*TCellArray)(unsafe.Pointer(bp + 112))).FapCell + uintptr(j)*8))
sz2 = int32(*(*Tu16)(unsafe.Pointer((*(*TCellArray)(unsafe.Pointer(bp + 112))).FszCell + uintptr(j)*2))) + int32(leafCorrection)
pTemp1 = aOvflSpace + uintptr(iOvflSpace)
if !((*TMemPage)(unsafe.Pointer(pNew2)).Fleaf != 0) {
libc.Xmemcpy(tls, (*TMemPage)(unsafe.Pointer(pNew2)).FaData+8, pCell1, uint64(4))
} else {
if leafData != 0 {
j--
(*(*func(*libc.TLS, uintptr, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pNew2)).FxParseCell})))(tls, pNew2, *(*uintptr)(unsafe.Pointer((*(*TCellArray)(unsafe.Pointer(bp + 112))).FapCell + uintptr(j)*8)), bp+224)
pCell1 = pTemp1
sz2 = int32(4) + _sqlite3PutVarint(tls, pCell1+4, uint64((*(*TCellInfo)(unsafe.Pointer(bp + 224))).FnKey))
pTemp1 = uintptr(0)
} else {
pCell1 -= uintptr(4)
/* Obscure case for non-leaf-data trees: If the cell at pCell was
** previously stored on a leaf node, and its reported size was 4
** bytes, then it may actually be smaller than this
** (see btreeParseCellPtr(), 4 bytes is the minimum size of
** any cell). But it is important to pass the correct size to
** insertCell(), so reparse the cell now.
**
** This can only happen for b-trees used to evaluate "IN (SELECT ...)"
** and WITHOUT ROWID tables with exactly one column which is the
** primary key.
*/
if int32(*(*Tu16)(unsafe.Pointer((*(*TCellArray)(unsafe.Pointer(bp + 112))).FszCell + uintptr(j)*2))) == int32(4) {
sz2 = int32((*(*func(*libc.TLS, uintptr, uintptr) Tu16)(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pParent)).FxCellSize})))(tls, pParent, pCell1))
}
}
}
iOvflSpace += sz2
k = 0
for {
if !(k < libc.Int32FromInt32(NB)*libc.Int32FromInt32(2) && *(*int32)(unsafe.Pointer(bp + 112 + 80 + uintptr(k)*4)) <= j) {
break
}
goto _28
_28:
;
k++
}
pSrcEnd = *(*uintptr)(unsafe.Pointer(bp + 112 + 32 + uintptr(k)*8))
if uint64(pCell1) < uint64(pSrcEnd) && uint64(pCell1+uintptr(sz2)) > uint64(pSrcEnd) {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3CorruptError(tls, int32(78906))
goto balance_cleanup
}
*(*int32)(unsafe.Pointer(bp)) = _insertCell(tls, pParent, nxDiv+i, pCell1, sz2, pTemp1, (*TMemPage)(unsafe.Pointer(pNew2)).Fpgno)
if *(*int32)(unsafe.Pointer(bp)) != SQLITE_OK {
goto balance_cleanup
}
goto _27
_27:
;
i++
}
/* Now update the actual sibling pages. The order in which they are updated
** is important, as this code needs to avoid disrupting any page from which
** cells may still need to be read. In practice, this means:
**
** (1) If cells are moving left (from apNew[iPg] to apNew[iPg-1])
** then it is not safe to update page apNew[iPg] until after
** the left-hand sibling apNew[iPg-1] has been updated.
**
** (2) If cells are moving right (from apNew[iPg] to apNew[iPg+1])
** then it is not safe to update page apNew[iPg] until after
** the right-hand sibling apNew[iPg+1] has been updated.
**
** If neither of the above apply, the page is safe to update.
**
** The iPg value in the following loop starts at nNew-1 and goes down
** to 0, then back up to nNew-1 again, thus making two passes over
** the pages. On the initial downward pass, only condition (1) above
** needs to be tested because (2) will always be true from the previous
** step. On the upward pass, both conditions are always true, so the
** upwards pass simply processes pages that were missed on the downward
** pass.
*/
i = int32(1) - nNew
for {
if !(i < nNew) {
break
}
if i < 0 {
v30 = -i
} else {
v30 = i
}
iPg = v30
if (*(*[5]Tu8)(unsafe.Pointer(bp + 100)))[iPg] != 0 {
goto _29
} /* Skip pages already processed */
if i >= 0 || cntOld[iPg-int32(1)] >= cntNew[iPg-int32(1)] {
/* Verify condition (1): If cells are moving left, update iPg
** only after iPg-1 has already been updated. */
/* Verify condition (2): If cells are moving right, update iPg
** only after iPg+1 has already been updated. */
if iPg == 0 {
v31 = libc.Int32FromInt32(0)
iOld1 = v31
iNew1 = v31
nNewCell = cntNew[0]
} else {
if iPg < nOld {
v32 = cntOld[iPg-int32(1)] + libc.BoolInt32(!(leafData != 0))
} else {
v32 = (*(*TCellArray)(unsafe.Pointer(bp + 112))).FnCell
}
iOld1 = v32
iNew1 = cntNew[iPg-int32(1)] + libc.BoolInt32(!(leafData != 0))
nNewCell = cntNew[iPg] - iNew1
}
*(*int32)(unsafe.Pointer(bp)) = _editPage(tls, (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iPg], iOld1, iNew1, nNewCell, bp+112)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto balance_cleanup
}
(*(*[5]Tu8)(unsafe.Pointer(bp + 100)))[iPg]++
(*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[iPg])).FnFree = usableSpace - (*(*[5]int32)(unsafe.Pointer(bp + 72)))[iPg]
}
goto _29
_29:
;
i++
}
/* All pages have been processed exactly once */
if isRoot != 0 && int32((*TMemPage)(unsafe.Pointer(pParent)).FnCell) == 0 && int32((*TMemPage)(unsafe.Pointer(pParent)).FhdrOffset) <= (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[0])).FnFree {
/* The root page of the b-tree now contains no cells. The only sibling
** page is the right-child of the parent. Copy the contents of the
** child page into the parent, decreasing the overall height of the
** b-tree structure by one. This is described as the "balance-shallower"
** sub-algorithm in some documentation.
**
** If this is an auto-vacuum database, the call to copyNodeContent()
** sets all pointer-map entries corresponding to database image pages
** for which the pointer is stored within the content being copied.
**
** It is critical that the child page be defragmented before being
** copied into the parent, because if the parent is page 1 then it will
** be smaller than the child due to the database header, and so all the
** free space needs to be up front.
*/
*(*int32)(unsafe.Pointer(bp)) = _defragmentPage(tls, (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[0], -int32(1))
_copyNodeContent(tls, (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[0], pParent, bp)
_freePage(tls, (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[0], bp)
} else {
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 && !(leafCorrection != 0) {
/* Fix the pointer map entries associated with the right-child of each
** sibling page. All other pointer map entries have already been taken
** care of. */
i = 0
for {
if !(i < nNew) {
break
}
key = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).FaData+8)
_ptrmapPut(tls, pBt, key, uint8(PTRMAP_BTREE), (*TMemPage)(unsafe.Pointer((*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])).Fpgno, bp)
goto _33
_33:
;
i++
}
}
}
/* Free any old pages that were not reused as new pages.
*/
i = nNew
for {
if !(i < nOld) {
break
}
_freePage(tls, (*(*[3]uintptr)(unsafe.Pointer(bp + 8)))[i], bp)
goto _34
_34:
;
i++
}
/*
** Cleanup before returning.
*/
goto balance_cleanup
balance_cleanup:
;
_sqlite3DbFree(tls, uintptr(0), (*(*TCellArray)(unsafe.Pointer(bp + 112))).FapCell)
i = 0
for {
if !(i < nOld) {
break
}
_releasePage(tls, (*(*[3]uintptr)(unsafe.Pointer(bp + 8)))[i])
goto _35
_35:
;
i++
}
i = 0
for {
if !(i < nNew) {
break
}
_releasePage(tls, (*(*[5]uintptr)(unsafe.Pointer(bp + 32)))[i])
goto _36
_36:
;
i++
}
return *(*int32)(unsafe.Pointer(bp))
}
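// Illustrative sketch, not part of the generated translation: the packing
// adjustment described above moves cells from the tail of a left sibling to
// the head of its right sibling for as long as the right side is empty or the
// move keeps it no larger than the left side, so the right-most sibling can
// never be left illegally empty. The helper below is hypothetical and works
// on cell sizes only.
func exampleAdjustPacking(szCells []int, split, szLeft, szRight int) (int, int, int) {
	for split > 0 {
		moved := szCells[split-1] + 2 // cell body plus its 2-byte cell-pointer entry
		if szRight != 0 && szRight+moved > szLeft-moved {
			break // another move would overfill the right sibling
		}
		szRight += moved
		szLeft -= moved
		split--
	}
	return split, szLeft, szRight
}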
// C documentation
//
// /*
// ** This function is called when the root page of a b-tree structure is
// ** overfull (has one or more overflow pages).
// **
// ** A new child page is allocated and the contents of the current root
// ** page, including overflow cells, are copied into the child. The root
// ** page is then overwritten to make it an empty page with the right-child
// ** pointer pointing to the new page.
// **
// ** Before returning, all pointer-map entries corresponding to pages
// ** that the new child-page now contains pointers to are updated. The
// ** entry corresponding to the new right-child pointer of the root
// ** page is also updated.
// **
// ** If successful, *ppChild is set to contain a reference to the child
// ** page and SQLITE_OK is returned. In this case the caller is required
// ** to call releasePage() on *ppChild exactly once. If an error occurs,
// ** an error code is returned and *ppChild is set to 0.
// */
func _balance_deeper(tls *libc.TLS, pRoot uintptr, ppChild uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var pBt uintptr
var _ /* pChild at bp+8 */ uintptr
var _ /* pgnoChild at bp+16 */ TPgno
var _ /* rc at bp+0 */ int32
_ = pBt /* Return value from subprocedures */
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) /* Pointer to a new child page */
*(*TPgno)(unsafe.Pointer(bp + 16)) = uint32(0) /* Page number of the new child page */
pBt = (*TMemPage)(unsafe.Pointer(pRoot)).FpBt /* The BTree */
/* Make pRoot, the root page of the b-tree, writable. Allocate a new
** page that will become the new right-child of pPage. Copy the contents
** of the node stored on pRoot into the new child page.
*/
*(*int32)(unsafe.Pointer(bp)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pRoot)).FpDbPage)
if *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
*(*int32)(unsafe.Pointer(bp)) = _allocateBtreePage(tls, pBt, bp+8, bp+16, (*TMemPage)(unsafe.Pointer(pRoot)).Fpgno, uint8(0))
_copyNodeContent(tls, pRoot, *(*uintptr)(unsafe.Pointer(bp + 8)), bp)
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
_ptrmapPut(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 16)), uint8(PTRMAP_BTREE), (*TMemPage)(unsafe.Pointer(pRoot)).Fpgno, bp)
}
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
*(*uintptr)(unsafe.Pointer(ppChild)) = uintptr(0)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
return *(*int32)(unsafe.Pointer(bp))
}
/* Copy the overflow cells from pRoot to pChild */
libc.Xmemcpy(tls, *(*uintptr)(unsafe.Pointer(bp + 8))+28, pRoot+28, uint64((*TMemPage)(unsafe.Pointer(pRoot)).FnOverflow)*uint64(2))
libc.Xmemcpy(tls, *(*uintptr)(unsafe.Pointer(bp + 8))+40, pRoot+40, uint64((*TMemPage)(unsafe.Pointer(pRoot)).FnOverflow)*uint64(8))
(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FnOverflow = (*TMemPage)(unsafe.Pointer(pRoot)).FnOverflow
/* Zero the contents of pRoot. Then install pChild as the right-child. */
_zeroPage(tls, pRoot, int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FaData))) & ^libc.Int32FromInt32(PTF_LEAF))
_sqlite3Put4byte(tls, (*TMemPage)(unsafe.Pointer(pRoot)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pRoot)).FhdrOffset)+int32(8)), *(*TPgno)(unsafe.Pointer(bp + 16)))
*(*uintptr)(unsafe.Pointer(ppChild)) = *(*uintptr)(unsafe.Pointer(bp + 8))
return SQLITE_OK
}
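// Illustrative sketch, not part of the generated translation: the
// "balance deeper" transformation above keeps the root's page number (so
// existing references to the root stay valid), moves the root's entire
// contents into a newly allocated child, and leaves the root as an empty page
// whose only pointer is the right-child pointer to that child, increasing the
// height of the tree by one. The toy types below are hypothetical.
type exampleNode struct {
	cells    []int
	children []*exampleNode
}

func exampleBalanceDeeper(root *exampleNode) {
	child := &exampleNode{cells: root.cells, children: root.children} // child takes everything
	root.cells = nil                                                  // root becomes empty...
	root.children = []*exampleNode{child}                             // ...pointing only at the new child
}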
// C documentation
//
// /*
// ** Return SQLITE_CORRUPT if any cursor other than pCur is currently valid
// ** on the same B-tree as pCur.
// **
// ** This can occur if a database is corrupt with two or more SQL tables
// ** pointing to the same b-tree. If an insert occurs on one SQL table
// ** and causes a BEFORE TRIGGER to do a secondary insert on the other SQL
// ** table linked to the same b-tree, and the secondary insert causes a
// ** rebalance, that can change content out from under the cursor on the
// ** first SQL table, violating invariants on the first insert.
// */
func _anotherValidCursor(tls *libc.TLS, pCur uintptr) (r int32) {
var pOther uintptr
_ = pOther
pOther = (*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FpCursor
for {
if !(pOther != 0) {
break
}
if pOther != pCur && int32((*TBtCursor)(unsafe.Pointer(pOther)).FeState) == CURSOR_VALID && (*TBtCursor)(unsafe.Pointer(pOther)).FpPage == (*TBtCursor)(unsafe.Pointer(pCur)).FpPage {
return _sqlite3CorruptError(tls, int32(79138))
}
goto _1
_1:
;
pOther = (*TBtCursor)(unsafe.Pointer(pOther)).FpNext
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** The page that pCur currently points to has just been modified in
// ** some way. This function figures out if this modification means the
// ** tree needs to be balanced, and if so calls the appropriate balancing
// ** routine. Balancing routines are:
// **
// ** balance_quick()
// ** balance_deeper()
// ** balance_nonroot()
// */
func _balance(tls *libc.TLS, pCur uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iIdx, iPage, rc, v1, v2 int32
var pFree, pPage, pParent, pSpace uintptr
var v3 bool
var _ /* aBalanceQuickSpace at bp+0 */ [13]Tu8
_, _, _, _, _, _, _, _, _, _ = iIdx, iPage, pFree, pPage, pParent, pSpace, rc, v1, v2, v3
rc = SQLITE_OK
pFree = uintptr(0)
for cond := true; cond; cond = rc == SQLITE_OK {
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
if (*TMemPage)(unsafe.Pointer(pPage)).FnFree < 0 && _btreeComputeFreeSpace(tls, pPage) != 0 {
break
}
if int32((*TMemPage)(unsafe.Pointer(pPage)).FnOverflow) == 0 && (*TMemPage)(unsafe.Pointer(pPage)).FnFree*int32(3) <= int32((*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FusableSize)*int32(2) {
/* No rebalance required as long as:
** (1) There are no overflow cells
** (2) The amount of free space on the page is less than 2/3rds of
** the total usable space on the page. */
break
} else {
v1 = int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)
iPage = v1
if v1 == 0 {
if v3 = (*TMemPage)(unsafe.Pointer(pPage)).FnOverflow != 0; v3 {
v2 = _anotherValidCursor(tls, pCur)
rc = v2
}
if v3 && v2 == SQLITE_OK {
/* The root page of the b-tree is overfull. In this case call the
** balance_deeper() function to create a new child for the root-page
** and copy the current contents of the root-page to it. The
** next iteration of the do-loop will balance the child page.
*/
rc = _balance_deeper(tls, pPage, pCur+144+1*8)
if rc == SQLITE_OK {
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage = int8(1)
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(0)
*(*Tu16)(unsafe.Pointer(pCur + 88)) = uint16(0)
*(*uintptr)(unsafe.Pointer(pCur + 144)) = pPage
(*TBtCursor)(unsafe.Pointer(pCur)).FpPage = *(*uintptr)(unsafe.Pointer(pCur + 144 + 1*8))
}
} else {
break
}
} else {
if _sqlite3PagerPageRefcount(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage) > int32(1) {
/* The page being written is not a root page, and there is currently
** more than one reference to it. This only happens if the page is one
** of its own ancestor pages. Corruption. */
rc = _sqlite3CorruptError(tls, int32(79198))
} else {
pParent = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iPage-int32(1))*8))
iIdx = int32(*(*Tu16)(unsafe.Pointer(pCur + 88 + uintptr(iPage-int32(1))*2)))
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pParent)).FpDbPage)
if rc == SQLITE_OK && (*TMemPage)(unsafe.Pointer(pParent)).FnFree < 0 {
rc = _btreeComputeFreeSpace(tls, pParent)
}
if rc == SQLITE_OK {
if (*TMemPage)(unsafe.Pointer(pPage)).FintKeyLeaf != 0 && int32((*TMemPage)(unsafe.Pointer(pPage)).FnOverflow) == int32(1) && int32(*(*Tu16)(unsafe.Pointer(pPage + 28))) == int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) && (*TMemPage)(unsafe.Pointer(pParent)).Fpgno != uint32(1) && int32((*TMemPage)(unsafe.Pointer(pParent)).FnCell) == iIdx {
/* Call balance_quick() to create a new sibling of pPage on which
** to store the overflow cell. balance_quick() inserts a new cell
** into pParent, which may cause pParent overflow. If this
** happens, the next iteration of the do-loop will balance pParent
// ** using either balance_nonroot() or balance_deeper(). Until this
** happens, the overflow cell is stored in the aBalanceQuickSpace[]
** buffer.
**
** The purpose of the following assert() is to check that only a
** single call to balance_quick() is made for each call to this
** function. If this were not verified, a subtle bug involving reuse
// ** of the aBalanceQuickSpace[] buffer might sneak in.
*/
rc = _balance_quick(tls, pParent, pPage, bp)
} else {
/* In this case, call balance_nonroot() to redistribute cells
** between pPage and up to 2 of its sibling pages. This involves
** modifying the contents of pParent, which may cause pParent to
** become overfull or underfull. The next iteration of the do-loop
** will balance the parent page to correct this.
**
** If the parent page becomes overfull, the overflow cell or cells
** are stored in the pSpace buffer allocated immediately below.
** A subsequent iteration of the do-loop will deal with this by
** calling balance_nonroot() (balance_deeper() may be called first,
** but it doesn't deal with overflow cells - just moves them to a
** different page). Once this subsequent call to balance_nonroot()
** has completed, it is safe to release the pSpace buffer used by
** the previous call, as the overflow cell data will have been
** copied either into the body of a database page or into the new
** pSpace buffer passed to the latter call to balance_nonroot().
*/
pSpace = _sqlite3PageMalloc(tls, int32((*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBt)).FpageSize))
rc = _balance_nonroot(tls, pParent, iIdx, pSpace, libc.BoolInt32(iPage == int32(1)), int32((*TBtCursor)(unsafe.Pointer(pCur)).Fhints)&int32(BTREE_BULKLOAD))
if pFree != 0 {
/* If pFree is not NULL, it points to the pSpace buffer used
** by a previous call to balance_nonroot(). Its contents are
** now stored either on real database pages or within the
** new pSpace buffer, so it may be safely freed here. */
_sqlite3PageFree(tls, pFree)
}
/* The pSpace buffer will be freed after the next call to
** balance_nonroot(), or just before this function returns, whichever
** comes first. */
pFree = pSpace
}
}
(*TMemPage)(unsafe.Pointer(pPage)).FnOverflow = uint8(0)
/* The next iteration of the do-loop balances the parent page. */
_releasePage(tls, pPage)
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage--
(*TBtCursor)(unsafe.Pointer(pCur)).FpPage = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)*8))
}
}
}
}
if pFree != 0 {
_sqlite3PageFree(tls, pFree)
}
return rc
}
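// The do-loop above picks exactly one of the three balancing routines per
// iteration. A minimal sketch of that decision order (illustrative names and
// a simplified page model only; this is not the generated code):
//
//	type pageState struct {
//		isRoot        bool // cursor is on the root page (iPage == 0)
//		nOverflow     int  // number of overflow cells on the page
//		appendToRight bool // single overflow cell appended past the last cell of an intkey leaf
//	}
//
//	// whichBalance mirrors the order of checks in _balance: an overfull root
//	// is pushed down with balance_deeper, the append-to-rightmost-leaf case
//	// takes balance_quick, and everything else is redistributed with
//	// balance_nonroot.
//	func whichBalance(p pageState) string {
//		switch {
//		case p.isRoot && p.nOverflow > 0:
//			return "balance_deeper"
//		case p.appendToRight && p.nOverflow == 1:
//			return "balance_quick"
//		default:
//			return "balance_nonroot"
//		}
//	}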
// C documentation
//
// /* Overwrite content from pX into pDest. Only do the write if the
// ** content is different from what is already there.
// */
func _btreeOverwriteContent(tls *libc.TLS, pPage uintptr, pDest uintptr, pX uintptr, iOffset int32, iAmt int32) (r int32) {
var i, nData, rc, rc1, rc2 int32
_, _, _, _, _ = i, nData, rc, rc1, rc2
nData = (*TBtreePayload)(unsafe.Pointer(pX)).FnData - iOffset
if nData <= 0 {
i = 0
for {
if !(i < iAmt && int32(*(*Tu8)(unsafe.Pointer(pDest + uintptr(i)))) == 0) {
break
}
goto _1
_1:
;
i++
}
if i < iAmt {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
if rc != 0 {
return rc
}
libc.Xmemset(tls, pDest+uintptr(i), 0, uint64(iAmt-i))
}
} else {
if nData < iAmt {
/* Mixed read data and zeros at the end. Make a recursive call
** to write the zeros then fall through to write the real data */
rc1 = _btreeOverwriteContent(tls, pPage, pDest+uintptr(nData), pX, iOffset+nData, iAmt-nData)
if rc1 != 0 {
return rc1
}
iAmt = nData
}
if libc.Xmemcmp(tls, pDest, (*TBtreePayload)(unsafe.Pointer(pX)).FpData+uintptr(iOffset), uint64(iAmt)) != 0 {
rc2 = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
if rc2 != 0 {
return rc2
}
/* In a corrupt database, it is possible for the source and destination
** buffers to overlap. This is harmless since the database is already
** corrupt but it does cause valgrind and ASAN warnings. So use
** memmove(). */
libc.Xmemmove(tls, pDest, (*TBtreePayload)(unsafe.Pointer(pX)).FpData+uintptr(iOffset), uint64(iAmt))
}
}
return SQLITE_OK
}
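// A standalone sketch of the "only write when different" idea above, shown on
// plain byte slices (hypothetical helper, not part of the generated code).
// The real routine must call sqlite3PagerWrite before touching the page; here
// that step is reduced to a counter so the skipped write is observable.
//
//	// import "bytes"
//
//	func overwriteIfDifferent(dst, src []byte, pageWrites *int) {
//		if bytes.Equal(dst, src) {
//			return // content already matches: leave the page clean
//		}
//		*pageWrites++ // stands in for journaling / marking the page dirty
//		copy(dst, src)
//	}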
// C documentation
//
// /*
// ** Overwrite the cell that cursor pCur is pointing to with fresh content
// ** contained in pX. In this variant, pCur is pointing to an overflow
// ** cell.
// */
func _btreeOverwriteOverflowCell(tls *libc.TLS, pCur uintptr, pX uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iOffset, nTotal, rc int32
var ovflPageSize Tu32
var ovflPgno TPgno
var pBt uintptr
var _ /* pPage at bp+0 */ uintptr
_, _, _, _, _, _ = iOffset, nTotal, ovflPageSize, ovflPgno, pBt, rc /* Next byte of pX->pData to write */
nTotal = (*TBtreePayload)(unsafe.Pointer(pX)).FnData + (*TBtreePayload)(unsafe.Pointer(pX)).FnZero /* Return code */
*(*uintptr)(unsafe.Pointer(bp)) = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage /* Size to write on overflow page */
/* pCur is an overflow cell */
/* Overwrite the local portion first */
rc = _btreeOverwriteContent(tls, *(*uintptr)(unsafe.Pointer(bp)), (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, 0, int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal))
if rc != 0 {
return rc
}
/* Now overwrite the overflow pages */
iOffset = int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal)
ovflPgno = _sqlite3Get4byte(tls, (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr(iOffset))
pBt = (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpBt
ovflPageSize = (*TBtShared)(unsafe.Pointer(pBt)).FusableSize - uint32(4)
for cond := true; cond; cond = iOffset < nTotal {
rc = _btreeGetPage(tls, pBt, ovflPgno, bp, 0)
if rc != 0 {
return rc
}
if _sqlite3PagerPageRefcount(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != int32(1) || (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FisInit != 0 {
rc = _sqlite3CorruptError(tls, int32(79362))
} else {
if uint32(iOffset)+ovflPageSize < uint32(nTotal) {
ovflPgno = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData)
} else {
ovflPageSize = uint32(nTotal - iOffset)
}
rc = _btreeOverwriteContent(tls, *(*uintptr)(unsafe.Pointer(bp)), (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData+uintptr(4), pX, iOffset, int32(ovflPageSize))
}
_sqlite3PagerUnref(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
if rc != 0 {
return rc
}
iOffset = int32(uint32(iOffset) + ovflPageSize)
}
return SQLITE_OK
}
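// The loop above walks the overflow chain: the first four bytes of each
// overflow page hold the next overflow page number (big-endian, zero on the
// final page) and the remaining usableSize-4 bytes hold payload. A sketch of
// that walk under the same layout, with a hypothetical fetchPage callback
// (illustrative only; the final page may carry fewer meaningful bytes):
//
//	// import "encoding/binary"
//
//	func walkOverflow(first uint32, usableSize int, fetchPage func(uint32) []byte, visit func(payload []byte)) {
//		for pgno := first; pgno != 0; {
//			data := fetchPage(pgno)                  // raw page image
//			pgno = binary.BigEndian.Uint32(data[:4]) // next page in the chain
//			visit(data[4:usableSize])                // payload portion of this page
//		}
//	}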
// C documentation
//
// /*
// ** Overwrite the cell that cursor pCur is pointing to with fresh content
// ** contained in pX.
// */
func _btreeOverwriteCell(tls *libc.TLS, pCur uintptr, pX uintptr) (r int32) {
var nTotal int32
var pPage uintptr
_, _ = nTotal, pPage
nTotal = (*TBtreePayload)(unsafe.Pointer(pX)).FnData + (*TBtreePayload)(unsafe.Pointer(pX)).FnZero /* Total bytes of data to write */
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage /* Page being written */
if (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload+uintptr((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) > (*TMemPage)(unsafe.Pointer(pPage)).FaDataEnd || (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload < (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr((*TMemPage)(unsafe.Pointer(pPage)).FcellOffset) {
return _sqlite3CorruptError(tls, int32(79390))
}
if int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal) == nTotal {
/* The entire cell is local */
return _btreeOverwriteContent(tls, pPage, (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FpPayload, pX, 0, int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnLocal))
} else {
/* The cell contains overflow content */
return _btreeOverwriteOverflowCell(tls, pCur, pX)
}
return r
}
// C documentation
//
// /*
// ** Insert a new record into the BTree. The content of the new record
// ** is described by the pX object. The pCur cursor is used only to
// ** define what table the record should be inserted into, and is left
// ** pointing at a random location.
// **
// ** For a table btree (used for rowid tables), only the pX.nKey value of
// ** the key is used. The pX.pKey value must be NULL. The pX.nKey is the
// ** rowid or INTEGER PRIMARY KEY of the row. The pX.nData,pData,nZero fields
// ** hold the content of the row.
// **
// ** For an index btree (used for indexes and WITHOUT ROWID tables), the
// ** key is an arbitrary byte sequence stored in pX.pKey,nKey. The
// ** pX.pData,nData,nZero fields must be zero.
// **
// ** If the seekResult parameter is non-zero, then a successful call to
// ** sqlite3BtreeIndexMoveto() to seek cursor pCur to (pKey,nKey) has already
// ** been performed. In other words, if seekResult!=0 then the cursor
// ** is currently pointing to a cell that will be adjacent to the cell
// ** to be inserted. If seekResult<0 then pCur points to a cell that is
// ** smaller than (pKey,nKey). If seekResult>0 then pCur points to a cell
// ** that is larger than (pKey,nKey).
// **
// ** If seekResult==0, that means pCur is pointing at some unknown location.
// ** In that case, this routine must seek the cursor to the correct insertion
// ** point for (pKey,nKey) before doing the insertion. For index btrees,
// ** if pX->nMem is non-zero, then pX->aMem contains pointers to the unpacked
// ** key values and pX->aMem can be used instead of pX->pKey to avoid having
// ** to decode the key.
// */
func _sqlite3BtreeInsert(tls *libc.TLS, pCur uintptr, pX uintptr, flags int32, seekResult int32) (r int32) {
bp := tls.Alloc(160)
defer tls.Free(160)
var idx int32
var newCell, oldCell, p, pPage, v3, p1, p4, p5 uintptr
var ovfl TPgno
var v2 Tu16
var _ /* info at bp+104 */ TCellInfo
var _ /* info at bp+128 */ TCellInfo
var _ /* loc at bp+4 */ int32
var _ /* r at bp+16 */ TUnpackedRecord
var _ /* rc at bp+0 */ int32
var _ /* szNew at bp+8 */ int32
var _ /* x2 at bp+56 */ TBtreePayload
_, _, _, _, _, _, _, _, _, _, _ = idx, newCell, oldCell, ovfl, p, pPage, v2, v3, p1, p4, p5
*(*int32)(unsafe.Pointer(bp + 4)) = seekResult /* -1: before desired location +1: after */
*(*int32)(unsafe.Pointer(bp + 8)) = 0
p = (*TBtCursor)(unsafe.Pointer(pCur)).FpBtree
newCell = uintptr(0)
/* Save the positions of any other cursors open on this table.
**
** In some cases, the call to btreeMoveto() below is a no-op. For
** example, when inserting data into a table with auto-generated integer
** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the
** integer key to use. It then calls this function to actually insert the
** data into the intkey B-Tree. In this case btreeMoveto() recognizes
** that the cursor is already where it needs to be and returns without
** doing any work. To avoid thwarting these optimizations, it is important
** not to clear the cursor here.
*/
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags)&int32(BTCF_Multiple) != 0 {
*(*int32)(unsafe.Pointer(bp)) = _saveAllCursors(tls, (*TBtree)(unsafe.Pointer(p)).FpBt, (*TBtCursor)(unsafe.Pointer(pCur)).FpgnoRoot, pCur)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
if *(*int32)(unsafe.Pointer(bp + 4)) != 0 && int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) < 0 {
/* This can only happen if the schema is corrupt such that there is more
** than one table or index with the same root page as used by the cursor.
// ** That can only happen if the SQLITE_NoSchemaError flag was set when
** the schema was loaded. This cannot be asserted though, as a user might
** set the flag, load the schema, and then unset the flag. */
return _sqlite3CorruptError(tls, int32(79471))
}
}
/* Ensure that the cursor is not in the CURSOR_FAULT state and that it
** points to a valid cell.
*/
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) >= int32(CURSOR_REQUIRESEEK) {
*(*int32)(unsafe.Pointer(bp)) = _moveToRoot(tls, pCur)
if *(*int32)(unsafe.Pointer(bp)) != 0 && *(*int32)(unsafe.Pointer(bp)) != int32(SQLITE_EMPTY) {
return *(*int32)(unsafe.Pointer(bp))
}
}
/* Assert that the caller has been consistent. If this cursor was opened
** expecting an index b-tree, then the caller should be inserting blob
** keys with no associated data. If the cursor was opened expecting an
** intkey table, the caller should be inserting integer keys with a
** blob of associated data. */
if (*TBtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0) {
/* If this is an insert into a table b-tree, invalidate any incrblob
** cursors open on the row being replaced */
if (*TBtree)(unsafe.Pointer(p)).FhasIncrblobCur != 0 {
_invalidateIncrblobCursors(tls, p, (*TBtCursor)(unsafe.Pointer(pCur)).FpgnoRoot, (*TBtreePayload)(unsafe.Pointer(pX)).FnKey, 0)
}
/* If BTREE_SAVEPOSITION is set, the cursor must already be pointing
** to a row with the same key as the new entry being inserted.
*/
/* On the other hand, BTREE_SAVEPOSITION==0 does not imply
** that the cursor is not pointing to a row to be overwritten.
** So do a complete check.
*/
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags)&int32(BTCF_ValidNKey) != 0 && (*TBtreePayload)(unsafe.Pointer(pX)).FnKey == (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey {
/* The cursor is pointing to the entry that is to be
** overwritten */
if int32((*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize) != 0 && (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnPayload == uint32((*TBtreePayload)(unsafe.Pointer(pX)).FnData)+uint32((*TBtreePayload)(unsafe.Pointer(pX)).FnZero) {
/* New entry is the same size as the old. Do an overwrite */
return _btreeOverwriteCell(tls, pCur, pX)
}
} else {
if *(*int32)(unsafe.Pointer(bp + 4)) == 0 {
/* The cursor is *not* pointing to the cell to be overwritten, nor
** to an adjacent cell. Move the cursor so that it is pointing either
** to the cell to be overwritten or an adjacent cell.
*/
*(*int32)(unsafe.Pointer(bp)) = _sqlite3BtreeTableMoveto(tls, pCur, (*TBtreePayload)(unsafe.Pointer(pX)).FnKey, libc.BoolInt32(flags&int32(BTREE_APPEND) != 0), bp+4)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
}
}
} else {
/* This is an index or a WITHOUT ROWID table */
/* If BTREE_SAVEPOSITION is set, the cursor must already be pointing
** to a row with the same key as the new entry being inserted.
*/
/* If the cursor is not already pointing either to the cell to be
** overwritten, or if a new cell is being inserted, if the cursor is
** not pointing to an immediately adjacent cell, then move the cursor
** so that it does.
*/
if *(*int32)(unsafe.Pointer(bp + 4)) == 0 && flags&int32(BTREE_SAVEPOSITION) == 0 {
if (*TBtreePayload)(unsafe.Pointer(pX)).FnMem != 0 {
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 16))).FpKeyInfo = (*TBtCursor)(unsafe.Pointer(pCur)).FpKeyInfo
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 16))).FaMem = (*TBtreePayload)(unsafe.Pointer(pX)).FaMem
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 16))).FnField = (*TBtreePayload)(unsafe.Pointer(pX)).FnMem
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 16))).Fdefault_rc = 0
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 16))).FeqSeen = uint8(0)
*(*int32)(unsafe.Pointer(bp)) = _sqlite3BtreeIndexMoveto(tls, pCur, bp+16, bp+4)
} else {
*(*int32)(unsafe.Pointer(bp)) = _btreeMoveto(tls, pCur, (*TBtreePayload)(unsafe.Pointer(pX)).FpKey, (*TBtreePayload)(unsafe.Pointer(pX)).FnKey, libc.BoolInt32(flags&int32(BTREE_APPEND) != 0), bp+4)
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
}
/* If the cursor is currently pointing to an entry to be overwritten
// ** and the new content is the same as the old, then use the
** overwrite optimization.
*/
if *(*int32)(unsafe.Pointer(bp + 4)) == 0 {
_getCellInfo(tls, pCur)
if (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey == (*TBtreePayload)(unsafe.Pointer(pX)).FnKey {
(*(*TBtreePayload)(unsafe.Pointer(bp + 56))).FpData = (*TBtreePayload)(unsafe.Pointer(pX)).FpKey
(*(*TBtreePayload)(unsafe.Pointer(bp + 56))).FnData = int32((*TBtreePayload)(unsafe.Pointer(pX)).FnKey)
(*(*TBtreePayload)(unsafe.Pointer(bp + 56))).FnZero = 0
return _btreeOverwriteCell(tls, pCur, bp+56)
}
}
}
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
if (*TMemPage)(unsafe.Pointer(pPage)).FnFree < 0 {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) > int32(CURSOR_INVALID) {
/* ^^^^^--- due to the moveToRoot() call above */
*(*int32)(unsafe.Pointer(bp)) = _sqlite3CorruptError(tls, int32(79594))
} else {
*(*int32)(unsafe.Pointer(bp)) = _btreeComputeFreeSpace(tls, pPage)
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
}
newCell = (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FpTmpSpace
if flags&int32(BTREE_PREFORMAT) != 0 {
*(*int32)(unsafe.Pointer(bp)) = SQLITE_OK
*(*int32)(unsafe.Pointer(bp + 8)) = (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FnPreformatSize
if *(*int32)(unsafe.Pointer(bp + 8)) < int32(4) {
*(*int32)(unsafe.Pointer(bp + 8)) = int32(4)
*(*uint8)(unsafe.Pointer(newCell + 3)) = uint8(0)
}
if (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FautoVacuum != 0 && *(*int32)(unsafe.Pointer(bp + 8)) > int32((*TMemPage)(unsafe.Pointer(pPage)).FmaxLocal) {
(*(*func(*libc.TLS, uintptr, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell})))(tls, pPage, newCell, bp+104)
if (*(*TCellInfo)(unsafe.Pointer(bp + 104))).FnPayload != uint32((*(*TCellInfo)(unsafe.Pointer(bp + 104))).FnLocal) {
ovfl = _sqlite3Get4byte(tls, newCell+uintptr(*(*int32)(unsafe.Pointer(bp + 8))-int32(4)))
_ptrmapPut(tls, (*TBtree)(unsafe.Pointer(p)).FpBt, ovfl, uint8(PTRMAP_OVERFLOW1), (*TMemPage)(unsafe.Pointer(pPage)).Fpgno, bp)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto end_insert
}
}
}
} else {
*(*int32)(unsafe.Pointer(bp)) = _fillInCell(tls, pPage, newCell, pX, bp+8)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto end_insert
}
}
idx = int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix)
(*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnSize = uint16(0)
if *(*int32)(unsafe.Pointer(bp + 4)) == 0 {
if idx >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
return _sqlite3CorruptError(tls, int32(79636))
}
*(*int32)(unsafe.Pointer(bp)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto end_insert
}
oldCell = (*TMemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*idx))))< (*TMemPage)(unsafe.Pointer(pPage)).FaDataEnd {
return _sqlite3CorruptError(tls, int32(79666))
}
libc.Xmemcpy(tls, oldCell, newCell, uint64(*(*int32)(unsafe.Pointer(bp + 8))))
return SQLITE_OK
}
_dropCell(tls, pPage, idx, int32((*(*TCellInfo)(unsafe.Pointer(bp + 128))).FnSize), bp)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
goto end_insert
}
} else {
if *(*int32)(unsafe.Pointer(bp + 4)) < 0 && int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) > 0 {
v3 = pCur + 86
*(*Tu16)(unsafe.Pointer(v3))++
v2 = *(*Tu16)(unsafe.Pointer(v3))
idx = int32(v2)
p4 = pCur + 1
*(*Tu8)(unsafe.Pointer(p4)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p4))) & ^(libc.Int32FromInt32(BTCF_ValidNKey) | libc.Int32FromInt32(BTCF_ValidOvfl)))
} else {
}
}
*(*int32)(unsafe.Pointer(bp)) = _insertCellFast(tls, pPage, idx, newCell, *(*int32)(unsafe.Pointer(bp + 8)))
/* If no error has occurred and pPage has an overflow cell, call balance()
** to redistribute the cells within the tree. Since balance() may move
** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey
** variables.
**
** Previous versions of SQLite called moveToRoot() to move the cursor
** back to the root page as balance() used to invalidate the contents
** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that,
** set the cursor state to "invalid". This makes common insert operations
** slightly faster.
**
** There is a subtle but important optimization here too. When inserting
** multiple records into an intkey b-tree using a single cursor (as can
** happen while processing an "INSERT INTO ... SELECT" statement), it
** is advantageous to leave the cursor pointing to the last entry in
** the b-tree if possible. If the cursor is left pointing to the last
** entry in the table, and the next row inserted has an integer key
** larger than the largest existing key, it is possible to insert the
** row without seeking the cursor. This can be a big performance boost.
*/
if (*TMemPage)(unsafe.Pointer(pPage)).FnOverflow != 0 {
p5 = pCur + 1
*(*Tu8)(unsafe.Pointer(p5)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p5))) & ^(libc.Int32FromInt32(BTCF_ValidNKey) | libc.Int32FromInt32(BTCF_ValidOvfl)))
*(*int32)(unsafe.Pointer(bp)) = _balance(tls, pCur)
/* Must make sure nOverflow is reset to zero even if the balance()
** fails. Internal data structure corruption will result otherwise.
** Also, set the cursor state to invalid. This stops saveCursorPosition()
** from trying to save the current position of the cursor. */
(*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FnOverflow = uint8(0)
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_INVALID)
if flags&int32(BTREE_SAVEPOSITION) != 0 && *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
_btreeReleaseAllCursorPages(tls, pCur)
if (*TBtCursor)(unsafe.Pointer(pCur)).FpKeyInfo != 0 {
(*TBtCursor)(unsafe.Pointer(pCur)).FpKey = _sqlite3Malloc(tls, uint64((*TBtreePayload)(unsafe.Pointer(pX)).FnKey))
if (*TBtCursor)(unsafe.Pointer(pCur)).FpKey == uintptr(0) {
*(*int32)(unsafe.Pointer(bp)) = int32(SQLITE_NOMEM)
} else {
libc.Xmemcpy(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpKey, (*TBtreePayload)(unsafe.Pointer(pX)).FpKey, uint64((*TBtreePayload)(unsafe.Pointer(pX)).FnKey))
}
}
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_REQUIRESEEK)
(*TBtCursor)(unsafe.Pointer(pCur)).FnKey = (*TBtreePayload)(unsafe.Pointer(pX)).FnKey
}
}
goto end_insert
end_insert:
;
return *(*int32)(unsafe.Pointer(bp))
return r
}
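// The loc/seekResult contract from the header comment, restated as a small
// sketch (illustrative only): loc==0 means the cursor already sits on a cell
// with the same key (overwrite in place), loc<0 means that cell is smaller
// than the new key (insert just after it, which is why the code increments
// pCur.ix), and loc>0 means it is larger (insert just before it).
//
//	func insertPosition(loc, cursorIdx int) (idx int, overwrite bool) {
//		switch {
//		case loc == 0:
//			return cursorIdx, true // replace the cell under the cursor
//		case loc < 0:
//			return cursorIdx + 1, false // new entry goes after the cursor cell
//		default:
//			return cursorIdx, false // new entry goes before the cursor cell
//		}
//	}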
// C documentation
//
// /*
// ** This function is used as part of copying the current row from cursor
// ** pSrc into cursor pDest. If the cursors are open on intkey tables, then
// ** parameter iKey is used as the rowid value when the record is copied
// ** into pDest. Otherwise, the record is copied verbatim.
// **
// ** This function does not actually write the new value to cursor pDest.
// ** Instead, it creates and populates any required overflow pages and
// ** writes the data for the new cell into the BtShared.pTmpSpace buffer
// ** for the destination database. The size of the cell, in bytes, is left
// ** in BtShared.nPreformatSize. The caller completes the insertion by
// ** calling sqlite3BtreeInsert() with the BTREE_PREFORMAT flag specified.
// **
// ** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
// */
func _sqlite3BtreeTransferRow(tls *libc.TLS, pDest uintptr, pSrc uintptr, iKey Ti64) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var aIn, aOut, pBt, pPageOut, pPgnoOut, pSrcPager, v1 uintptr
var nCopy int32
var nIn, nOut, nRem Tu32
var ovflIn TPgno
var v2, v3 uint32
var _ /* pNew at bp+24 */ uintptr
var _ /* pPageIn at bp+8 */ uintptr
var _ /* pgnoNew at bp+16 */ TPgno
var _ /* rc at bp+0 */ int32
_, _, _, _, _, _, _, _, _, _, _, _, _, _ = aIn, aOut, nCopy, nIn, nOut, nRem, ovflIn, pBt, pPageOut, pPgnoOut, pSrcPager, v1, v2, v3
pBt = (*TBtCursor)(unsafe.Pointer(pDest)).FpBt
aOut = (*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace /* Bytes of data still to copy */
_getCellInfo(tls, pSrc)
if (*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload < uint32(0x80) {
v1 = aOut
aOut++
*(*Tu8)(unsafe.Pointer(v1)) = uint8((*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload)
} else {
aOut += uintptr(_sqlite3PutVarint(tls, aOut, uint64((*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload)))
}
if (*TBtCursor)(unsafe.Pointer(pDest)).FpKeyInfo == uintptr(0) {
aOut += uintptr(_sqlite3PutVarint(tls, aOut, uint64(iKey)))
}
nIn = uint32((*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FnLocal)
aIn = (*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload
if aIn+uintptr(nIn) > (*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd {
return _sqlite3CorruptError(tls, int32(79768))
}
nRem = (*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload
if nIn == nRem && nIn < uint32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pDest)).FpPage)).FmaxLocal) {
libc.Xmemcpy(tls, aOut, aIn, uint64(nIn))
(*TBtShared)(unsafe.Pointer(pBt)).FnPreformatSize = int32(int64(nIn) + (int64(aOut) - int64((*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace)))
return SQLITE_OK
} else {
*(*int32)(unsafe.Pointer(bp)) = SQLITE_OK
pSrcPager = (*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pSrc)).FpBt)).FpPager
pPgnoOut = uintptr(0)
ovflIn = uint32(0)
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0)
pPageOut = uintptr(0) /* Size of output buffer aOut[] */
nOut = uint32(_btreePayloadToLocal(tls, (*TBtCursor)(unsafe.Pointer(pDest)).FpPage, int64((*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload)))
(*TBtShared)(unsafe.Pointer(pBt)).FnPreformatSize = int32(int64(nOut) + (int64(aOut) - int64((*TBtShared)(unsafe.Pointer(pBt)).FpTmpSpace)))
if nOut < (*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FnPayload {
pPgnoOut = aOut + uintptr(nOut)
*(*int32)(unsafe.Pointer(pBt + 144)) += int32(4)
}
if nRem > nIn {
if aIn+uintptr(nIn)+uintptr(4) > (*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pSrc)).FpPage)).FaDataEnd {
return _sqlite3CorruptError(tls, int32(79793))
}
ovflIn = _sqlite3Get4byte(tls, (*TBtCursor)(unsafe.Pointer(pSrc)).Finfo.FpPayload+uintptr(nIn))
}
for cond := true; cond; cond = nRem > uint32(0) && *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
nRem -= nOut
for cond := true; cond; cond = *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK && nOut > uint32(0) {
if nIn > uint32(0) {
if nOut < nIn {
v2 = nOut
} else {
v2 = nIn
}
nCopy = int32(v2)
libc.Xmemcpy(tls, aOut, aIn, uint64(nCopy))
nOut -= uint32(nCopy)
nIn -= uint32(nCopy)
aOut += uintptr(nCopy)
aIn += uintptr(nCopy)
}
if nOut > uint32(0) {
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0)
*(*int32)(unsafe.Pointer(bp)) = _sqlite3PagerGet(tls, pSrcPager, ovflIn, bp+8, int32(PAGER_GET_READONLY))
if *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
aIn = _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
ovflIn = _sqlite3Get4byte(tls, aIn)
aIn += uintptr(4)
nIn = (*TBtShared)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pSrc)).FpBt)).FusableSize - uint32(4)
}
}
}
if *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK && nRem > uint32(0) && pPgnoOut != 0 {
*(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0)
*(*int32)(unsafe.Pointer(bp)) = _allocateBtreePage(tls, pBt, bp+24, bp+16, uint32(0), uint8(0))
_sqlite3Put4byte(tls, pPgnoOut, *(*TPgno)(unsafe.Pointer(bp + 16)))
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 && pPageOut != 0 {
_ptrmapPut(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 16)), uint8(PTRMAP_OVERFLOW2), (*TMemPage)(unsafe.Pointer(pPageOut)).Fpgno, bp)
}
_releasePage(tls, pPageOut)
pPageOut = *(*uintptr)(unsafe.Pointer(bp + 24))
if pPageOut != 0 {
pPgnoOut = (*TMemPage)(unsafe.Pointer(pPageOut)).FaData
_sqlite3Put4byte(tls, pPgnoOut, uint32(0))
aOut = pPgnoOut + 4
if (*TBtShared)(unsafe.Pointer(pBt)).FusableSize-uint32(4) < nRem {
v3 = (*TBtShared)(unsafe.Pointer(pBt)).FusableSize - uint32(4)
} else {
v3 = nRem
}
nOut = v3
}
}
}
_releasePage(tls, pPageOut)
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
return *(*int32)(unsafe.Pointer(bp))
}
return r
}
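// The destination cell built above starts with a varint holding the total
// payload size (a single byte when it is < 0x80) followed, for table b-trees
// only (pKeyInfo == 0), by a varint holding the rowid taken from iKey; index
// b-trees carry their key inside the payload instead. A sketch of that header
// layout, assuming a hypothetical putVarint that implements SQLite's varint
// encoding (illustrative only):
//
//	func cellHeader(nPayload uint64, isTable bool, rowid int64, putVarint func([]byte, uint64) int) []byte {
//		buf := make([]byte, 0, 18) // two varints, at most nine bytes each
//		tmp := make([]byte, 9)
//		if nPayload < 0x80 {
//			buf = append(buf, byte(nPayload)) // single-byte fast path used above
//		} else {
//			buf = append(buf, tmp[:putVarint(tmp, nPayload)]...)
//		}
//		if isTable {
//			buf = append(buf, tmp[:putVarint(tmp, uint64(rowid))]...)
//		}
//		return buf
//	}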
// C documentation
//
// /*
// ** Delete the entry that the cursor is pointing to.
// **
// ** If the BTREE_SAVEPOSITION bit of the flags parameter is zero, then
// ** the cursor is left pointing at an arbitrary location after the delete.
// ** But if that bit is set, then the cursor is left in a state such that
// ** the next call to BtreeNext() or BtreePrev() moves it to the same row
// ** as it would have been on if the call to BtreeDelete() had been omitted.
// **
// ** The BTREE_AUXDELETE bit of flags indicates that this is one of several deletes
// ** associated with a single table entry and its indexes. Only one of those
// ** deletes is considered the "primary" delete. The primary delete occurs
// ** on a cursor that is not a BTREE_FORDELETE cursor. All but one delete
// ** operation on non-FORDELETE cursors is tagged with the AUXDELETE flag.
// ** The BTREE_AUXDELETE bit is a hint that is not used by this implementation,
// ** but which might be used by alternative storage engines.
// */
func _sqlite3BtreeDelete(tls *libc.TLS, pCur uintptr, flags Tu8) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var bPreserve Tu8
var iCellDepth, iCellIdx, nCell int32
var n TPgno
var p, pBt, pCell, pLeaf, pPage, pTmp, v2 uintptr
var v1 Ti8
var _ /* info at bp+8 */ TCellInfo
var _ /* rc at bp+0 */ int32
_, _, _, _, _, _, _, _, _, _, _, _, _ = bPreserve, iCellDepth, iCellIdx, n, nCell, p, pBt, pCell, pLeaf, pPage, pTmp, v1, v2
p = (*TBtCursor)(unsafe.Pointer(pCur)).FpBtree
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt /* Keep cursor valid. 2 for CURSOR_SKIPNEXT */
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) != CURSOR_VALID {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) >= int32(CURSOR_REQUIRESEEK) {
*(*int32)(unsafe.Pointer(bp)) = _btreeRestoreCursorPosition(tls, pCur)
if *(*int32)(unsafe.Pointer(bp)) != 0 || int32((*TBtCursor)(unsafe.Pointer(pCur)).FeState) != CURSOR_VALID {
return *(*int32)(unsafe.Pointer(bp))
}
} else {
return _sqlite3CorruptError(tls, int32(79889))
}
}
iCellDepth = int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)
iCellIdx = int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix)
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
if int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) <= iCellIdx {
return _sqlite3CorruptError(tls, int32(79898))
}
pCell = (*TMemPage)(unsafe.Pointer(pPage)).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*iCellIdx))))< int32((*TBtShared)(unsafe.Pointer(pBt)).FusableSize*libc.Uint32FromInt32(2)/libc.Uint32FromInt32(3)) || int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) == int32(1) {
/* A b-tree rebalance will be required after deleting this entry.
** Save the cursor key. */
*(*int32)(unsafe.Pointer(bp)) = _saveCursorKey(tls, pCur)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
} else {
bPreserve = uint8(2)
}
}
/* If the page containing the entry to delete is not a leaf page, move
** the cursor to the largest entry in the tree that is smaller than
** the entry being deleted. This cell will replace the cell being deleted
** from the internal node. The 'previous' entry is used for this instead
** of the 'next' entry, as the previous entry is always a part of the
** sub-tree headed by the child page of the cell being deleted. This makes
** balancing the tree following the delete operation easier. */
if !((*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0) {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3BtreePrevious(tls, pCur, 0)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
}
/* Save the positions of any other cursors open on this table before
** making any modifications. */
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FcurFlags)&int32(BTCF_Multiple) != 0 {
*(*int32)(unsafe.Pointer(bp)) = _saveAllCursors(tls, pBt, (*TBtCursor)(unsafe.Pointer(pCur)).FpgnoRoot, pCur)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
}
/* If this is a delete operation to remove a row from a table b-tree,
** invalidate any incrblob cursors open on the row being deleted. */
if (*TBtCursor)(unsafe.Pointer(pCur)).FpKeyInfo == uintptr(0) && (*TBtree)(unsafe.Pointer(p)).FhasIncrblobCur != 0 {
_invalidateIncrblobCursors(tls, p, (*TBtCursor)(unsafe.Pointer(pCur)).FpgnoRoot, (*TBtCursor)(unsafe.Pointer(pCur)).Finfo.FnKey, 0)
}
/* Make the page containing the entry to be deleted writable. Then free any
** overflow pages associated with the entry and finally remove the cell
** itself from within the page. */
*(*int32)(unsafe.Pointer(bp)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(pPage)).FpDbPage)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
(*(*func(*libc.TLS, uintptr, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(pPage)).FxParseCell})))(tls, pPage, pCell, bp+8)
if uint32((*(*TCellInfo)(unsafe.Pointer(bp + 8))).FnLocal) != (*(*TCellInfo)(unsafe.Pointer(bp + 8))).FnPayload {
*(*int32)(unsafe.Pointer(bp)) = _clearCellOverflow(tls, pPage, pCell, bp+8)
} else {
*(*int32)(unsafe.Pointer(bp)) = SQLITE_OK
}
_dropCell(tls, pPage, iCellIdx, int32((*(*TCellInfo)(unsafe.Pointer(bp + 8))).FnSize), bp)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
/* If the cell deleted was not located on a leaf page, then the cursor
** is currently pointing to the largest entry in the sub-tree headed
** by the child-page of the cell that was just deleted from an internal
** node. The cell from the leaf node needs to be moved to the internal
** node to replace the deleted cell. */
if !((*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0) {
pLeaf = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
if (*TMemPage)(unsafe.Pointer(pLeaf)).FnFree < 0 {
*(*int32)(unsafe.Pointer(bp)) = _btreeComputeFreeSpace(tls, pLeaf)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
}
if iCellDepth < int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)-int32(1) {
n = (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(iCellDepth+int32(1))*8)))).Fpgno
} else {
n = (*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).Fpgno
}
pCell = (*TMemPage)(unsafe.Pointer(pLeaf)).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(pLeaf)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pLeaf)).FaCellIdx + uintptr(int32(2)*(int32((*TMemPage)(unsafe.Pointer(pLeaf)).FnCell)-int32(1))))))< iCellDepth {
_releasePageNotNull(tls, (*TBtCursor)(unsafe.Pointer(pCur)).FpPage)
(*TBtCursor)(unsafe.Pointer(pCur)).FiPage--
for int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) > iCellDepth {
v2 = pCur + 84
v1 = *(*Ti8)(unsafe.Pointer(v2))
*(*Ti8)(unsafe.Pointer(v2))--
_releasePage(tls, *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr(v1)*8)))
}
(*TBtCursor)(unsafe.Pointer(pCur)).FpPage = *(*uintptr)(unsafe.Pointer(pCur + 144 + uintptr((*TBtCursor)(unsafe.Pointer(pCur)).FiPage)*8))
*(*int32)(unsafe.Pointer(bp)) = _balance(tls, pCur)
}
if *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
if int32(bPreserve) > int32(1) {
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_SKIPNEXT)
if iCellIdx >= int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
(*TBtCursor)(unsafe.Pointer(pCur)).FskipNext = -int32(1)
(*TBtCursor)(unsafe.Pointer(pCur)).Fix = uint16(int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) - int32(1))
} else {
(*TBtCursor)(unsafe.Pointer(pCur)).FskipNext = int32(1)
}
} else {
*(*int32)(unsafe.Pointer(bp)) = _moveToRoot(tls, pCur)
if bPreserve != 0 {
_btreeReleaseAllCursorPages(tls, pCur)
(*TBtCursor)(unsafe.Pointer(pCur)).FeState = uint8(CURSOR_REQUIRESEEK)
}
if *(*int32)(unsafe.Pointer(bp)) == int32(SQLITE_EMPTY) {
*(*int32)(unsafe.Pointer(bp)) = SQLITE_OK
}
}
}
return *(*int32)(unsafe.Pointer(bp))
}
// C documentation
//
// /*
// ** Create a new BTree table. Write into *piTable the page
// ** number for the root page of the new table.
// **
// ** The type of table is determined by the flags parameter. Only the
// ** following values of flags are currently in use. Other values for
// ** flags might not work:
// **
// ** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys
// ** BTREE_ZERODATA Used for SQL indices
// */
func _btreeCreateTable(tls *libc.TLS, p uintptr, piTable uintptr, createTabFlags int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var pBt uintptr
var ptfFlags int32
var _ /* eType at bp+32 */ Tu8
var _ /* iPtrPage at bp+36 */ TPgno
var _ /* pPageMove at bp+24 */ uintptr
var _ /* pRoot at bp+0 */ uintptr
var _ /* pgnoMove at bp+16 */ TPgno
var _ /* pgnoRoot at bp+8 */ TPgno
var _ /* rc at bp+12 */ int32
_, _ = pBt, ptfFlags
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt /* Page-type flags for the root page of new table */
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 { /* The page to move to. */
/* Creating a new table may require moving an existing database page
** to make room for the new table's root page. In case this page turns
** out to be an overflow page, delete all overflow page-map caches
** held by open cursors.
*/
_invalidateAllOverflowCache(tls, pBt)
/* Read the value of meta[3] from the database to determine where the
** root page of the new table should go. meta[3] is the largest root-page
** created so far, so the new root-page is (meta[3]+1).
*/
_sqlite3BtreeGetMeta(tls, p, int32(BTREE_LARGEST_ROOT_PAGE), bp+8)
if *(*TPgno)(unsafe.Pointer(bp + 8)) > _btreePagecount(tls, pBt) {
return _sqlite3CorruptError(tls, int32(80112))
}
*(*TPgno)(unsafe.Pointer(bp + 8))++
/* The new root-page may not be allocated on a pointer-map page, or the
** PENDING_BYTE page.
*/
for *(*TPgno)(unsafe.Pointer(bp + 8)) == _ptrmapPageno(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 8))) || *(*TPgno)(unsafe.Pointer(bp + 8)) == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) {
*(*TPgno)(unsafe.Pointer(bp + 8))++
}
/* Allocate a page. The page that currently resides at pgnoRoot will
** be moved to the allocated page (unless the allocated page happens
** to reside at pgnoRoot).
*/
*(*int32)(unsafe.Pointer(bp + 12)) = _allocateBtreePage(tls, pBt, bp+24, bp+16, *(*TPgno)(unsafe.Pointer(bp + 8)), uint8(BTALLOC_EXACT))
if *(*int32)(unsafe.Pointer(bp + 12)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 12))
}
if *(*TPgno)(unsafe.Pointer(bp + 16)) != *(*TPgno)(unsafe.Pointer(bp + 8)) {
/* pgnoRoot is the page that will be used for the root-page of
** the new table (assuming an error did not occur). But we were
** allocated pgnoMove. If required (i.e. if it was not allocated
** by extending the file), the current page at position pgnoMove
** is already journaled.
*/
*(*Tu8)(unsafe.Pointer(bp + 32)) = uint8(0)
*(*TPgno)(unsafe.Pointer(bp + 36)) = uint32(0)
/* Save the positions of any open cursors. This is required in
** case they are holding a reference to an xFetch reference
** corresponding to page pgnoRoot. */
*(*int32)(unsafe.Pointer(bp + 12)) = _saveAllCursors(tls, pBt, uint32(0), uintptr(0))
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24)))
if *(*int32)(unsafe.Pointer(bp + 12)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 12))
}
/* Move the page currently at pgnoRoot to pgnoMove. */
*(*int32)(unsafe.Pointer(bp + 12)) = _btreeGetPage(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 8)), bp, 0)
if *(*int32)(unsafe.Pointer(bp + 12)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 12))
}
*(*int32)(unsafe.Pointer(bp + 12)) = _ptrmapGet(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 8)), bp+32, bp+36)
if int32(*(*Tu8)(unsafe.Pointer(bp + 32))) == int32(PTRMAP_ROOTPAGE) || int32(*(*Tu8)(unsafe.Pointer(bp + 32))) == int32(PTRMAP_FREEPAGE) {
*(*int32)(unsafe.Pointer(bp + 12)) = _sqlite3CorruptError(tls, int32(80160))
}
if *(*int32)(unsafe.Pointer(bp + 12)) != SQLITE_OK {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
return *(*int32)(unsafe.Pointer(bp + 12))
}
*(*int32)(unsafe.Pointer(bp + 12)) = _relocatePage(tls, pBt, *(*uintptr)(unsafe.Pointer(bp)), *(*Tu8)(unsafe.Pointer(bp + 32)), *(*TPgno)(unsafe.Pointer(bp + 36)), *(*TPgno)(unsafe.Pointer(bp + 16)), 0)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
/* Obtain the page at pgnoRoot */
if *(*int32)(unsafe.Pointer(bp + 12)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 12))
}
*(*int32)(unsafe.Pointer(bp + 12)) = _btreeGetPage(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 8)), bp, 0)
if *(*int32)(unsafe.Pointer(bp + 12)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp + 12))
}
*(*int32)(unsafe.Pointer(bp + 12)) = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
if *(*int32)(unsafe.Pointer(bp + 12)) != SQLITE_OK {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
return *(*int32)(unsafe.Pointer(bp + 12))
}
} else {
*(*uintptr)(unsafe.Pointer(bp)) = *(*uintptr)(unsafe.Pointer(bp + 24))
}
/* Update the pointer-map and meta-data with the new root-page number. */
_ptrmapPut(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 8)), uint8(PTRMAP_ROOTPAGE), uint32(0), bp+12)
if *(*int32)(unsafe.Pointer(bp + 12)) != 0 {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
return *(*int32)(unsafe.Pointer(bp + 12))
}
/* When the new root page was allocated, page 1 was made writable in
** order either to increase the database filesize, or to decrement the
** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail.
*/
*(*int32)(unsafe.Pointer(bp + 12)) = _sqlite3BtreeUpdateMeta(tls, p, int32(4), *(*TPgno)(unsafe.Pointer(bp + 8)))
if *(*int32)(unsafe.Pointer(bp + 12)) != 0 {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp)))
return *(*int32)(unsafe.Pointer(bp + 12))
}
} else {
*(*int32)(unsafe.Pointer(bp + 12)) = _allocateBtreePage(tls, pBt, bp, bp+8, uint32(1), uint8(0))
if *(*int32)(unsafe.Pointer(bp + 12)) != 0 {
return *(*int32)(unsafe.Pointer(bp + 12))
}
}
if createTabFlags&int32(BTREE_INTKEY) != 0 {
ptfFlags = libc.Int32FromInt32(PTF_INTKEY) | libc.Int32FromInt32(PTF_LEAFDATA) | libc.Int32FromInt32(PTF_LEAF)
} else {
ptfFlags = libc.Int32FromInt32(PTF_ZERODATA) | libc.Int32FromInt32(PTF_LEAF)
}
_zeroPage(tls, *(*uintptr)(unsafe.Pointer(bp)), ptfFlags)
_sqlite3PagerUnref(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage)
*(*TPgno)(unsafe.Pointer(piTable)) = *(*TPgno)(unsafe.Pointer(bp + 8))
return SQLITE_OK
}
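// In auto-vacuum mode the new root page is placed at meta[3]+1, skipping any
// candidate that is a pointer-map page or the PENDING_BYTE page (the page
// that contains byte offset sqlite3PendingByte). A sketch of that candidate
// search, with a hypothetical isPtrmapPage predicate (illustrative only):
//
//	func nextRootCandidate(largestRoot, pendingByte, pageSize uint32, isPtrmapPage func(uint32) bool) uint32 {
//		pgno := largestRoot + 1
//		pendingPage := pendingByte/pageSize + 1
//		for isPtrmapPage(pgno) || pgno == pendingPage {
//			pgno++
//		}
//		return pgno
//	}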
func _sqlite3BtreeCreateTable(tls *libc.TLS, p uintptr, piTable uintptr, flags int32) (r int32) {
var rc int32
_ = rc
_sqlite3BtreeEnter(tls, p)
rc = _btreeCreateTable(tls, p, piTable, flags)
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** Erase the given database page and all its children. Return
// ** the page to the freelist.
// */
func _clearDatabasePage(tls *libc.TLS, pBt uintptr, pgno TPgno, freePageFlag int32, pnChange uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var hdr, i, v2 int32
var pCell uintptr
var _ /* info at bp+16 */ TCellInfo
var _ /* pPage at bp+0 */ uintptr
var _ /* rc at bp+8 */ int32
_, _, _, _ = hdr, i, pCell, v2
if pgno > _btreePagecount(tls, pBt) {
return _sqlite3CorruptError(tls, int32(80250))
}
*(*int32)(unsafe.Pointer(bp + 8)) = _getAndInitPage(tls, pBt, pgno, bp, 0)
if *(*int32)(unsafe.Pointer(bp + 8)) != 0 {
return *(*int32)(unsafe.Pointer(bp + 8))
}
if int32((*TBtShared)(unsafe.Pointer(pBt)).FopenFlags)&int32(BTREE_SINGLE) == 0 && _sqlite3PagerPageRefcount(tls, (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FpDbPage) != int32(1)+libc.BoolInt32(pgno == uint32(1)) {
*(*int32)(unsafe.Pointer(bp + 8)) = _sqlite3CorruptError(tls, int32(80257))
goto cleardatabasepage_out
}
hdr = int32((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FhdrOffset)
i = 0
for {
if !(i < int32((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FnCell)) {
break
}
pCell = (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaData + uintptr(int32((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaCellIdx + uintptr(int32(2)*i))))< _btreePagecount(tls, pBt) {
return _sqlite3CorruptError(tls, int32(80361))
}
*(*int32)(unsafe.Pointer(bp)) = _sqlite3BtreeClearTable(tls, p, int32(iTable), uintptr(0))
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return *(*int32)(unsafe.Pointer(bp))
}
*(*int32)(unsafe.Pointer(bp)) = _btreeGetPage(tls, pBt, iTable, bp+8, 0)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
return *(*int32)(unsafe.Pointer(bp))
}
*(*int32)(unsafe.Pointer(piMoved)) = 0
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
_sqlite3BtreeGetMeta(tls, p, int32(BTREE_LARGEST_ROOT_PAGE), bp+16)
if iTable == *(*TPgno)(unsafe.Pointer(bp + 16)) {
/* If the table being dropped is the table with the largest root-page
** number in the database, put the root page on the free list.
*/
_freePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
if *(*int32)(unsafe.Pointer(bp)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp))
}
} else {
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
*(*int32)(unsafe.Pointer(bp)) = _btreeGetPage(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 16)), bp+24, 0)
if *(*int32)(unsafe.Pointer(bp)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp))
}
*(*int32)(unsafe.Pointer(bp)) = _relocatePage(tls, pBt, *(*uintptr)(unsafe.Pointer(bp + 24)), uint8(PTRMAP_ROOTPAGE), uint32(0), iTable, 0)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24)))
if *(*int32)(unsafe.Pointer(bp)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp))
}
*(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0)
*(*int32)(unsafe.Pointer(bp)) = _btreeGetPage(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 16)), bp+24, 0)
_freePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24)), bp)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 24)))
if *(*int32)(unsafe.Pointer(bp)) != SQLITE_OK {
return *(*int32)(unsafe.Pointer(bp))
}
*(*int32)(unsafe.Pointer(piMoved)) = int32(*(*TPgno)(unsafe.Pointer(bp + 16)))
}
/* Set the new 'max-root-page' value in the database header. This
** is the old value less one, less one more if that happens to
** be a pointer-map page, less one again if that is the
** PENDING_BYTE_PAGE.
*/
*(*TPgno)(unsafe.Pointer(bp + 16))--
for *(*TPgno)(unsafe.Pointer(bp + 16)) == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize+libc.Uint32FromInt32(1) || _ptrmapPageno(tls, pBt, *(*TPgno)(unsafe.Pointer(bp + 16))) == *(*TPgno)(unsafe.Pointer(bp + 16)) {
*(*TPgno)(unsafe.Pointer(bp + 16))--
}
*(*int32)(unsafe.Pointer(bp)) = _sqlite3BtreeUpdateMeta(tls, p, int32(4), *(*TPgno)(unsafe.Pointer(bp + 16)))
} else {
_freePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), bp)
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
}
return *(*int32)(unsafe.Pointer(bp))
}
func _sqlite3BtreeDropTable(tls *libc.TLS, p uintptr, iTable int32, piMoved uintptr) (r int32) {
var rc int32
_ = rc
_sqlite3BtreeEnter(tls, p)
rc = _btreeDropTable(tls, p, uint32(iTable), piMoved)
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** This function may only be called if the b-tree connection already
// ** has a read or write transaction open on the database.
// **
// ** Read the meta-information out of a database file. Meta[0]
// ** is the number of free pages currently in the database. Meta[1]
// ** through meta[15] are available for use by higher layers. Meta[0]
// ** is read-only, the others are read/write.
// **
// ** The schema layer numbers meta values differently. At the schema
// ** layer (and the SetCookie and ReadCookie opcodes) the number of
// ** free pages is not visible. So Cookie[0] is the same as Meta[1].
// **
// ** This routine treats Meta[BTREE_DATA_VERSION] as a special case. Instead
// ** of reading the value out of the header, it instead loads the "DataVersion"
// ** from the pager. The BTREE_DATA_VERSION value is not actually stored in the
// ** database file. It is a number computed by the pager. But its access
// ** pattern is the same as header meta values, and so it is convenient to
// ** read it from this routine.
// */
func _sqlite3BtreeGetMeta(tls *libc.TLS, p uintptr, idx int32, pMeta uintptr) {
var pBt uintptr
_ = pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
if idx == int32(BTREE_DATA_VERSION) {
*(*Tu32)(unsafe.Pointer(pMeta)) = _sqlite3PagerDataVersion(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager) + (*TBtree)(unsafe.Pointer(p)).FiBDataVersion
} else {
*(*Tu32)(unsafe.Pointer(pMeta)) = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+uintptr(int32(36)+idx*int32(4)))
}
/* If auto-vacuum is disabled in this build and this is an auto-vacuum
** database, mark the database as read-only. */
_sqlite3BtreeLeave(tls, p)
}
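// Apart from BTREE_DATA_VERSION, each meta value is a 4-byte big-endian
// integer stored at byte offset 36+idx*4 of page 1, so the schema cookie
// (Cookie[0], i.e. Meta[1]) lives at offset 40. A sketch of that read given
// the raw page-1 image (illustrative only):
//
//	// import "encoding/binary"
//
//	func readMeta(page1 []byte, idx int) uint32 {
//		return binary.BigEndian.Uint32(page1[36+idx*4:])
//	}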
// C documentation
//
// /*
// ** Write meta-information back into the database. Meta[0] is
// ** read-only and may not be written.
// */
func _sqlite3BtreeUpdateMeta(tls *libc.TLS, p uintptr, idx int32, iMeta Tu32) (r int32) {
var pBt, pP1 uintptr
var rc int32
_, _, _ = pBt, pP1, rc
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
_sqlite3BtreeEnter(tls, p)
pP1 = (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FpDbPage)
if rc == SQLITE_OK {
_sqlite3Put4byte(tls, pP1+uintptr(int32(36)+idx*int32(4)), iMeta)
if idx == int32(BTREE_INCR_VACUUM) {
(*TBtShared)(unsafe.Pointer(pBt)).FincrVacuum = uint8(iMeta)
}
}
_sqlite3BtreeLeave(tls, p)
return rc
}
// C documentation
//
// /*
// ** The first argument, pCur, is a cursor opened on some b-tree. Count the
// ** number of entries in the b-tree and write the result to *pnEntry.
// **
// ** SQLITE_OK is returned if the operation is successfully executed.
// ** Otherwise, if an error is encountered (i.e. an IO error or database
// ** corruption) an SQLite error code is returned.
// */
func _sqlite3BtreeCount(tls *libc.TLS, db uintptr, pCur uintptr, pnEntry uintptr) (r int32) {
var iIdx, rc int32
var nEntry Ti64
var pPage uintptr
_, _, _, _ = iIdx, nEntry, pPage, rc
nEntry = 0 /* Return code */
rc = _moveToRoot(tls, pCur)
if rc == int32(SQLITE_EMPTY) {
*(*Ti64)(unsafe.Pointer(pnEntry)) = 0
return SQLITE_OK
}
/* Unless an error occurs, the following loop runs one iteration for each
** page in the B-Tree structure (not including overflow pages).
*/
for rc == SQLITE_OK && !(libc.AtomicLoadPInt32(db+432) != 0) { /* Current page of the b-tree */
/* If this is a leaf page or the tree is not an int-key tree, then
** this page contains countable entries. Increment the entry counter
** accordingly.
*/
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
if (*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0 || !((*TMemPage)(unsafe.Pointer(pPage)).FintKey != 0) {
nEntry += int64((*TMemPage)(unsafe.Pointer(pPage)).FnCell)
}
/* pPage is a leaf node. This loop moves the cursor up the tree until
** it points to the parent of the next page in the tree that has not
** yet been visited. The
** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell
** of the page, or to the number of cells in the page if the next page
** to visit is the right-child of its parent.
**
** If all pages in the tree have been visited, return SQLITE_OK to the
** caller.
*/
if (*TMemPage)(unsafe.Pointer(pPage)).Fleaf != 0 {
for cond := true; cond; cond = int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix) >= int32((*TMemPage)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpPage)).FnCell) {
if int32((*TBtCursor)(unsafe.Pointer(pCur)).FiPage) == 0 {
/* All pages of the b-tree have been visited. Return successfully. */
*(*Ti64)(unsafe.Pointer(pnEntry)) = nEntry
return _moveToRoot(tls, pCur)
}
_moveToParent(tls, pCur)
}
(*TBtCursor)(unsafe.Pointer(pCur)).Fix++
pPage = (*TBtCursor)(unsafe.Pointer(pCur)).FpPage
}
/* Descend to the child node of the cell that the cursor currently
** points at. This is the right-child if (iIdx==pPage->nCell).
*/
iIdx = int32((*TBtCursor)(unsafe.Pointer(pCur)).Fix)
if iIdx == int32((*TMemPage)(unsafe.Pointer(pPage)).FnCell) {
rc = _moveToChild(tls, pCur, _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FhdrOffset)+int32(8))))
} else {
rc = _moveToChild(tls, pCur, _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer(pPage)).FaData+uintptr(int32((*TMemPage)(unsafe.Pointer(pPage)).FmaskPage)&(int32(*(*Tu8)(unsafe.Pointer((*TMemPage)(unsafe.Pointer(pPage)).FaCellIdx + uintptr(int32(2)*iIdx))))<zErrMsg.
// ** Return 1 if there are 2 or more references to the page and 0 if
// ** this is the first reference to the page.
// **
// ** Also check that the page number is in bounds.
// */
func _checkRef(tls *libc.TLS, pCheck uintptr, iPage TPgno) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
if iPage > (*TIntegrityCk)(unsafe.Pointer(pCheck)).FnCkPage || iPage == uint32(0) {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4207, libc.VaList(bp+8, iPage))
return int32(1)
}
if _getPageReferenced(tls, pCheck, iPage) != 0 {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4230, libc.VaList(bp+8, iPage))
return int32(1)
}
_setPageReferenced(tls, pCheck, iPage)
return 0
}
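// A minimal model of the check above: keep one "referenced" bit per page,
// complain about an out-of-range page number, and complain when a page is
// reached a second time. Sketch using a map in place of the bitvector
// (illustrative only; message strings are abbreviated):
//
//	func checkRefModel(seen map[uint32]bool, nPage, iPage uint32, report func(msg string, pgno uint32)) bool {
//		if iPage == 0 || iPage > nPage {
//			report("invalid page number", iPage)
//			return true
//		}
//		if seen[iPage] {
//			report("2nd reference to page", iPage)
//			return true
//		}
//		seen[iPage] = true
//		return false // first reference
//	}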
// C documentation
//
// /*
// ** Check that the entry in the pointer-map for page iChild maps to
// ** page iParent, pointer type ptrType. If not, append an error message
// ** to pCheck.
// */
func _checkPtrmap(tls *libc.TLS, pCheck uintptr, iChild TPgno, eType Tu8, iParent TPgno) {
bp := tls.Alloc(64)
defer tls.Free(64)
var rc int32
var _ /* ePtrmapType at bp+0 */ Tu8
var _ /* iPtrmapParent at bp+4 */ TPgno
_ = rc
rc = _ptrmapGet(tls, (*TIntegrityCk)(unsafe.Pointer(pCheck)).FpBt, iChild, bp, bp+4)
if rc != SQLITE_OK {
if rc == int32(SQLITE_NOMEM) || rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(12)< (*TBtShared)(unsafe.Pointer((*TIntegrityCk)(unsafe.Pointer(pCheck)).FpBt)).FusableSize/uint32(4)-uint32(2) {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4360, libc.VaList(bp+16, iPage))
N--
} else {
i = 0
for {
if !(i < int32(n)) {
break
}
iFreePage = _sqlite3Get4byte(tls, pOvflData+uintptr(int32(8)+i*int32(4)))
if (*TBtShared)(unsafe.Pointer((*TIntegrityCk)(unsafe.Pointer(pCheck)).FpBt)).FautoVacuum != 0 {
_checkPtrmap(tls, pCheck, iFreePage, uint8(PTRMAP_FREEPAGE), uint32(0))
}
_checkRef(tls, pCheck, iFreePage)
goto _1
_1:
;
i++
}
N -= n
}
} else {
/* If this database supports auto-vacuum and iPage is not the last
** page in this overflow list, check that the pointer-map entry for
** the following page matches iPage.
*/
if (*TBtShared)(unsafe.Pointer((*TIntegrityCk)(unsafe.Pointer(pCheck)).FpBt)).FautoVacuum != 0 && N > uint32(0) {
i = int32(_sqlite3Get4byte(tls, pOvflData))
_checkPtrmap(tls, pCheck, uint32(i), uint8(PTRMAP_OVERFLOW2), iPage)
}
}
iPage = _sqlite3Get4byte(tls, pOvflData)
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
if N != 0 && nErrAtStart == (*TIntegrityCk)(unsafe.Pointer(pCheck)).FnErr {
if isFreeList != 0 {
v2 = __ccgo_ts + 4399
} else {
v2 = __ccgo_ts + 4404
}
_checkAppendMsg(tls, pCheck, __ccgo_ts+4425, libc.VaList(bp+16, v2, expected-N, expected))
}
}
// C documentation
//
// /*
// ** An implementation of a min-heap.
// **
// ** aHeap[0] is the number of elements on the heap. aHeap[1] is the
// ** root element. The daughter nodes of aHeap[N] are aHeap[N*2]
// ** and aHeap[N*2+1].
// **
// ** The heap property is this: Every node is less than or equal to both
// ** of its daughter nodes. A consequence of the heap property is that the
// ** root node aHeap[1] is always the minimum value currently in the heap.
// **
// ** The btreeHeapInsert() routine inserts an unsigned 32-bit number onto
// ** the heap, preserving the heap property. The btreeHeapPull() routine
// ** removes the root element from the heap (the minimum value in the heap)
// ** and then moves other nodes around as necessary to preserve the heap
// ** property.
// **
// ** This heap is used for cell overlap and coverage testing. Each u32
// ** entry represents the span of a cell or freeblock on a btree page.
// ** The upper 16 bits are the index of the first byte of a range and the
// ** lower 16 bits are the index of the last byte of that range.
// */
func _btreeHeapInsert(tls *libc.TLS, aHeap uintptr, x Tu32) {
var i, j, v1, v3 Tu32
var v2 uintptr
_, _, _, _, _ = i, j, v1, v2, v3
v2 = aHeap
*(*Tu32)(unsafe.Pointer(v2))++
v1 = *(*Tu32)(unsafe.Pointer(v2))
i = v1
*(*Tu32)(unsafe.Pointer(aHeap + uintptr(i)*4)) = x
for {
v3 = i / libc.Uint32FromInt32(2)
j = v3
if !(v3 > uint32(0) && *(*Tu32)(unsafe.Pointer(aHeap + uintptr(j)*4)) > *(*Tu32)(unsafe.Pointer(aHeap + uintptr(i)*4))) {
break
}
x = *(*Tu32)(unsafe.Pointer(aHeap + uintptr(j)*4))
*(*Tu32)(unsafe.Pointer(aHeap + uintptr(j)*4)) = *(*Tu32)(unsafe.Pointer(aHeap + uintptr(i)*4))
*(*Tu32)(unsafe.Pointer(aHeap + uintptr(i)*4)) = x
i = j
}
}
func _btreeHeapPull(tls *libc.TLS, aHeap uintptr, pOut uintptr) (r int32) {
var i, j, x, v1, v2 Tu32
_, _, _, _, _ = i, j, x, v1, v2
v1 = *(*Tu32)(unsafe.Pointer(aHeap))
x = v1
if v1 == uint32(0) {
return 0
}
*(*Tu32)(unsafe.Pointer(pOut)) = *(*Tu32)(unsafe.Pointer(aHeap + 1*4))
*(*Tu32)(unsafe.Pointer(aHeap + 1*4)) = *(*Tu32)(unsafe.Pointer(aHeap + uintptr(x)*4))
*(*Tu32)(unsafe.Pointer(aHeap + uintptr(x)*4)) = uint32(0xffffffff)
*(*Tu32)(unsafe.Pointer(aHeap))--
i = uint32(1)
for {
v2 = i * libc.Uint32FromInt32(2)
j = v2
if !(v2 <= *(*Tu32)(unsafe.Pointer(aHeap))) {
break
}
if *(*Tu32)(unsafe.Pointer(aHeap + uintptr(j)*4)) > *(*Tu32)(unsafe.Pointer(aHeap + uintptr(j+uint32(1))*4)) {
j++
}
if *(*Tu32)(unsafe.Pointer(aHeap + uintptr(i)*4)) < *(*Tu32)(unsafe.Pointer(aHeap + uintptr(j)*4)) {
break
}
x = *(*Tu32)(unsafe.Pointer(aHeap + uintptr(i)*4))
*(*Tu32)(unsafe.Pointer(aHeap + uintptr(i)*4)) = *(*Tu32)(unsafe.Pointer(aHeap + uintptr(j)*4))
*(*Tu32)(unsafe.Pointer(aHeap + uintptr(j)*4)) = x
i = j
}
return int32(1)
}
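// Illustrative sketch (not part of the generated translation): the same
// 1-based min-heap scheme as _btreeHeapInsert and _btreeHeapPull above,
// written against a plain []uint32. The slice stands in for a full database
// page, so it is assumed to be comfortably larger than the number of entries;
// aHeap[0] holds the element count and each entry packs a byte range as
// (first<<16)|last, so entries are pulled in ascending order of start offset.
func exampleHeapInsert(aHeap []uint32, x uint32) {
	aHeap[0]++
	i := aHeap[0]
	aHeap[i] = x
	// Bubble the new entry up while it is smaller than its parent.
	for j := i / 2; j > 0 && aHeap[j] > aHeap[i]; j = i / 2 {
		aHeap[i], aHeap[j] = aHeap[j], aHeap[i]
		i = j
	}
}

func exampleHeapPull(aHeap []uint32) (uint32, bool) {
	n := aHeap[0]
	if n == 0 {
		return 0, false
	}
	out := aHeap[1]
	// Move the last element to the root and sift it down.
	aHeap[1] = aHeap[n]
	aHeap[n] = 0xffffffff
	aHeap[0]--
	i := uint32(1)
	for j := i * 2; j <= aHeap[0]; j = i * 2 {
		if aHeap[j] > aHeap[j+1] {
			j++
		}
		if aHeap[i] < aHeap[j] {
			break
		}
		aHeap[i], aHeap[j] = aHeap[j], aHeap[i]
		i = j
	}
	return out, true
}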
// C documentation
//
// /*
// ** Do various sanity checks on a single page of a tree. Return
// ** the tree depth. Root pages return 0. Parents of root pages
// ** return 1, and so forth.
// **
// ** These checks are done:
// **
// ** 1. Make sure that cells and freeblocks do not overlap
// ** but combine to completely cover the page.
// ** 2. Make sure integer cell keys are in order.
// ** 3. Check the integrity of overflow pages.
// ** 4. Recursively call checkTreePage on all children.
// ** 5. Verify that the depth of all children is the same.
// */
func _checkTreePage(tls *libc.TLS, pCheck uintptr, iPage TPgno, piMinKey uintptr, _maxKey Ti64) (r int32) {
bp := tls.Alloc(80)
defer tls.Free(80)
*(*Ti64)(unsafe.Pointer(bp)) = _maxKey
var cellStart, d2, depth, doCoverageCheck, hdr, i, j, keyCanBeEqual, nCell, nFrag, pgno, rc, saved_v1, saved_v2, size1, v1, v2, v3, v5 int32
var contentOffset, nPage, pc, prev, size, usableSize Tu32
var data, heap, pBt, pCell, pCellIdx, saved_zPfx uintptr
var pgnoOvfl TPgno
var savedIsInit Tu8
var _ /* info at bp+24 */ TCellInfo
var _ /* pPage at bp+8 */ uintptr
var _ /* x at bp+16 */ Tu32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = cellStart, contentOffset, d2, data, depth, doCoverageCheck, hdr, heap, i, j, keyCanBeEqual, nCell, nFrag, nPage, pBt, pCell, pCellIdx, pc, pgno, pgnoOvfl, prev, rc, savedIsInit, saved_v1, saved_v2, saved_zPfx, size, size1, usableSize, v1, v2, v3, v5
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0) /* The page being analyzed */
depth = -int32(1) /* Depth of a subtree */
doCoverageCheck = int32(1) /* True if cell coverage checking should be done */
keyCanBeEqual = int32(1) /* True if IPK can be equal to maxKey */
heap = uintptr(0) /* Min-heap used for checking cell coverage */
prev = uint32(0) /* Previous entry on the min-heap */
saved_zPfx = (*TIntegrityCk)(unsafe.Pointer(pCheck)).FzPfx
saved_v1 = int32((*TIntegrityCk)(unsafe.Pointer(pCheck)).Fv1)
saved_v2 = (*TIntegrityCk)(unsafe.Pointer(pCheck)).Fv2
savedIsInit = uint8(0)
/* Check that the page exists
*/
_checkProgress(tls, pCheck)
if (*TIntegrityCk)(unsafe.Pointer(pCheck)).FmxErr == 0 {
goto end_of_check
}
pBt = (*TIntegrityCk)(unsafe.Pointer(pCheck)).FpBt
usableSize = (*TBtShared)(unsafe.Pointer(pBt)).FusableSize
if iPage == uint32(0) {
return 0
}
if _checkRef(tls, pCheck, iPage) != 0 {
return 0
}
(*TIntegrityCk)(unsafe.Pointer(pCheck)).FzPfx = __ccgo_ts + 4451
(*TIntegrityCk)(unsafe.Pointer(pCheck)).Fv1 = iPage
v1 = _btreeGetPage(tls, pBt, iPage, bp+8, 0)
rc = v1
if v1 != 0 {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4469, libc.VaList(bp+56, rc))
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(12)<= 0 && (*TIntegrityCk)(unsafe.Pointer(pCheck)).FmxErr != 0) {
break
}
/* Check cell size */
(*TIntegrityCk)(unsafe.Pointer(pCheck)).Fv2 = i
pc = uint32(int32(*(*Tu8)(unsafe.Pointer(pCellIdx)))< usableSize-uint32(4) {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4623, libc.VaList(bp+56, pc, contentOffset, usableSize-uint32(4)))
doCoverageCheck = 0
goto _4
}
pCell = data + uintptr(pc)
(*(*func(*libc.TLS, uintptr, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FxParseCell})))(tls, *(*uintptr)(unsafe.Pointer(bp + 8)), pCell, bp+24)
if pc+uint32((*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnSize) > usableSize {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4653, 0)
doCoverageCheck = 0
goto _4
}
/* Check for integer primary key out of range */
if (*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FintKey != 0 {
if keyCanBeEqual != 0 {
v5 = libc.BoolInt32((*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnKey > *(*Ti64)(unsafe.Pointer(bp)))
} else {
v5 = libc.BoolInt32((*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnKey >= *(*Ti64)(unsafe.Pointer(bp)))
}
if v5 != 0 {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4677, libc.VaList(bp+56, (*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnKey))
}
*(*Ti64)(unsafe.Pointer(bp)) = (*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnKey
keyCanBeEqual = 0 /* Only the first key on the page may ==maxKey */
}
/* Check the content overflow list */
if (*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnPayload > uint32((*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnLocal) { /* First page of the overflow chain */
nPage = ((*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnPayload - uint32((*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnLocal) + usableSize - uint32(5)) / (usableSize - uint32(4))
pgnoOvfl = _sqlite3Get4byte(tls, pCell+uintptr(int32((*(*TCellInfo)(unsafe.Pointer(bp + 24))).FnSize)-int32(4)))
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
_checkPtrmap(tls, pCheck, pgnoOvfl, uint8(PTRMAP_OVERFLOW1), iPage)
}
_checkList(tls, pCheck, 0, pgnoOvfl, nPage)
}
if !((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).Fleaf != 0) {
/* Check sanity of left child page for internal pages */
pgno = int32(_sqlite3Get4byte(tls, pCell))
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
_checkPtrmap(tls, pCheck, uint32(pgno), uint8(PTRMAP_BTREE), iPage)
}
d2 = _checkTreePage(tls, pCheck, uint32(pgno), bp, *(*Ti64)(unsafe.Pointer(bp)))
keyCanBeEqual = 0
if d2 != depth {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4701, 0)
depth = d2
}
} else {
/* Populate the coverage-checking heap for leaf pages */
_btreeHeapInsert(tls, heap, pc< 0 {
/* For leaf pages, the min-heap has already been initialized and the
** cells have already been inserted. But for internal pages, that has
** not yet been done, so do it now */
if !((*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).Fleaf != 0) {
heap = (*TIntegrityCk)(unsafe.Pointer(pCheck)).Fheap
*(*Tu32)(unsafe.Pointer(heap)) = uint32(0)
i = nCell - int32(1)
for {
if !(i >= 0) {
break
}
pc = uint32(int32(*(*Tu8)(unsafe.Pointer(data + uintptr(cellStart+i*int32(2)))))< 0 {
/* Enforced by btreeComputeFreeSpace() */
size1 = int32(*(*Tu8)(unsafe.Pointer(data + uintptr(i+int32(2)))))<= *(*Tu32)(unsafe.Pointer(bp + 16))>>libc.Int32FromInt32(16) {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4726, libc.VaList(bp+56, *(*Tu32)(unsafe.Pointer(bp + 16))>>int32(16), iPage))
break
} else {
nFrag = int32(uint32(nFrag) + (*(*Tu32)(unsafe.Pointer(bp + 16))>>libc.Int32FromInt32(16) - prev&libc.Uint32FromInt32(0xffff) - libc.Uint32FromInt32(1)))
prev = *(*Tu32)(unsafe.Pointer(bp + 16))
}
}
nFrag = int32(uint32(nFrag) + (usableSize - prev&libc.Uint32FromInt32(0xffff) - libc.Uint32FromInt32(1)))
/* EVIDENCE-OF: R-43263-13491 The total number of bytes in all fragments
** is stored in the fifth field of the b-tree page header.
** EVIDENCE-OF: R-07161-27322 The one-byte integer at offset 7 gives the
** number of fragmented free bytes within the cell content area.
*/
if *(*Tu32)(unsafe.Pointer(heap)) == uint32(0) && nFrag != int32(*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(7))))) {
_checkAppendMsg(tls, pCheck, __ccgo_ts+4763, libc.VaList(bp+56, nFrag, int32(*(*Tu8)(unsafe.Pointer(data + uintptr(hdr+int32(7))))), iPage))
}
}
goto end_of_check
end_of_check:
;
if !(doCoverageCheck != 0) {
(*TMemPage)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 8)))).FisInit = savedIsInit
}
_releasePage(tls, *(*uintptr)(unsafe.Pointer(bp + 8)))
(*TIntegrityCk)(unsafe.Pointer(pCheck)).FzPfx = saved_zPfx
(*TIntegrityCk)(unsafe.Pointer(pCheck)).Fv1 = uint32(saved_v1)
(*TIntegrityCk)(unsafe.Pointer(pCheck)).Fv2 = saved_v2
return depth + int32(1)
}
// C documentation
//
// /*
// ** This routine does a complete check of the given BTree file. aRoot[] is
// ** an array of page numbers where each page number is the root page of
// ** a table. nRoot is the number of entries in aRoot.
// **
// ** A read-only or read-write transaction must be opened before calling
// ** this function.
// **
// ** Write the number of errors seen in *pnErr. Except for some memory
// ** allocation errors, an error message held in memory obtained from
// ** malloc is returned if *pnErr is non-zero. If *pnErr==0 then NULL is
// ** returned. If a memory allocation error occurs, NULL is returned.
// **
// ** If the first entry in aRoot[] is 0, that indicates that the list of
// ** root pages is incomplete. This is a "partial integrity-check". This
// ** happens when performing an integrity check on a single table. The
// ** zero is skipped, of course. But in addition, the freelist checks
// ** and the checks to make sure every page is referenced are also skipped,
// ** since obviously it is not possible to know which pages are covered by
// ** the unverified btrees. Except, if aRoot[1] is 1, then the freelist
// ** checks are still performed.
// */
func _sqlite3BtreeIntegrityCheck(tls *libc.TLS, db uintptr, p uintptr, aRoot uintptr, nRoot int32, mxErr int32, pnErr uintptr, pzOut uintptr) (r int32) {
bp := tls.Alloc(256)
defer tls.Free(256)
var bCkFreelist, bPartial int32
var i, mx, mxInHdr TPgno
var pBt uintptr
var savedDbFlags Tu64
var _ /* notUsed at bp+224 */ Ti64
var _ /* sCheck at bp+0 */ TIntegrityCk
var _ /* zErr at bp+120 */ [100]int8
_, _, _, _, _, _, _ = bCkFreelist, bPartial, i, mx, mxInHdr, pBt, savedDbFlags
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
savedDbFlags = (*Tsqlite3)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).Fdb)).Fflags
bPartial = 0 /* True if not checking all btrees */
bCkFreelist = int32(1) /* True to scan the freelist */
/* aRoot[0]==0 means this is a partial check */
if *(*TPgno)(unsafe.Pointer(aRoot)) == uint32(0) {
bPartial = int32(1)
if *(*TPgno)(unsafe.Pointer(aRoot + 1*4)) != uint32(1) {
bCkFreelist = 0
}
}
_sqlite3BtreeEnter(tls, p)
libc.Xmemset(tls, bp, 0, uint64(120))
(*(*TIntegrityCk)(unsafe.Pointer(bp))).Fdb = db
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FpBt = pBt
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FpPager = (*TBtShared)(unsafe.Pointer(pBt)).FpPager
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FnCkPage = _btreePagecount(tls, (*(*TIntegrityCk)(unsafe.Pointer(bp))).FpBt)
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FmxErr = mxErr
_sqlite3StrAccumInit(tls, bp+72, uintptr(0), bp+120, int32(100), int32(SQLITE_MAX_LENGTH))
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FerrMsg.FprintfFlags = uint8(SQLITE_PRINTF_INTERNAL)
if (*(*TIntegrityCk)(unsafe.Pointer(bp))).FnCkPage == uint32(0) {
goto integrity_ck_cleanup
}
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FaPgRef = _sqlite3MallocZero(tls, uint64((*(*TIntegrityCk)(unsafe.Pointer(bp))).FnCkPage/uint32(8)+uint32(1)))
if !((*(*TIntegrityCk)(unsafe.Pointer(bp))).FaPgRef != 0) {
_checkOom(tls, bp)
goto integrity_ck_cleanup
}
(*(*TIntegrityCk)(unsafe.Pointer(bp))).Fheap = _sqlite3PageMalloc(tls, int32((*TBtShared)(unsafe.Pointer(pBt)).FpageSize))
if (*(*TIntegrityCk)(unsafe.Pointer(bp))).Fheap == uintptr(0) {
_checkOom(tls, bp)
goto integrity_ck_cleanup
}
i = uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer(pBt)).FpageSize + libc.Uint32FromInt32(1)
if i <= (*(*TIntegrityCk)(unsafe.Pointer(bp))).FnCkPage {
_setPageReferenced(tls, bp, i)
}
/* Check the integrity of the freelist
*/
if bCkFreelist != 0 {
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FzPfx = __ccgo_ts + 4815
_checkList(tls, bp, int32(1), _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+32), _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+36))
(*(*TIntegrityCk)(unsafe.Pointer(bp))).FzPfx = uintptr(0)
}
/* Check all the tables.
*/
if !(bPartial != 0) {
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 {
mx = uint32(0)
i = uint32(0)
for {
if !(int32(i) < nRoot) {
break
}
if mx < *(*TPgno)(unsafe.Pointer(aRoot + uintptr(i)*4)) {
mx = *(*TPgno)(unsafe.Pointer(aRoot + uintptr(i)*4))
}
goto _1
_1:
;
i++
}
mxInHdr = _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+52)
if mx != mxInHdr {
_checkAppendMsg(tls, bp, __ccgo_ts+4826, libc.VaList(bp+240, mx, mxInHdr))
}
} else {
if _sqlite3Get4byte(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData+64) != uint32(0) {
_checkAppendMsg(tls, bp, __ccgo_ts+4871, 0)
}
}
}
*(*Tu64)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).Fdb + 48)) &= ^libc.Uint64FromInt32(SQLITE_CellSizeCk)
i = uint32(0)
for {
if !(int32(i) < nRoot && (*(*TIntegrityCk)(unsafe.Pointer(bp))).FmxErr != 0) {
break
}
if *(*TPgno)(unsafe.Pointer(aRoot + uintptr(i)*4)) == uint32(0) {
goto _2
}
if (*TBtShared)(unsafe.Pointer(pBt)).FautoVacuum != 0 && *(*TPgno)(unsafe.Pointer(aRoot + uintptr(i)*4)) > uint32(1) && !(bPartial != 0) {
_checkPtrmap(tls, bp, *(*TPgno)(unsafe.Pointer(aRoot + uintptr(i)*4)), uint8(PTRMAP_ROOTPAGE), uint32(0))
}
(*(*TIntegrityCk)(unsafe.Pointer(bp))).Fv0 = *(*TPgno)(unsafe.Pointer(aRoot + uintptr(i)*4))
_checkTreePage(tls, bp, *(*TPgno)(unsafe.Pointer(aRoot + uintptr(i)*4)), bp+224, libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<= int32(CURSOR_REQUIRESEEK) {
v1 = _btreeRestoreCursorPosition(tls, pCsr)
} else {
v1 = SQLITE_OK
}
rc = v1
if rc != SQLITE_OK {
return rc
}
if int32((*TBtCursor)(unsafe.Pointer(pCsr)).FeState) != CURSOR_VALID {
return int32(SQLITE_ABORT)
}
/* Save the positions of all other cursors open on this table. This is
** required in case any of them are holding references to an xFetch
** version of the b-tree page modified by the accessPayload call below.
**
** Note that pCsr must be open on an INTKEY table, and saveCursorPosition()
** (and hence saveAllCursors()) cannot fail on a BTREE_INTKEY table, so
** saveAllCursors() can only return SQLITE_OK.
*/
_saveAllCursors(tls, (*TBtCursor)(unsafe.Pointer(pCsr)).FpBt, (*TBtCursor)(unsafe.Pointer(pCsr)).FpgnoRoot, pCsr)
/* Check some assumptions:
** (a) the cursor is open for writing,
** (b) there is a read/write transaction open,
** (c) the connection holds a write-lock on the table (if required),
** (d) there are no conflicting read-locks, and
** (e) the cursor points at a valid row of an intKey table.
*/
if int32((*TBtCursor)(unsafe.Pointer(pCsr)).FcurFlags)&int32(BTCF_WriteFlag) == 0 {
return int32(SQLITE_READONLY)
}
return _accessPayload(tls, pCsr, offset, amt, z, int32(1))
}
// C documentation
//
// /*
// ** Mark this cursor as an incremental blob cursor.
// */
func _sqlite3BtreeIncrblobCursor(tls *libc.TLS, pCur uintptr) {
var p1 uintptr
_ = p1
p1 = pCur + 1
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(BTCF_Incrblob))
(*TBtree)(unsafe.Pointer((*TBtCursor)(unsafe.Pointer(pCur)).FpBtree)).FhasIncrblobCur = uint8(1)
}
// C documentation
//
// /*
// ** Set both the "read version" (single byte at byte offset 18) and
// ** "write version" (single byte at byte offset 19) fields in the database
// ** header to iVersion.
// */
func _sqlite3BtreeSetVersion(tls *libc.TLS, pBtree uintptr, iVersion int32) (r int32) {
var aData, pBt, p1, p2, p3 uintptr
var rc int32
_, _, _, _, _, _ = aData, pBt, rc, p1, p2, p3
pBt = (*TBtree)(unsafe.Pointer(pBtree)).FpBt /* Return code */
/* If setting the version fields to 1, do not automatically open the
** WAL connection, even if the version fields are currently set to 2.
*/
p1 = pBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(BTS_NO_WAL))
if iVersion == int32(1) {
p2 = pBt + 40
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(BTS_NO_WAL))
}
rc = _sqlite3BtreeBeginTrans(tls, pBtree, 0, uintptr(0))
if rc == SQLITE_OK {
aData = (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FaData
if int32(*(*Tu8)(unsafe.Pointer(aData + 18))) != int32(uint8(iVersion)) || int32(*(*Tu8)(unsafe.Pointer(aData + 19))) != int32(uint8(iVersion)) {
rc = _sqlite3BtreeBeginTrans(tls, pBtree, int32(2), uintptr(0))
if rc == SQLITE_OK {
rc = _sqlite3PagerWrite(tls, (*TMemPage)(unsafe.Pointer((*TBtShared)(unsafe.Pointer(pBt)).FpPage1)).FpDbPage)
if rc == SQLITE_OK {
*(*Tu8)(unsafe.Pointer(aData + 18)) = uint8(iVersion)
*(*Tu8)(unsafe.Pointer(aData + 19)) = uint8(iVersion)
}
}
}
}
p3 = pBt + 40
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^libc.Int32FromInt32(BTS_NO_WAL))
return rc
}
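// Illustrative note (not part of the generated translation): the two version
// fields written by _sqlite3BtreeSetVersion above are single bytes at offsets
// 18 and 19 of the database header; a value of 1 selects the legacy
// rollback-journal format and 2 selects the WAL format. Given a raw copy of
// the header (for example the first 100 bytes of the database file), they can
// be read back directly. The result names follow the comment above.
func exampleHeaderVersions(hdr []byte) (readVersion, writeVersion byte) {
	// hdr is assumed to hold at least the first 20 bytes of the database file.
	return hdr[18], hdr[19]
}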
// C documentation
//
// /*
// ** Return true if the cursor has a hint specified. This routine is
// ** only used from within assert() statements
// */
func _sqlite3BtreeCursorHasHint(tls *libc.TLS, pCsr uintptr, mask uint32) (r int32) {
return libc.BoolInt32(uint32((*TBtCursor)(unsafe.Pointer(pCsr)).Fhints)&mask != uint32(0))
}
// C documentation
//
// /*
// ** Return true if the given Btree is read-only.
// */
func _sqlite3BtreeIsReadonly(tls *libc.TLS, p uintptr) (r int32) {
return libc.BoolInt32(int32((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FbtsFlags)&int32(BTS_READ_ONLY) != 0)
}
// C documentation
//
// /*
// ** Return the size of the header added to each page by this module.
// */
func _sqlite3HeaderSizeBtree(tls *libc.TLS) (r int32) {
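/* This corresponds to ROUND8(sizeof(MemPage)) in the C source: 136 rounded
** up to the next multiple of 8, which here is 136 itself. */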
return int32((libc.Uint64FromInt64(136) + libc.Uint64FromInt32(7)) & uint64(^libc.Int32FromInt32(7)))
}
// C documentation
//
// /*
// ** If no transaction is active and the database is not a temp-db, clear
// ** the in-memory pager cache.
// */
func _sqlite3BtreeClearCache(tls *libc.TLS, p uintptr) {
var pBt uintptr
_ = pBt
pBt = (*TBtree)(unsafe.Pointer(p)).FpBt
if int32((*TBtShared)(unsafe.Pointer(pBt)).FinTransaction) == TRANS_NONE {
_sqlite3PagerClearCache(tls, (*TBtShared)(unsafe.Pointer(pBt)).FpPager)
}
}
// C documentation
//
// /*
// ** Return true if the Btree passed as the only argument is sharable.
// */
func _sqlite3BtreeSharable(tls *libc.TLS, p uintptr) (r int32) {
return int32((*TBtree)(unsafe.Pointer(p)).Fsharable)
}
// C documentation
//
// /*
// ** Return the number of connections to the BtShared object accessed by
// ** the Btree handle passed as the only argument. For private caches
// ** this is always 1. For shared caches it may be 1 or greater.
// */
func _sqlite3BtreeConnectionCount(tls *libc.TLS, p uintptr) (r int32) {
return (*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer(p)).FpBt)).FnRef
}
/************** End of btree.c ***********************************************/
/************** Begin file backup.c ******************************************/
/*
** 2009 January 28
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains the implementation of the sqlite3_backup_XXX()
** API functions and the related features.
*/
/* #include "sqliteInt.h" */
/* #include "btreeInt.h" */
/*
** Structure allocated for each backup operation.
*/
type Tsqlite3_backup1 = struct {
FpDestDb uintptr
FpDest uintptr
FiDestSchema Tu32
FbDestLocked int32
FiNext TPgno
FpSrcDb uintptr
FpSrc uintptr
Frc int32
FnRemaining TPgno
FnPagecount TPgno
FisAttached int32
FpNext uintptr
}
type sqlite3_backup1 = Tsqlite3_backup1
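// The fields above mirror the C struct sqlite3_backup. Roughly: FpDestDb and
// FpDest identify the destination connection and b-tree, FpSrcDb and FpSrc
// the source; FiDestSchema records the destination schema cookie captured
// when the destination was first locked; FbDestLocked is set once a
// write-transaction is open on the destination; FiNext is the page number of
// the next source page to copy; Frc holds the sticky backup error code;
// FnRemaining and FnPagecount back backup_remaining() and backup_pagecount();
// FisAttached is set once the object is registered with the source pager; and
// FpNext links backups that share the same source pager.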
/*
** THREAD SAFETY NOTES:
**
** Once it has been created using backup_init(), a single sqlite3_backup
** structure may be accessed via two groups of thread-safe entry points:
**
** * Via the sqlite3_backup_XXX() API functions backup_step() and
** backup_finish(). Both these functions obtain the source database
** handle mutex and the mutex associated with the source BtShared
** structure, in that order.
**
** * Via the BackupUpdate() and BackupRestart() functions, which are
** invoked by the pager layer to report various state changes in
** the page cache associated with the source database. The mutex
** associated with the source database BtShared structure will always
** be held when either of these functions are invoked.
**
** The other sqlite3_backup_XXX() API functions, backup_remaining() and
** backup_pagecount(), are not thread-safe functions. If they are called
** while some other thread is calling backup_step() or backup_finish(),
** the values returned may be invalid. There is no way for a call to
** BackupUpdate() or BackupRestart() to interfere with backup_remaining()
** or backup_pagecount().
**
** Depending on the SQLite configuration, the database handles and/or
** the Btree objects may have their own mutexes that require locking.
** Non-sharable Btrees (in-memory databases, for example) do not have
** associated mutexes.
*/
// C documentation
//
// /*
// ** Return a pointer corresponding to database zDb (i.e. "main", "temp")
// ** in connection handle pDb. If such a database cannot be found, return
// ** a NULL pointer and write an error message to pErrorDb.
// **
// ** If the "temp" database is requested, it may need to be opened by this
// ** function. If an error occurs while doing so, return 0 and write an
// ** error message to pErrorDb.
// */
func _findBtree(tls *libc.TLS, pErrorDb uintptr, pDb uintptr, zDb uintptr) (r uintptr) {
bp := tls.Alloc(448)
defer tls.Free(448)
var i, rc int32
var _ /* sParse at bp+0 */ TParse
_, _ = i, rc
i = _sqlite3FindDbName(tls, pDb, zDb)
if i == int32(1) {
rc = 0
_sqlite3ParseObjectInit(tls, bp, pDb)
if _sqlite3OpenTempDatabase(tls, bp) != 0 {
_sqlite3ErrorWithMsg(tls, pErrorDb, (*(*TParse)(unsafe.Pointer(bp))).Frc, __ccgo_ts+3797, libc.VaList(bp+432, (*(*TParse)(unsafe.Pointer(bp))).FzErrMsg))
rc = int32(SQLITE_ERROR)
}
_sqlite3DbFree(tls, pErrorDb, (*(*TParse)(unsafe.Pointer(bp))).FzErrMsg)
_sqlite3ParseObjectReset(tls, bp)
if rc != 0 {
return uintptr(0)
}
}
if i < 0 {
_sqlite3ErrorWithMsg(tls, pErrorDb, int32(SQLITE_ERROR), __ccgo_ts+4978, libc.VaList(bp+432, zDb))
return uintptr(0)
}
return (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(pDb)).FaDb + uintptr(i)*32))).FpBt
}
// C documentation
//
// /*
// ** Attempt to set the page size of the destination to match the page size
// ** of the source.
// */
func _setDestPgsz(tls *libc.TLS, p uintptr) (r int32) {
var rc int32
_ = rc
rc = _sqlite3BtreeSetPageSize(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest, _sqlite3BtreeGetPageSize(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc), 0, 0)
return rc
}
// C documentation
//
// /*
// ** Check that there is no open read-transaction on the b-tree passed as the
// ** second argument. If there is not, return SQLITE_OK. Otherwise, if there
// ** is an open read-transaction, return SQLITE_ERROR and leave an error
// ** message in database handle db.
// */
func _checkReadTransaction(tls *libc.TLS, db uintptr, p uintptr) (r int32) {
if _sqlite3BtreeTxnState(tls, p) != SQLITE_TXN_NONE {
_sqlite3ErrorWithMsg(tls, db, int32(SQLITE_ERROR), __ccgo_ts+4998, 0)
return int32(SQLITE_ERROR)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Create an sqlite3_backup process to copy the contents of zSrcDb from
// ** connection handle pSrcDb to zDestDb in pDestDb. If successful, return
// ** a pointer to the new sqlite3_backup object.
// **
// ** If an error occurs, NULL is returned and an error code and error message
// ** are stored in database handle pDestDb.
// */
func Xsqlite3_backup_init(tls *libc.TLS, pDestDb uintptr, zDestDb uintptr, pSrcDb uintptr, zSrcDb uintptr) (r uintptr) {
var p uintptr
_ = p /* Value to return */
/* Lock the source database handle. The destination database
** handle is not locked in this routine, but it is locked in
** sqlite3_backup_step(). The user is required to ensure that no
** other thread accesses the destination handle for the duration
** of the backup operation. Any attempt to use the destination
** database connection while a backup is in progress may cause
** a malfunction or a deadlock.
*/
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(pSrcDb)).Fmutex)
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(pDestDb)).Fmutex)
if pSrcDb == pDestDb {
_sqlite3ErrorWithMsg(tls, pDestDb, int32(SQLITE_ERROR), __ccgo_ts+5029, 0)
p = uintptr(0)
} else {
/* Allocate space for a new sqlite3_backup object...
** EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a
** call to sqlite3_backup_init() and is destroyed by a call to
** sqlite3_backup_finish(). */
p = _sqlite3MallocZero(tls, uint64(72))
if !(p != 0) {
_sqlite3Error(tls, pDestDb, int32(SQLITE_NOMEM))
}
}
/* If the allocation succeeded, populate the new object. */
if p != 0 {
(*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc = _findBtree(tls, pDestDb, pSrcDb, zSrcDb)
(*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest = _findBtree(tls, pDestDb, pDestDb, zDestDb)
(*Tsqlite3_backup)(unsafe.Pointer(p)).FpDestDb = pDestDb
(*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrcDb = pSrcDb
(*Tsqlite3_backup)(unsafe.Pointer(p)).FiNext = uint32(1)
(*Tsqlite3_backup)(unsafe.Pointer(p)).FisAttached = 0
if uintptr(0) == (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc || uintptr(0) == (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest || _checkReadTransaction(tls, pDestDb, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest) != SQLITE_OK {
/* One (or both) of the named databases did not exist or an OOM
** error was hit. Or there is a transaction open on the destination
** database. The error has already been written into the pDestDb
** handle. All that is left to do here is free the sqlite3_backup
** structure. */
Xsqlite3_free(tls, p)
p = uintptr(0)
}
}
if p != 0 {
(*TBtree)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc)).FnBackup++
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(pDestDb)).Fmutex)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(pSrcDb)).Fmutex)
return p
}
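// Illustrative sketch (not part of the generated translation): the canonical
// init/step/finish loop driven through the exported wrappers in this file
// (Xsqlite3_backup_init above, Xsqlite3_backup_step and
// Xsqlite3_backup_finish below). The connection handles pDestDb and pSrcDb
// and the NUL-terminated database name zMain (e.g. "main") are assumed to
// have been obtained elsewhere; none of them are created here.
func exampleBackupLoop(tls *libc.TLS, pDestDb, pSrcDb, zMain uintptr) (r int32) {
	p := Xsqlite3_backup_init(tls, pDestDb, zMain, pSrcDb, zMain)
	if p == uintptr(0) {
		// The error code and message have already been stored in pDestDb.
		return int32(SQLITE_ERROR)
	}
	// Copy up to 64 pages per call until the backup reports completion. A
	// production loop would typically retry on SQLITE_BUSY/SQLITE_LOCKED
	// after a short delay rather than giving up.
	for {
		if rc := Xsqlite3_backup_step(tls, p, int32(64)); rc != SQLITE_OK {
			// SQLITE_DONE on success, otherwise an error code.
			break
		}
	}
	// backup_finish releases the object and returns the final status
	// (SQLITE_OK on success).
	return Xsqlite3_backup_finish(tls, p)
}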
// C documentation
//
// /*
// ** Argument rc is an SQLite error code. Return true if this error is
// ** considered fatal if encountered during a backup operation. All errors
// ** are considered fatal except for SQLITE_BUSY and SQLITE_LOCKED.
// */
func _isFatalError(tls *libc.TLS, rc int32) (r int32) {
return libc.BoolInt32(rc != SQLITE_OK && rc != int32(SQLITE_BUSY) && rc != int32(SQLITE_LOCKED))
}
// C documentation
//
// /*
// ** Parameter zSrcData points to a buffer containing the data for
// ** page iSrcPg from the source database. Copy this data into the
// ** destination database.
// */
func _backupOnePage(tls *libc.TLS, p uintptr, iSrcPg TPgno, zSrcData uintptr, bUpdate int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var iDest TPgno
var iEnd, iOff Ti64
var nCopy, nDestPgsz, nSrcPgsz, rc, v1, v3, v4 int32
var pDestPager, zDestData, zIn, zOut uintptr
var v5 bool
var _ /* pDestPg at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = iDest, iEnd, iOff, nCopy, nDestPgsz, nSrcPgsz, pDestPager, rc, zDestData, zIn, zOut, v1, v3, v4, v5
pDestPager = _sqlite3BtreePager(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest)
nSrcPgsz = _sqlite3BtreeGetPageSize(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc)
nDestPgsz = _sqlite3BtreeGetPageSize(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest)
if nSrcPgsz < nDestPgsz {
v1 = nSrcPgsz
} else {
v1 = nDestPgsz
}
nCopy = v1
iEnd = int64(iSrcPg) * int64(nSrcPgsz)
rc = SQLITE_OK
/* This loop runs once for each destination page spanned by the source
** page. For each iteration, variable iOff is set to the byte offset
** of the destination page.
*/
iOff = iEnd - int64(nSrcPgsz)
for {
if !(rc == SQLITE_OK && iOff < iEnd) {
break
}
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
iDest = uint32(iOff/int64(nDestPgsz)) + uint32(1)
if iDest == uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest)).FpBt)).FpageSize+libc.Uint32FromInt32(1) {
goto _2
}
v3 = _sqlite3PagerGet(tls, pDestPager, iDest, bp, 0)
rc = v3
if v5 = SQLITE_OK == v3; v5 {
v4 = _sqlite3PagerWrite(tls, *(*uintptr)(unsafe.Pointer(bp)))
rc = v4
}
if v5 && SQLITE_OK == v4 {
zIn = zSrcData + uintptr(iOff%int64(nSrcPgsz))
zDestData = _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp)))
zOut = zDestData + uintptr(iOff%int64(nDestPgsz))
/* Copy the data from the source page into the destination page.
** Then clear the Btree layer MemPage.isInit flag. Both this module
** and the pager code use this trick (clearing the first byte
** of the page 'extra' space to invalidate the Btree layer's
** cached parse of the page). MemPage.isInit is marked
** "MUST BE FIRST" for this purpose.
*/
libc.Xmemcpy(tls, zOut, zIn, uint64(nCopy))
*(*Tu8)(unsafe.Pointer(_sqlite3PagerGetExtra(tls, *(*uintptr)(unsafe.Pointer(bp))))) = uint8(0)
if iOff == 0 && bUpdate == 0 {
_sqlite3Put4byte(tls, zOut+28, _sqlite3BtreeLastPage(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc))
}
}
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
goto _2
_2:
;
iOff += int64(nDestPgsz)
}
return rc
}
// C documentation
//
// /*
// ** If pFile is currently larger than iSize bytes, then truncate it to
// ** exactly iSize bytes. If pFile is not larger than iSize bytes, then
// ** this function is a no-op.
// **
// ** Return SQLITE_OK if everything is successful, or an SQLite error
// ** code if an error occurs.
// */
func _backupTruncateFile(tls *libc.TLS, pFile uintptr, iSize Ti64) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* iCurrent at bp+0 */ Ti64
_ = rc
rc = _sqlite3OsFileSize(tls, pFile, bp)
if rc == SQLITE_OK && *(*Ti64)(unsafe.Pointer(bp)) > iSize {
rc = _sqlite3OsTruncate(tls, pFile, iSize)
}
return rc
}
// C documentation
//
// /*
// ** Register this backup object with the associated source pager for
// ** callbacks when pages are changed or the cache invalidated.
// */
func _attachBackupObject(tls *libc.TLS, p uintptr) {
var pp uintptr
_ = pp
pp = _sqlite3PagerBackupPtr(tls, _sqlite3BtreePager(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc))
(*Tsqlite3_backup)(unsafe.Pointer(p)).FpNext = *(*uintptr)(unsafe.Pointer(pp))
*(*uintptr)(unsafe.Pointer(pp)) = p
(*Tsqlite3_backup)(unsafe.Pointer(p)).FisAttached = int32(1)
}
// C documentation
//
// /*
// ** Copy nPage pages from the source b-tree to the destination.
// */
func Xsqlite3_backup_step(tls *libc.TLS, p uintptr, nPage int32) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var bCloseTrans, destMode, ii, nDestTruncate, nSrcPage, pgszDest, pgszSrc, ratio, rc, v1, v7 int32
var iEnd, iOff, iSize Ti64
var iPg, iSrcPg, iSrcPg1 TPgno
var pDestPager, pFile, pSrcPager, zData uintptr
var v2, v8 bool
var v5 int64
var _ /* nDstPage at bp+8 */ int32
var _ /* pPg at bp+16 */ uintptr
var _ /* pSrcPg at bp+0 */ uintptr
var _ /* pSrcPg at bp+24 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = bCloseTrans, destMode, iEnd, iOff, iPg, iSize, iSrcPg, iSrcPg1, ii, nDestTruncate, nSrcPage, pDestPager, pFile, pSrcPager, pgszDest, pgszSrc, ratio, rc, zData, v1, v2, v5, v7, v8 /* Destination journal mode */
pgszSrc = 0 /* Source page size */
pgszDest = 0 /* Destination page size */
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrcDb)).Fmutex)
_sqlite3BtreeEnter(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc)
if (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDestDb != 0 {
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpDestDb)).Fmutex)
}
rc = (*Tsqlite3_backup)(unsafe.Pointer(p)).Frc
if !(_isFatalError(tls, rc) != 0) {
pSrcPager = _sqlite3BtreePager(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc) /* Source pager */
pDestPager = _sqlite3BtreePager(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest) /* Dest pager */
nSrcPage = -int32(1) /* Size of source db in pages */
bCloseTrans = 0 /* True if src db requires unlocking */
/* If the source pager is currently in a write-transaction, return
** SQLITE_BUSY immediately.
*/
if (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDestDb != 0 && int32((*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc)).FpBt)).FinTransaction) == int32(TRANS_WRITE) {
rc = int32(SQLITE_BUSY)
} else {
rc = SQLITE_OK
}
/* If there is no open read-transaction on the source database, open
** one now. If a transaction is opened here, then it will be closed
** before this function exits.
*/
if rc == SQLITE_OK && SQLITE_TXN_NONE == _sqlite3BtreeTxnState(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc) {
rc = _sqlite3BtreeBeginTrans(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc, 0, uintptr(0))
bCloseTrans = int32(1)
}
/* If the destination database has not yet been locked (i.e. if this
** is the first call to backup_step() for the current backup operation),
** try to set its page size to the same as the source database. This
** is especially important on ZipVFS systems, as in that case it is
** not possible to create a database file that uses one page size by
** writing to it with another. */
if (*Tsqlite3_backup)(unsafe.Pointer(p)).FbDestLocked == 0 && rc == SQLITE_OK && _setDestPgsz(tls, p) == int32(SQLITE_NOMEM) {
rc = int32(SQLITE_NOMEM)
}
/* Lock the destination database, if it is not locked already. */
if v2 = SQLITE_OK == rc && (*Tsqlite3_backup)(unsafe.Pointer(p)).FbDestLocked == 0; v2 {
v1 = _sqlite3BtreeBeginTrans(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest, int32(2), p+16)
rc = v1
}
if v2 && SQLITE_OK == v1 {
(*Tsqlite3_backup)(unsafe.Pointer(p)).FbDestLocked = int32(1)
}
/* Do not allow backup if the destination database is in WAL mode
** and the page sizes are different between source and destination */
pgszSrc = _sqlite3BtreeGetPageSize(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc)
pgszDest = _sqlite3BtreeGetPageSize(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest)
destMode = _sqlite3PagerGetJournalMode(tls, _sqlite3BtreePager(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest))
if SQLITE_OK == rc && (destMode == int32(PAGER_JOURNALMODE_WAL) || _sqlite3PagerIsMemdb(tls, pDestPager) != 0) && pgszSrc != pgszDest {
rc = int32(SQLITE_READONLY)
}
/* Now that there is a read-lock on the source database, query the
** source pager for the number of pages in the database.
*/
nSrcPage = int32(_sqlite3BtreeLastPage(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc))
ii = 0
for {
if !((nPage < 0 || ii < nPage) && (*Tsqlite3_backup)(unsafe.Pointer(p)).FiNext <= uint32(nSrcPage) && !(rc != 0)) {
break
}
iSrcPg = (*Tsqlite3_backup)(unsafe.Pointer(p)).FiNext /* Source page number */
if iSrcPg != uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc)).FpBt)).FpageSize+libc.Uint32FromInt32(1) { /* Source page object */
rc = _sqlite3PagerGet(tls, pSrcPager, iSrcPg, bp, int32(PAGER_GET_READONLY))
if rc == SQLITE_OK {
rc = _backupOnePage(tls, p, iSrcPg, _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp))), 0)
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
}
(*Tsqlite3_backup)(unsafe.Pointer(p)).FiNext++
goto _3
_3:
;
ii++
}
if rc == SQLITE_OK {
(*Tsqlite3_backup)(unsafe.Pointer(p)).FnPagecount = uint32(nSrcPage)
(*Tsqlite3_backup)(unsafe.Pointer(p)).FnRemaining = uint32(nSrcPage+int32(1)) - (*Tsqlite3_backup)(unsafe.Pointer(p)).FiNext
if (*Tsqlite3_backup)(unsafe.Pointer(p)).FiNext > uint32(nSrcPage) {
rc = int32(SQLITE_DONE)
} else {
if !((*Tsqlite3_backup)(unsafe.Pointer(p)).FisAttached != 0) {
_attachBackupObject(tls, p)
}
}
}
/* Update the schema version field in the destination database. This
** is to make sure that the schema-version really does change in
** the case where the source and destination databases have the
** same schema version.
*/
if rc == int32(SQLITE_DONE) {
if nSrcPage == 0 {
rc = _sqlite3BtreeNewDb(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest)
nSrcPage = int32(1)
}
if rc == SQLITE_OK || rc == int32(SQLITE_DONE) {
rc = _sqlite3BtreeUpdateMeta(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest, int32(1), (*Tsqlite3_backup)(unsafe.Pointer(p)).FiDestSchema+uint32(1))
}
if rc == SQLITE_OK {
if (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDestDb != 0 {
_sqlite3ResetAllSchemasOfConnection(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDestDb)
}
if destMode == int32(PAGER_JOURNALMODE_WAL) {
rc = _sqlite3BtreeSetVersion(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest, int32(2))
}
}
if rc == SQLITE_OK {
/* Set nDestTruncate to the final number of pages in the destination
** database. The complication here is that the destination page
** size may be different to the source page size.
**
** If the source page size is smaller than the destination page size,
** round up. In this case the call to sqlite3OsTruncate() below will
** fix the size of the file. However it is important to call
** sqlite3PagerTruncateImage() here so that any pages in the
** destination file that lie beyond the nDestTruncate page mark are
** journalled by PagerCommitPhaseOne() before they are destroyed
** by the file truncation.
*/
if pgszSrc < pgszDest {
ratio = pgszDest / pgszSrc
nDestTruncate = (nSrcPage + ratio - int32(1)) / ratio
if nDestTruncate == int32(uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest)).FpBt)).FpageSize+libc.Uint32FromInt32(1)) {
nDestTruncate--
}
} else {
nDestTruncate = nSrcPage * (pgszSrc / pgszDest)
}
if pgszSrc < pgszDest {
/* If the source page-size is smaller than the destination page-size,
** two extra things may need to happen:
**
** * The destination may need to be truncated, and
**
** * Data stored on the pages immediately following the
** pending-byte page in the source database may need to be
** copied into the destination database.
*/
iSize = int64(pgszSrc) * int64(nSrcPage)
pFile = _sqlite3PagerFile(tls, pDestPager)
/* This block ensures that all data required to recreate the original
** database has been stored in the journal for pDestPager and the
** journal synced to disk. So at this point we may safely modify
** the database file in any way, knowing that if a power failure
** occurs, the original database will be reconstructed from the
** journal file. */
_sqlite3PagerPagecount(tls, pDestPager, bp+8)
iPg = uint32(nDestTruncate)
for {
if !(rc == SQLITE_OK && iPg <= uint32(*(*int32)(unsafe.Pointer(bp + 8)))) {
break
}
if iPg != uint32(_sqlite3PendingByte)/(*TBtShared)(unsafe.Pointer((*TBtree)(unsafe.Pointer((*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest)).FpBt)).FpageSize+libc.Uint32FromInt32(1) {
rc = _sqlite3PagerGet(tls, pDestPager, iPg, bp+16, 0)
if rc == SQLITE_OK {
rc = _sqlite3PagerWrite(tls, *(*uintptr)(unsafe.Pointer(bp + 16)))
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp + 16)))
}
}
goto _4
_4:
;
iPg++
}
if rc == SQLITE_OK {
rc = _sqlite3PagerCommitPhaseOne(tls, pDestPager, uintptr(0), int32(1))
}
/* Write the extra pages and truncate the database file as required */
if int64(_sqlite3PendingByte+pgszDest) < iSize {
v5 = int64(_sqlite3PendingByte + pgszDest)
} else {
v5 = iSize
}
iEnd = v5
iOff = int64(_sqlite3PendingByte + pgszSrc)
for {
if !(rc == SQLITE_OK && iOff < iEnd) {
break
}
*(*uintptr)(unsafe.Pointer(bp + 24)) = uintptr(0)
iSrcPg1 = uint32(iOff/int64(pgszSrc) + libc.Int64FromInt32(1))
rc = _sqlite3PagerGet(tls, pSrcPager, iSrcPg1, bp+24, 0)
if rc == SQLITE_OK {
zData = _sqlite3PagerGetData(tls, *(*uintptr)(unsafe.Pointer(bp + 24)))
rc = _sqlite3OsWrite(tls, pFile, zData, pgszSrc, iOff)
}
_sqlite3PagerUnref(tls, *(*uintptr)(unsafe.Pointer(bp + 24)))
goto _6
_6:
;
iOff += int64(pgszSrc)
}
if rc == SQLITE_OK {
rc = _backupTruncateFile(tls, pFile, iSize)
}
/* Sync the database file to disk. */
if rc == SQLITE_OK {
rc = _sqlite3PagerSync(tls, pDestPager, uintptr(0))
}
} else {
_sqlite3PagerTruncateImage(tls, pDestPager, uint32(nDestTruncate))
rc = _sqlite3PagerCommitPhaseOne(tls, pDestPager, uintptr(0), 0)
}
/* Finish committing the transaction to the destination database. */
if v8 = SQLITE_OK == rc; v8 {
v7 = _sqlite3BtreeCommitPhaseTwo(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpDest, 0)
rc = v7
}
if v8 && SQLITE_OK == v7 {
rc = int32(SQLITE_DONE)
}
}
}
/* If bCloseTrans is true, then this function opened a read transaction
** on the source database. Close the read transaction here. There is
** no need to check the return values of the btree methods here, as
** "committing" a read-only transaction cannot fail.
*/
if bCloseTrans != 0 {
_sqlite3BtreeCommitPhaseOne(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc, uintptr(0))
_sqlite3BtreeCommitPhaseTwo(tls, (*Tsqlite3_backup)(unsafe.Pointer(p)).FpSrc, 0)
}
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(12)<rc) should be set to either SQLITE_DONE
** or an error code. */
Xsqlite3_backup_step(tls, bp, int32(0x7FFFFFFF))
rc = Xsqlite3_backup_finish(tls, bp)
if rc == SQLITE_OK {
p1 = (*TBtree)(unsafe.Pointer(pTo)).FpBt + 40
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(BTS_PAGESIZE_FIXED))
} else {
_sqlite3PagerClearCache(tls, _sqlite3BtreePager(tls, (*(*Tsqlite3_backup)(unsafe.Pointer(bp))).FpDest))
}
goto copy_finished
copy_finished:
;
_sqlite3BtreeLeave(tls, pFrom)
_sqlite3BtreeLeave(tls, pTo)
return rc
}
/************** End of backup.c **********************************************/
/************** Begin file vdbemem.c *****************************************/
/*
** 2004 May 26
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains code used to manipulate the "Mem" structure. A "Mem"
** stores a single value in the VDBE. Mem is an opaque structure visible
** only within the VDBE. Interface routines refer to a Mem using the
** name sqlite3_value.
*/
/* #include "sqliteInt.h" */
/* #include "vdbeInt.h" */
/* True if X is a power of two. 0 is considered a power of two here.
** In other words, return true if X has at most one bit set.
*/
// C documentation
//
// /*
// ** Render a Mem object which is one of MEM_Int, MEM_Real, or MEM_IntReal
// ** into a buffer.
// */
func _vdbeMemRenderNum(tls *libc.TLS, sz int32, zBuf uintptr, p uintptr) {
bp := tls.Alloc(48)
defer tls.Free(48)
var v1 float64
var _ /* acc at bp+0 */ TStrAccum
_ = v1
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Int) != 0 {
(*TMem)(unsafe.Pointer(p)).Fn = _sqlite3Int64ToText(tls, *(*Ti64)(unsafe.Pointer(p)), zBuf)
} else {
_sqlite3StrAccumInit(tls, bp, uintptr(0), zBuf, sz, 0)
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_IntReal) != 0 {
v1 = float64(*(*Ti64)(unsafe.Pointer(p)))
} else {
v1 = *(*float64)(unsafe.Pointer(p))
}
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5069, libc.VaList(bp+40, v1))
*(*int8)(unsafe.Pointer(zBuf + uintptr((*(*TStrAccum)(unsafe.Pointer(bp))).FnChar))) = 0 /* Fast version of sqlite3StrAccumFinish(&acc) */
(*TMem)(unsafe.Pointer(p)).Fn = int32((*(*TStrAccum)(unsafe.Pointer(bp))).FnChar)
}
}
// C documentation
//
// /*
// ** If pMem is an object with a valid string representation, this routine
// ** ensures the internal encoding for the string representation is
// ** 'desiredEnc', one of SQLITE_UTF8, SQLITE_UTF16LE or SQLITE_UTF16BE.
// **
// ** If pMem is not a string object, or the encoding of the string
// ** representation is already stored using the requested encoding, then this
// ** routine is a no-op.
// **
// ** SQLITE_OK is returned if the conversion is successful (or not required).
// ** SQLITE_NOMEM may be returned if a malloc() fails during conversion
// ** between formats.
// */
func _sqlite3VdbeChangeEncoding(tls *libc.TLS, pMem uintptr, desiredEnc int32) (r int32) {
var rc int32
_ = rc
if !(int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&libc.Int32FromInt32(MEM_Str) != 0) {
(*TMem)(unsafe.Pointer(pMem)).Fenc = uint8(desiredEnc)
return SQLITE_OK
}
if int32((*TMem)(unsafe.Pointer(pMem)).Fenc) == desiredEnc {
return SQLITE_OK
}
/* MemTranslate() may return SQLITE_OK or SQLITE_NOMEM. If NOMEM is returned,
** then the encoding of the value may not have changed.
*/
rc = _sqlite3VdbeMemTranslate(tls, pMem, uint8(desiredEnc))
return rc
}
// C documentation
//
// /*
// ** Make sure pMem->z points to a writable allocation of at least n bytes.
// **
// ** If the bPreserve argument is true, then the content of
// ** pMem->z is copied into the new allocation. pMem must be either a string or
// ** blob if bPreserve is true. If bPreserve is false, any prior content
// ** in pMem->z is discarded.
// */
func _sqlite3VdbeMemGrow(tls *libc.TLS, pMem uintptr, n int32, bPreserve int32) (r int32) {
var v1, p2 uintptr
_, _ = v1, p2
/* If the bPreserve flag is set to true, then the memory cell must already
** contain a valid string or blob value. */
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc > 0 && bPreserve != 0 && (*TMem)(unsafe.Pointer(pMem)).Fz == (*TMem)(unsafe.Pointer(pMem)).FzMalloc {
if (*TMem)(unsafe.Pointer(pMem)).Fdb != 0 {
v1 = _sqlite3DbReallocOrFree(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, (*TMem)(unsafe.Pointer(pMem)).Fz, uint64(n))
(*TMem)(unsafe.Pointer(pMem)).FzMalloc = v1
(*TMem)(unsafe.Pointer(pMem)).Fz = v1
} else {
(*TMem)(unsafe.Pointer(pMem)).FzMalloc = _sqlite3Realloc(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, uint64(n))
if (*TMem)(unsafe.Pointer(pMem)).FzMalloc == uintptr(0) {
Xsqlite3_free(tls, (*TMem)(unsafe.Pointer(pMem)).Fz)
}
(*TMem)(unsafe.Pointer(pMem)).Fz = (*TMem)(unsafe.Pointer(pMem)).FzMalloc
}
bPreserve = 0
} else {
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc > 0 {
_sqlite3DbFreeNN(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, (*TMem)(unsafe.Pointer(pMem)).FzMalloc)
}
(*TMem)(unsafe.Pointer(pMem)).FzMalloc = _sqlite3DbMallocRaw(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, uint64(n))
}
if (*TMem)(unsafe.Pointer(pMem)).FzMalloc == uintptr(0) {
_sqlite3VdbeMemSetNull(tls, pMem)
(*TMem)(unsafe.Pointer(pMem)).Fz = uintptr(0)
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = 0
return int32(SQLITE_NOMEM)
} else {
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = _sqlite3DbMallocSize(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, (*TMem)(unsafe.Pointer(pMem)).FzMalloc)
}
if bPreserve != 0 && (*TMem)(unsafe.Pointer(pMem)).Fz != 0 {
libc.Xmemcpy(tls, (*TMem)(unsafe.Pointer(pMem)).FzMalloc, (*TMem)(unsafe.Pointer(pMem)).Fz, uint64((*TMem)(unsafe.Pointer(pMem)).Fn))
}
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Dyn) != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMem)(unsafe.Pointer(pMem)).FxDel})))(tls, (*TMem)(unsafe.Pointer(pMem)).Fz)
}
(*TMem)(unsafe.Pointer(pMem)).Fz = (*TMem)(unsafe.Pointer(pMem)).FzMalloc
p2 = pMem + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^(libc.Int32FromInt32(MEM_Dyn) | libc.Int32FromInt32(MEM_Ephem) | libc.Int32FromInt32(MEM_Static)))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Change the pMem->zMalloc allocation to be at least szNew bytes.
// ** If pMem->zMalloc already meets or exceeds the requested size, this
// ** routine is a no-op.
// **
// ** Any prior string or blob content in the pMem object may be discarded.
// ** The pMem->xDel destructor is called, if it exists. Though MEM_Str
// ** and MEM_Blob values may be discarded, MEM_Int, MEM_Real, MEM_IntReal,
// ** and MEM_Null values are preserved.
// **
// ** Return SQLITE_OK on success or an error code (probably SQLITE_NOMEM)
// ** if unable to complete the resizing.
// */
func _sqlite3VdbeMemClearAndResize(tls *libc.TLS, pMem uintptr, szNew int32) (r int32) {
var p1 uintptr
_ = p1
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc < szNew {
return _sqlite3VdbeMemGrow(tls, pMem, szNew, 0)
}
(*TMem)(unsafe.Pointer(pMem)).Fz = (*TMem)(unsafe.Pointer(pMem)).FzMalloc
p1 = pMem + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & (libc.Int32FromInt32(MEM_Null) | libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_Real) | libc.Int32FromInt32(MEM_IntReal)))
return SQLITE_OK
}
// C documentation
//
// /*
// ** If pMem is already a string, detect if it is a zero-terminated
// ** string, or make it into one if possible, and mark it as such.
// **
// ** This is an optimization. Correct operation continues even if
// ** this routine is a no-op.
// */
func _sqlite3VdbeMemZeroTerminateIfAble(tls *libc.TLS, pMem uintptr) {
var p1, p2, p3 uintptr
_, _, _ = p1, p2, p3
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Term)|libc.Int32FromInt32(MEM_Ephem)|libc.Int32FromInt32(MEM_Static)) != int32(MEM_Str) {
/* pMem must be a string, and it cannot be an ephemeral or static string */
return
}
if int32((*TMem)(unsafe.Pointer(pMem)).Fenc) != int32(SQLITE_UTF8) {
return
}
if (*TMem)(unsafe.Pointer(pMem)).Fz == uintptr(0) {
return
}
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Dyn) != 0 {
if (*TMem)(unsafe.Pointer(pMem)).FxDel == __ccgo_fp(Xsqlite3_free) && Xsqlite3_msize(tls, (*TMem)(unsafe.Pointer(pMem)).Fz) >= uint64((*TMem)(unsafe.Pointer(pMem)).Fn+libc.Int32FromInt32(1)) {
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr((*TMem)(unsafe.Pointer(pMem)).Fn))) = 0
p1 = pMem + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(MEM_Term))
return
}
if (*TMem)(unsafe.Pointer(pMem)).FxDel == __ccgo_fp(_sqlite3RCStrUnref) {
/* Blindly assume that all RCStr objects are zero-terminated */
p2 = pMem + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_Term))
return
}
} else {
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc >= (*TMem)(unsafe.Pointer(pMem)).Fn+int32(1) {
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr((*TMem)(unsafe.Pointer(pMem)).Fn))) = 0
p3 = pMem + 20
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) | libc.Int32FromInt32(MEM_Term))
return
}
}
}
// C documentation
//
// /*
// ** It is already known that pMem contains an unterminated string.
// ** Add the zero terminator.
// **
// ** Three bytes of zero are added. In this way, there is guaranteed
// ** to be a double-zero byte at an even byte boundary in order to
// ** terminate a UTF16 string, even if the initial size of the buffer
// ** is an odd number of bytes.
// */
func _vdbeMemAddTerminator(tls *libc.TLS, pMem uintptr) (r int32) {
var p1 uintptr
_ = p1
if _sqlite3VdbeMemGrow(tls, pMem, (*TMem)(unsafe.Pointer(pMem)).Fn+int32(3), int32(1)) != 0 {
return int32(SQLITE_NOMEM)
}
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr((*TMem)(unsafe.Pointer(pMem)).Fn))) = 0
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr((*TMem)(unsafe.Pointer(pMem)).Fn+int32(1)))) = 0
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr((*TMem)(unsafe.Pointer(pMem)).Fn+int32(2)))) = 0
p1 = pMem + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(MEM_Term))
return SQLITE_OK
}
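// Worked example for the three-byte terminator above: if pMem->n is even
// (say 4), the zeros at offsets 4 and 5 already form an aligned 16-bit
// 0x0000; if pMem->n is odd (say 5), the zeros land at offsets 5, 6 and 7 and
// the aligned pair at offsets 6..7 is 0x0000. Either way a UTF-16 terminator
// exists at an even byte boundary.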
// C documentation
//
// /*
// ** Change pMem so that its MEM_Str or MEM_Blob value is stored in
// ** MEM.zMalloc, where it can be safely written.
// **
// ** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails.
// */
func _sqlite3VdbeMemMakeWriteable(tls *libc.TLS, pMem uintptr) (r int32) {
var rc, v1 int32
var p2 uintptr
_, _, _ = rc, v1, p2
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Zero) != 0 {
v1 = _sqlite3VdbeMemExpandBlob(tls, pMem)
} else {
v1 = 0
}
if v1 != 0 {
return int32(SQLITE_NOMEM)
}
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc == 0 || (*TMem)(unsafe.Pointer(pMem)).Fz != (*TMem)(unsafe.Pointer(pMem)).FzMalloc {
rc = _vdbeMemAddTerminator(tls, pMem)
if rc != 0 {
return rc
}
}
}
p2 = pMem + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^libc.Int32FromInt32(MEM_Ephem))
return SQLITE_OK
}
// C documentation
//
// /*
// ** If the given Mem* has a zero-filled tail, turn it into an ordinary
// ** blob stored in dynamically allocated space.
// */
func _sqlite3VdbeMemExpandBlob(tls *libc.TLS, pMem uintptr) (r int32) {
var nByte int32
var p1 uintptr
_, _ = nByte, p1
/* Set nByte to the number of bytes required to store the expanded blob. */
nByte = (*TMem)(unsafe.Pointer(pMem)).Fn + *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pMem)).Fu))
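/* The union access above reads Mem.u.nZero, which for a MEM_Zero value holds
** the number of trailing zero bytes that have not yet been materialized. */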
if nByte <= 0 {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Blob) == 0 {
return SQLITE_OK
}
nByte = int32(1)
}
if _sqlite3VdbeMemGrow(tls, pMem, nByte, int32(1)) != 0 {
return int32(SQLITE_NOMEM)
}
libc.Xmemset(tls, (*TMem)(unsafe.Pointer(pMem)).Fz+uintptr((*TMem)(unsafe.Pointer(pMem)).Fn), 0, uint64(*(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pMem)).Fu))))
*(*int32)(unsafe.Pointer(pMem + 16)) += *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pMem)).Fu))
p1 = pMem + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(MEM_Zero) | libc.Int32FromInt32(MEM_Term)))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Make sure the given Mem is \u0000 terminated.
// */
func _sqlite3VdbeMemNulTerminate(tls *libc.TLS, pMem uintptr) (r int32) {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Term)|libc.Int32FromInt32(MEM_Str)) != int32(MEM_Str) {
return SQLITE_OK /* Nothing to do */
} else {
return _vdbeMemAddTerminator(tls, pMem)
}
return r
}
// C documentation
//
// /*
// ** Add MEM_Str to the set of representations for the given Mem. This
// ** routine is only called if pMem is a number of some kind, not a NULL
// ** or a BLOB.
// **
// ** Existing representations MEM_Int, MEM_Real, or MEM_IntReal are invalidated
// ** if bForce is true but are retained if bForce is false.
// **
// ** A MEM_Null value will never be passed to this function. This function is
// ** used for converting values to text for returning to the user (i.e. via
// ** sqlite3_value_text()), or for ensuring that values to be used as btree
// ** keys are strings. In the former case a NULL pointer is returned to the
// ** user and in the latter case it is an internal programming error.
// */
func _sqlite3VdbeMemStringify(tls *libc.TLS, pMem uintptr, enc Tu8, bForce Tu8) (r int32) {
var nByte int32
var p1, p2 uintptr
_, _, _ = nByte, p1, p2
nByte = int32(32)
if _sqlite3VdbeMemClearAndResize(tls, pMem, nByte) != 0 {
(*TMem)(unsafe.Pointer(pMem)).Fenc = uint8(0)
return int32(SQLITE_NOMEM)
}
_vdbeMemRenderNum(tls, nByte, (*TMem)(unsafe.Pointer(pMem)).Fz, pMem)
(*TMem)(unsafe.Pointer(pMem)).Fenc = uint8(SQLITE_UTF8)
p1 = pMem + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | (libc.Int32FromInt32(MEM_Str) | libc.Int32FromInt32(MEM_Term)))
if bForce != 0 {
p2 = pMem + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^(libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_Real) | libc.Int32FromInt32(MEM_IntReal)))
}
_sqlite3VdbeChangeEncoding(tls, pMem, int32(enc))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Memory cell pMem contains the context of an aggregate function.
// ** This routine calls the finalize method for that function. The
// ** result of the aggregate is stored back into pMem.
// **
// ** Return SQLITE_ERROR if the finalizer reports an error. SQLITE_OK
// ** otherwise.
// */
func _sqlite3VdbeMemFinalize(tls *libc.TLS, pMem uintptr, pFunc uintptr) (r int32) {
bp := tls.Alloc(112)
defer tls.Free(112)
var _ /* ctx at bp+0 */ Tsqlite3_context
var _ /* t at bp+56 */ TMem
libc.Xmemset(tls, bp, 0, uint64(56))
libc.Xmemset(tls, bp+56, 0, uint64(56))
(*(*TMem)(unsafe.Pointer(bp + 56))).Fflags = uint16(MEM_Null)
(*(*TMem)(unsafe.Pointer(bp + 56))).Fdb = (*TMem)(unsafe.Pointer(pMem)).Fdb
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpOut = bp + 56
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpMem = pMem
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpFunc = pFunc
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).Fenc = (*Tsqlite3)(unsafe.Pointer((*(*TMem)(unsafe.Pointer(bp + 56))).Fdb)).Fenc
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TFuncDef)(unsafe.Pointer(pFunc)).FxFinalize})))(tls, bp) /* IMP: R-24505-23230 */
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc > 0 {
_sqlite3DbFreeNN(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, (*TMem)(unsafe.Pointer(pMem)).FzMalloc)
}
libc.Xmemcpy(tls, pMem, bp+56, uint64(56))
return (*(*Tsqlite3_context)(unsafe.Pointer(bp))).FisError
}
// C documentation
//
// /*
// ** Memory cell pAccum contains the context of an aggregate function.
// ** This routine calls the xValue method for that function and stores
// ** the results in memory cell pMem.
// **
// ** SQLITE_ERROR is returned if xValue() reports an error. SQLITE_OK
// ** otherwise.
// */
func _sqlite3VdbeMemAggValue(tls *libc.TLS, pAccum uintptr, pOut uintptr, pFunc uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var _ /* ctx at bp+0 */ Tsqlite3_context
libc.Xmemset(tls, bp, 0, uint64(56))
_sqlite3VdbeMemSetNull(tls, pOut)
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpOut = pOut
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpMem = pAccum
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpFunc = pFunc
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).Fenc = (*Tsqlite3)(unsafe.Pointer((*TMem)(unsafe.Pointer(pAccum)).Fdb)).Fenc
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TFuncDef)(unsafe.Pointer(pFunc)).FxValue})))(tls, bp)
return (*(*Tsqlite3_context)(unsafe.Pointer(bp))).FisError
}
// C documentation
//
// /*
// ** If the memory cell contains a value that must be freed by
// ** invoking the external callback in Mem.xDel, then this routine
// ** will free that value. It also sets Mem.flags to MEM_Null.
// **
// ** This is a helper routine for sqlite3VdbeMemSetNull() and
// ** for sqlite3VdbeMemRelease(). Use those other routines as the
// ** entry point for releasing Mem resources.
// */
func _vdbeMemClearExternAndSetNull(tls *libc.TLS, p uintptr) {
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Agg) != 0 {
_sqlite3VdbeMemFinalize(tls, p, *(*uintptr)(unsafe.Pointer(p)))
}
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Dyn) != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TMem)(unsafe.Pointer(p)).FxDel})))(tls, (*TMem)(unsafe.Pointer(p)).Fz)
}
(*TMem)(unsafe.Pointer(p)).Fflags = uint16(MEM_Null)
}
// C documentation
//
// /*
// ** Release memory held by the Mem p, both external memory cleared
// ** by p->xDel and memory in p->zMalloc.
// **
// ** This is a helper routine invoked by sqlite3VdbeMemRelease() in
// ** the unusual case where there really is memory in p that needs
// ** to be freed.
// */
func _vdbeMemClear(tls *libc.TLS, p uintptr) {
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_vdbeMemClearExternAndSetNull(tls, p)
}
if (*TMem)(unsafe.Pointer(p)).FszMalloc != 0 {
_sqlite3DbFreeNN(tls, (*TMem)(unsafe.Pointer(p)).Fdb, (*TMem)(unsafe.Pointer(p)).FzMalloc)
(*TMem)(unsafe.Pointer(p)).FszMalloc = 0
}
(*TMem)(unsafe.Pointer(p)).Fz = uintptr(0)
}
// C documentation
//
// /*
// ** Release any memory resources held by the Mem. Both the memory that is
// ** freed by Mem.xDel and the Mem.zMalloc allocation are freed.
// **
// ** Use this routine to clean up prior to abandoning a Mem, or to
// ** reset a Mem back to its minimum memory utilization.
// **
// ** Use sqlite3VdbeMemSetNull() to release just the Mem.xDel space
// ** prior to inserting new content into the Mem.
// */
func _sqlite3VdbeMemRelease(tls *libc.TLS, p uintptr) {
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 || (*TMem)(unsafe.Pointer(p)).FszMalloc != 0 {
_vdbeMemClear(tls, p)
}
}
// C documentation
//
// /* Like sqlite3VdbeMemRelease() but faster for cases where we
// ** know in advance that the Mem is not MEM_Dyn or MEM_Agg.
// */
func _sqlite3VdbeMemReleaseMalloc(tls *libc.TLS, p uintptr) {
if (*TMem)(unsafe.Pointer(p)).FszMalloc != 0 {
_vdbeMemClear(tls, p)
}
}
// C documentation
//
// /*
// ** Return some kind of integer value which is the best we can do
// ** at representing the value that *pMem describes as an integer.
// ** If pMem is an integer, then the value is exact. If pMem is
// ** a floating-point then the value returned is the integer part.
// ** If pMem is a string or blob, then we make an attempt to convert
// ** it into an integer and return that. If pMem represents an
// ** SQL-NULL value, return 0.
// **
// ** If pMem represents a string value, its encoding might be changed.
// */
func _memIntValue(tls *libc.TLS, pMem uintptr) (r Ti64) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* value at bp+0 */ Ti64
*(*Ti64)(unsafe.Pointer(bp)) = 0
_sqlite3Atoi64(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, bp, (*TMem)(unsafe.Pointer(pMem)).Fn, (*TMem)(unsafe.Pointer(pMem)).Fenc)
return *(*Ti64)(unsafe.Pointer(bp))
}
func _sqlite3VdbeIntValue(tls *libc.TLS, pMem uintptr) (r Ti64) {
var flags int32
_ = flags
flags = int32((*TMem)(unsafe.Pointer(pMem)).Fflags)
if flags&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
return *(*Ti64)(unsafe.Pointer(pMem))
} else {
if flags&int32(MEM_Real) != 0 {
return _sqlite3RealToI64(tls, *(*float64)(unsafe.Pointer(pMem)))
} else {
if flags&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 && (*TMem)(unsafe.Pointer(pMem)).Fz != uintptr(0) {
return _memIntValue(tls, pMem)
} else {
return 0
}
}
}
return r
}
// C documentation
//
// /*
// ** Return the best representation of pMem that we can get into a
// ** double. If pMem is already a double or an integer, return its
// ** value. If it is a string or blob, try to convert it to a double.
// ** If it is a NULL, return 0.0.
// */
func _memRealValue(tls *libc.TLS, pMem uintptr) (r float64) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* val at bp+0 */ float64
/* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
*(*float64)(unsafe.Pointer(bp)) = libc.Float64FromInt32(0)
_sqlite3AtoF(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, bp, (*TMem)(unsafe.Pointer(pMem)).Fn, (*TMem)(unsafe.Pointer(pMem)).Fenc)
return *(*float64)(unsafe.Pointer(bp))
}
func _sqlite3VdbeRealValue(tls *libc.TLS, pMem uintptr) (r float64) {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Real) != 0 {
return *(*float64)(unsafe.Pointer(pMem))
} else {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
return float64(*(*Ti64)(unsafe.Pointer(pMem)))
} else {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 {
return _memRealValue(tls, pMem)
} else {
/* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
return libc.Float64FromInt32(0)
}
}
}
return r
}
// C documentation
//
// /*
// ** Return 1 if pMem represents true, and return 0 if pMem represents false.
// ** Return the value ifNull if pMem is NULL.
// */
func _sqlite3VdbeBooleanValue(tls *libc.TLS, pMem uintptr, ifNull int32) (r int32) {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
return libc.BoolInt32(*(*Ti64)(unsafe.Pointer(pMem)) != 0)
}
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Null) != 0 {
return ifNull
}
return libc.BoolInt32(_sqlite3VdbeRealValue(tls, pMem) != float64(0))
}
// C documentation
//
// /*
// ** The MEM structure is already a MEM_Real or MEM_IntReal. Try to
// ** make it a MEM_Int if we can.
// */
func _sqlite3VdbeIntegerAffinity(tls *libc.TLS, pMem uintptr) {
var ix Ti64
_ = ix
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_IntReal) != 0 {
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pMem)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Int))
} else {
ix = _sqlite3RealToI64(tls, *(*float64)(unsafe.Pointer(pMem)))
/* Only mark the value as an integer if
**
** (1) the round-trip conversion real->int->real is a no-op, and
** (2) The integer is neither the largest nor the smallest
** possible integer (ticket #3922)
**
** The second and third terms in the following conditional enforce
** the second condition under the assumption that addition overflow causes
** values to wrap around.
*/
if *(*float64)(unsafe.Pointer(pMem)) == float64(ix) && ix > int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)) && ix < libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32) {
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pMem)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Int))
*(*Ti64)(unsafe.Pointer(pMem)) = ix
}
}
}
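// Illustrative sketch (not part of the generated amalgamation) of the rule the
// comment inside _sqlite3VdbeIntegerAffinity describes: a REAL is demoted to
// INTEGER only when the real->int->real round trip is lossless and the result
// is neither the smallest nor the largest representable 64-bit integer.
func exampleRealLooksLikeInt(v float64) (int64, bool) {
const smallest = int64(-1) - 0x7fffffffffffffff // -9223372036854775808
const largest = int64(0x7fffffffffffffff)
// Reject values at or beyond the int64 boundaries before converting.
if v < float64(smallest) || v >= float64(largest) {
return 0, false
}
i := int64(v)
// Require the round trip to be exact and the result to avoid the extremes.
if float64(i) == v && i > smallest && i < largest {
return i, true
}
return 0, false
}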
// C documentation
//
// /* Convert a floating point value to its closest integer. Do so in
// ** a way that avoids 'outside the range of representable values' warnings
// ** from UBSAN.
// */
func _sqlite3RealToI64(tls *libc.TLS, r float64) (r1 Ti64) {
if r < -libc.Float64FromFloat64(9.223372036854775e+18) {
return int64(-libc.Int32FromInt32(1)) - (libc.Int64FromUint32(0xffffffff) | libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32))
}
if r > +libc.Float64FromFloat64(9.223372036854775e+18) {
return libc.Int64FromUint32(0xffffffff) | libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)
}
return int64(r)
}
// C documentation
//
// /*
// ** Cast the datatype of the value in pMem according to the affinity aff.
// ** Casting is different from applying affinity in that a cast is forced.
// ** In other words, the value is converted into the desired affinity even
// ** if that results in loss of data.  This routine is used (for example) to
// ** implement the SQL "cast()" operator.
// */
func _sqlite3VdbeMemCast(tls *libc.TLS, pMem uintptr, aff Tu8, encoding Tu8) (r int32) {
var rc int32
var p1, p2, p3 uintptr
_, _, _, _ = rc, p1, p2, p3
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Null) != 0 {
return SQLITE_OK
}
switch int32(aff) {
case int32(SQLITE_AFF_BLOB): /* Really a cast to BLOB */
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Blob) == 0 {
_sqlite3ValueApplyAffinity(tls, pMem, uint8(SQLITE_AFF_TEXT), encoding)
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Str) != 0 {
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pMem)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Blob))
}
} else {
p1 = pMem + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(MEM_TypeMask) & ^libc.Int32FromInt32(MEM_Blob)))
}
case int32(SQLITE_AFF_NUMERIC):
_sqlite3VdbeMemNumerify(tls, pMem)
case int32(SQLITE_AFF_INTEGER):
_sqlite3VdbeMemIntegerify(tls, pMem)
case int32(SQLITE_AFF_REAL):
_sqlite3VdbeMemRealify(tls, pMem)
default: /* SQLITE_AFF_TEXT: convert the value into a text representation */
p2 = pMem + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Blob)>>libc.Int32FromInt32(3))
_sqlite3ValueApplyAffinity(tls, pMem, uint8(SQLITE_AFF_TEXT), encoding)
p3 = pMem + 20
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^(libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_Real) | libc.Int32FromInt32(MEM_IntReal) | libc.Int32FromInt32(MEM_Blob) | libc.Int32FromInt32(MEM_Zero)))
if int32(encoding) != int32(SQLITE_UTF8) {
*(*int32)(unsafe.Pointer(pMem + 16)) &= ^libc.Int32FromInt32(1)
}
rc = _sqlite3VdbeChangeEncoding(tls, pMem, int32(encoding))
if rc != 0 {
return rc
}
_sqlite3VdbeMemZeroTerminateIfAble(tls, pMem)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Initialize bulk memory to be a consistent Mem object.
// **
// ** The minimum amount of initialization feasible is performed.
// */
func _sqlite3VdbeMemInit(tls *libc.TLS, pMem uintptr, db uintptr, flags Tu16) {
(*TMem)(unsafe.Pointer(pMem)).Fflags = flags
(*TMem)(unsafe.Pointer(pMem)).Fdb = db
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = 0
}
// C documentation
//
// /*
// ** Delete any previous value and set the value stored in *pMem to NULL.
// **
// ** This routine calls the Mem.xDel destructor to dispose of values that
// ** require the destructor. But it preserves the Mem.zMalloc memory allocation.
// ** To free all resources, use sqlite3VdbeMemRelease(), which both calls this
// ** routine to invoke the destructor and deallocates Mem.zMalloc.
// **
// ** Use this routine to reset the Mem prior to inserting a new value.
// **
// ** Use sqlite3VdbeMemRelease() to completely erase the Mem prior to abandoning it.
// */
func _sqlite3VdbeMemSetNull(tls *libc.TLS, pMem uintptr) {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_vdbeMemClearExternAndSetNull(tls, pMem)
} else {
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(MEM_Null)
}
}
func _sqlite3ValueSetNull(tls *libc.TLS, p uintptr) {
_sqlite3VdbeMemSetNull(tls, p)
}
// C documentation
//
// /*
// ** Delete any previous value and set the value to be a BLOB of length
// ** n containing all zeros.
// */
func _sqlite3VdbeMemSetZeroBlob(tls *libc.TLS, pMem uintptr, n int32) {
_sqlite3VdbeMemRelease(tls, pMem)
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(libc.Int32FromInt32(MEM_Blob) | libc.Int32FromInt32(MEM_Zero))
(*TMem)(unsafe.Pointer(pMem)).Fn = 0
if n < 0 {
n = 0
}
*(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pMem)).Fu)) = n
(*TMem)(unsafe.Pointer(pMem)).Fenc = uint8(SQLITE_UTF8)
(*TMem)(unsafe.Pointer(pMem)).Fz = uintptr(0)
}
// C documentation
//
// /*
// ** The pMem is known to contain content that needs to be destroyed prior
// ** to a value change. So invoke the destructor, then set the value to
// ** a 64-bit integer.
// */
func _vdbeReleaseAndSetInt64(tls *libc.TLS, pMem uintptr, val Ti64) {
_sqlite3VdbeMemSetNull(tls, pMem)
*(*Ti64)(unsafe.Pointer(pMem)) = val
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(MEM_Int)
}
// C documentation
//
// /*
// ** Delete any previous value and set the value stored in *pMem to val,
// ** manifest type INTEGER.
// */
func _sqlite3VdbeMemSetInt64(tls *libc.TLS, pMem uintptr, val Ti64) {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_vdbeReleaseAndSetInt64(tls, pMem, val)
} else {
*(*Ti64)(unsafe.Pointer(pMem)) = val
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(MEM_Int)
}
}
// C documentation
//
// /* A no-op destructor */
func _sqlite3NoopDestructor(tls *libc.TLS, p uintptr) {
_ = p
}
// C documentation
//
// /*
// ** The value stored in *pMem should already be a NULL.
// ** Also store a pointer to go with it.
// */
func _sqlite3VdbeMemSetPointer(tls *libc.TLS, pMem uintptr, pPtr uintptr, zPType uintptr, xDestructor uintptr) {
var v1, v2 uintptr
_, _ = v1, v2
_vdbeMemClear(tls, pMem)
if zPType != 0 {
v1 = zPType
} else {
v1 = __ccgo_ts + 1650
}
*(*uintptr)(unsafe.Pointer(pMem)) = v1
(*TMem)(unsafe.Pointer(pMem)).Fz = pPtr
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(libc.Int32FromInt32(MEM_Null) | libc.Int32FromInt32(MEM_Dyn) | libc.Int32FromInt32(MEM_Subtype) | libc.Int32FromInt32(MEM_Term))
(*TMem)(unsafe.Pointer(pMem)).FeSubtype = uint8('p')
if xDestructor != 0 {
v2 = xDestructor
} else {
v2 = __ccgo_fp(_sqlite3NoopDestructor)
}
(*TMem)(unsafe.Pointer(pMem)).FxDel = v2
}
// C documentation
//
// /*
// ** Delete any previous value and set the value stored in *pMem to val,
// ** manifest type REAL.
// */
func _sqlite3VdbeMemSetDouble(tls *libc.TLS, pMem uintptr, val float64) {
_sqlite3VdbeMemSetNull(tls, pMem)
if !(_sqlite3IsNaN(tls, val) != 0) {
*(*float64)(unsafe.Pointer(pMem)) = val
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(MEM_Real)
}
}
// C documentation
//
// /*
// ** Delete any previous value and set the value of pMem to be an
// ** empty boolean index.
// **
// ** Return SQLITE_OK on success and SQLITE_NOMEM if a memory allocation
// ** error occurs.
// */
func _sqlite3VdbeMemSetRowSet(tls *libc.TLS, pMem uintptr) (r int32) {
var db, p uintptr
_, _ = db, p
db = (*TMem)(unsafe.Pointer(pMem)).Fdb
_sqlite3VdbeMemRelease(tls, pMem)
p = _sqlite3RowSetInit(tls, db)
if p == uintptr(0) {
return int32(SQLITE_NOMEM)
}
(*TMem)(unsafe.Pointer(pMem)).Fz = p
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(libc.Int32FromInt32(MEM_Blob) | libc.Int32FromInt32(MEM_Dyn))
(*TMem)(unsafe.Pointer(pMem)).FxDel = __ccgo_fp(_sqlite3RowSetDelete)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Return true if the Mem object contains a TEXT or BLOB that is
// ** too large - whose size exceeds SQLITE_MAX_LENGTH.
// */
func _sqlite3VdbeMemTooBig(tls *libc.TLS, p uintptr) (r int32) {
var n int32
_ = n
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 {
n = (*TMem)(unsafe.Pointer(p)).Fn
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Zero) != 0 {
n += *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(p)).Fu))
}
return libc.BoolInt32(n > *(*int32)(unsafe.Pointer((*TMem)(unsafe.Pointer(p)).Fdb + 136)))
}
return 0
}
// C documentation
//
// /*
// ** Make a shallow copy of pFrom into pTo.  Prior contents of
// ** pTo are freed. The pFrom->z field is not duplicated. If
// ** pFrom->z is used, then pTo->z points to the same thing as pFrom->z
// ** and flags gets srcType (either MEM_Ephem or MEM_Static).
// */
func _vdbeClrCopy(tls *libc.TLS, pTo uintptr, pFrom uintptr, eType int32) {
_vdbeMemClearExternAndSetNull(tls, pTo)
_sqlite3VdbeMemShallowCopy(tls, pTo, pFrom, eType)
}
func _sqlite3VdbeMemShallowCopy(tls *libc.TLS, pTo uintptr, pFrom uintptr, srcType int32) {
var p1, p2 uintptr
_, _ = p1, p2
if int32((*TMem)(unsafe.Pointer(pTo)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_vdbeClrCopy(tls, pTo, pFrom, srcType)
return
}
libc.Xmemcpy(tls, pTo, pFrom, uint64(libc.UintptrFromInt32(0)+24))
if int32((*TMem)(unsafe.Pointer(pFrom)).Fflags)&int32(MEM_Static) == 0 {
p1 = pTo + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(MEM_Dyn) | libc.Int32FromInt32(MEM_Static) | libc.Int32FromInt32(MEM_Ephem)))
p2 = pTo + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | srcType)
}
}
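// Illustrative sketch (not part of the generated amalgamation) of the
// ownership rule implemented above: a shallow copy shares the source buffer
// and, unless the source is static, is re-tagged with srcType (ephemeral or
// static) so that a later pass through the make-writeable path must duplicate
// it before modifying it.
type exampleCell struct {
z     []byte // shared or owned payload
owned bool   // true once the cell holds (or may safely keep) these bytes
}

func exampleShallowCopy(src exampleCell, srcIsStatic bool) exampleCell {
// The copy aliases src.z; it only keeps the bytes as-is when they are
// static and therefore never change underneath it.
return exampleCell{z: src.z, owned: srcIsStatic}
}

func exampleMakeWriteable(c exampleCell) exampleCell {
if c.owned {
return c
}
// Ephemeral data must be copied into space the cell controls before it
// may be modified, mirroring _sqlite3VdbeMemMakeWriteable.
dup := make([]byte, len(c.z))
copy(dup, c.z)
return exampleCell{z: dup, owned: true}
}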
// C documentation
//
// /*
// ** Make a full copy of pFrom into pTo. Prior contents of pTo are
// ** freed before the copy is made.
// */
func _sqlite3VdbeMemCopy(tls *libc.TLS, pTo uintptr, pFrom uintptr) (r int32) {
var rc int32
var p1, p2 uintptr
_, _, _ = rc, p1, p2
rc = SQLITE_OK
if int32((*TMem)(unsafe.Pointer(pTo)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_vdbeMemClearExternAndSetNull(tls, pTo)
}
libc.Xmemcpy(tls, pTo, pFrom, uint64(libc.UintptrFromInt32(0)+24))
p1 = pTo + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(MEM_Dyn))
if int32((*TMem)(unsafe.Pointer(pTo)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 {
if 0 == int32((*TMem)(unsafe.Pointer(pFrom)).Fflags)&int32(MEM_Static) {
p2 = pTo + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_Ephem))
rc = _sqlite3VdbeMemMakeWriteable(tls, pTo)
}
}
return rc
}
// C documentation
//
// /*
// ** Transfer the contents of pFrom to pTo. Any existing value in pTo is
// ** freed. If pFrom contains ephemeral data, a copy is made.
// **
// ** pFrom contains an SQL NULL when this routine returns.
// */
func _sqlite3VdbeMemMove(tls *libc.TLS, pTo uintptr, pFrom uintptr) {
_sqlite3VdbeMemRelease(tls, pTo)
libc.Xmemcpy(tls, pTo, pFrom, uint64(56))
(*TMem)(unsafe.Pointer(pFrom)).Fflags = uint16(MEM_Null)
(*TMem)(unsafe.Pointer(pFrom)).FszMalloc = 0
}
// C documentation
//
// /*
// ** Change the value of a Mem to be a string or a BLOB.
// **
// ** The memory management strategy depends on the value of the xDel
// ** parameter. If the value passed is SQLITE_TRANSIENT, then the
// ** string is copied into a (possibly existing) buffer managed by the
// ** Mem structure. Otherwise, any existing buffer is freed and the
// ** pointer copied.
// **
// ** If the string is too large (if it exceeds the SQLITE_LIMIT_LENGTH
// ** size limit) then no memory allocation occurs. If the string can be
// ** stored without allocating memory, then it is. If a memory allocation
// ** is required to store the string, then the value of pMem is unchanged. In
// ** either case, SQLITE_TOOBIG is returned.
// **
// ** The "enc" parameter is the text encoding for the string, or zero
// ** to store a blob.
// **
// ** If n is negative, then the string consists of all bytes up to but
// ** excluding the first zero character. The n parameter must be
// ** non-negative for blobs.
// */
func _sqlite3VdbeMemSetStr(tls *libc.TLS, pMem uintptr, z uintptr, n Ti64, enc Tu8, xDel uintptr) (r int32) {
var flags Tu16
var iLimit, v2, v4 int32
var nAlloc, nByte Ti64
var v3 int64
_, _, _, _, _, _, _ = flags, iLimit, nAlloc, nByte, v2, v3, v4
nByte = n /* New value for pMem->flags */
/* If z is a NULL pointer, set pMem to contain an SQL NULL. */
if !(z != 0) {
_sqlite3VdbeMemSetNull(tls, pMem)
return SQLITE_OK
}
if (*TMem)(unsafe.Pointer(pMem)).Fdb != 0 {
iLimit = *(*int32)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fdb + 136))
} else {
iLimit = int32(SQLITE_MAX_LENGTH)
}
if nByte < 0 {
if int32(enc) == int32(SQLITE_UTF8) {
nByte = int64(libc.Xstrlen(tls, z))
} else {
nByte = 0
for {
if !(nByte <= int64(iLimit) && int32(*(*int8)(unsafe.Pointer(z + uintptr(nByte))))|int32(*(*int8)(unsafe.Pointer(z + uintptr(nByte+int64(1))))) != 0) {
break
}
goto _1
_1:
;
nByte += int64(2)
}
}
flags = uint16(libc.Int32FromInt32(MEM_Str) | libc.Int32FromInt32(MEM_Term))
} else {
if int32(enc) == 0 {
flags = uint16(MEM_Blob)
enc = uint8(SQLITE_UTF8)
} else {
flags = uint16(MEM_Str)
}
}
if nByte > int64(iLimit) {
if xDel != 0 && xDel != uintptr(-libc.Int32FromInt32(1)) {
if xDel == __ccgo_fp(_sqlite3OomClear) {
_sqlite3DbFree(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, z)
} else {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{xDel})))(tls, z)
}
}
_sqlite3VdbeMemSetNull(tls, pMem)
return _sqlite3ErrorToParser(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, int32(SQLITE_TOOBIG))
}
/* The following block sets the new values of Mem.z and Mem.xDel. It
** also sets a flag in local variable "flags" to indicate the memory
** management (one of MEM_Dyn or MEM_Static).
*/
if xDel == uintptr(-libc.Int32FromInt32(1)) {
nAlloc = nByte
if int32(flags)&int32(MEM_Term) != 0 {
if int32(enc) == int32(SQLITE_UTF8) {
v2 = int32(1)
} else {
v2 = int32(2)
}
nAlloc += int64(v2)
}
if nAlloc > int64(libc.Int32FromInt32(32)) {
v3 = nAlloc
} else {
v3 = int64(libc.Int32FromInt32(32))
}
if _sqlite3VdbeMemClearAndResize(tls, pMem, int32(v3)) != 0 {
return int32(SQLITE_NOMEM)
}
libc.Xmemcpy(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, z, uint64(nAlloc))
} else {
_sqlite3VdbeMemRelease(tls, pMem)
(*TMem)(unsafe.Pointer(pMem)).Fz = z
if xDel == __ccgo_fp(_sqlite3OomClear) {
(*TMem)(unsafe.Pointer(pMem)).FzMalloc = (*TMem)(unsafe.Pointer(pMem)).Fz
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = _sqlite3DbMallocSize(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, (*TMem)(unsafe.Pointer(pMem)).FzMalloc)
} else {
(*TMem)(unsafe.Pointer(pMem)).FxDel = xDel
if xDel == libc.UintptrFromInt32(0) {
v4 = int32(MEM_Static)
} else {
v4 = int32(MEM_Dyn)
}
flags = Tu16(int32(flags) | v4)
}
}
(*TMem)(unsafe.Pointer(pMem)).Fn = int32(nByte & libc.Int64FromInt32(0x7fffffff))
(*TMem)(unsafe.Pointer(pMem)).Fflags = flags
(*TMem)(unsafe.Pointer(pMem)).Fenc = enc
if int32(enc) > int32(SQLITE_UTF8) && _sqlite3VdbeMemHandleBom(tls, pMem) != 0 {
return int32(SQLITE_NOMEM)
}
return SQLITE_OK
}
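// Illustrative sketch (not part of the generated amalgamation) of the
// destructor convention the comment above describes: a "transient" value is
// copied into cell-owned space, a nil destructor marks the buffer static, and
// any other destructor means the cell takes ownership and must call it when
// the value is released.
type exampleOwnedCell struct {
z    []byte
dtor func([]byte) // non-nil only for dynamically owned buffers (cf. MEM_Dyn)
}

func exampleSetStr(z []byte, transient bool, dtor func([]byte)) exampleOwnedCell {
if transient {
// SQLITE_TRANSIENT-style: copy now; the caller keeps its buffer and
// no destructor is remembered.
dup := make([]byte, len(z))
copy(dup, z)
return exampleOwnedCell{z: dup}
}
if dtor == nil {
// Static: the bytes outlive the cell, so they are shared as-is.
return exampleOwnedCell{z: z}
}
// Dynamic: keep the pointer and remember the destructor (cf. Mem.xDel);
// it runs when the cell is released.
return exampleOwnedCell{z: z, dtor: dtor}
}

func exampleReleaseCell(c *exampleOwnedCell) {
if c.dtor != nil {
c.dtor(c.z) // mirrors the xDel invocation in _vdbeMemClearExternAndSetNull
}
c.z, c.dtor = nil, nil
}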
// C documentation
//
// /*
// ** Move data out of a btree key or data field and into a Mem structure.
// ** The data is payload from the entry that pCur is currently pointing
// ** to. offset and amt determine what portion of the data or key to retrieve.
// ** The result is written into the pMem element.
// **
// ** The pMem object must have been initialized. This routine will use
// ** pMem->zMalloc to hold the content from the btree, if possible. New
// ** pMem->zMalloc space will be allocated if necessary. The calling routine
// ** is responsible for making sure that the pMem object is eventually
// ** destroyed.
// **
// ** If this routine fails for any reason (malloc returns NULL or unable
// ** to read from the disk) then the pMem is left in an inconsistent state.
// */
func _sqlite3VdbeMemFromBtree(tls *libc.TLS, pCur uintptr, offset Tu32, amt Tu32, pMem uintptr) (r int32) {
var rc, v1 int32
_, _ = rc, v1
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(MEM_Null)
if _sqlite3BtreeMaxRecordSize(tls, pCur) < int64(offset+amt) {
return _sqlite3CorruptError(tls, int32(83638))
}
v1 = _sqlite3VdbeMemClearAndResize(tls, pMem, int32(amt+uint32(1)))
rc = v1
if SQLITE_OK == v1 {
rc = _sqlite3BtreePayload(tls, pCur, offset, amt, (*TMem)(unsafe.Pointer(pMem)).Fz)
if rc == SQLITE_OK {
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pMem)).Fz + uintptr(amt))) = 0 /* Overrun area used when reading malformed records */
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(MEM_Blob)
(*TMem)(unsafe.Pointer(pMem)).Fn = int32(amt)
} else {
_sqlite3VdbeMemRelease(tls, pMem)
}
}
return rc
}
func _sqlite3VdbeMemFromBtreeZeroOffset(tls *libc.TLS, pCur uintptr, amt Tu32, pMem uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* available at bp+0 */ Tu32
_ = rc
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0) /* Number of bytes available on the local btree page */
rc = SQLITE_OK /* Return code */
/* Note: the calls to BtreeKeyFetch() and DataFetch() below assert()
** that both the BtShared and database handle mutexes are held. */
(*TMem)(unsafe.Pointer(pMem)).Fz = _sqlite3BtreePayloadFetch(tls, pCur, bp)
if amt <= *(*Tu32)(unsafe.Pointer(bp)) {
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(libc.Int32FromInt32(MEM_Blob) | libc.Int32FromInt32(MEM_Ephem))
(*TMem)(unsafe.Pointer(pMem)).Fn = int32(amt)
} else {
rc = _sqlite3VdbeMemFromBtree(tls, pCur, uint32(0), amt, pMem)
}
return rc
}
// C documentation
//
// /*
// ** The pVal argument is known to be a value other than NULL.
// ** Convert it into a string with encoding enc and return a pointer
// ** to a zero-terminated version of that string.
// */
func _valueToText(tls *libc.TLS, pVal uintptr, enc Tu8) (r uintptr) {
var v1 int32
var p2 uintptr
_, _ = v1, p2
if int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&(libc.Int32FromInt32(MEM_Blob)|libc.Int32FromInt32(MEM_Str)) != 0 {
if int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&int32(MEM_Zero) != 0 {
v1 = _sqlite3VdbeMemExpandBlob(tls, pVal)
} else {
v1 = 0
}
if v1 != 0 {
return uintptr(0)
}
p2 = pVal + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_Str))
if int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fenc) != int32(enc) & ^libc.Int32FromInt32(SQLITE_UTF16_ALIGNED) {
_sqlite3VdbeChangeEncoding(tls, pVal, int32(enc) & ^libc.Int32FromInt32(SQLITE_UTF16_ALIGNED))
}
if int32(enc)&int32(SQLITE_UTF16_ALIGNED) != 0 && int32(1) == int32(1)&int32(int64((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fz)) {
if _sqlite3VdbeMemMakeWriteable(tls, pVal) != SQLITE_OK {
return uintptr(0)
}
}
_sqlite3VdbeMemNulTerminate(tls, pVal) /* IMP: R-31275-44060 */
} else {
_sqlite3VdbeMemStringify(tls, pVal, enc, uint8(0))
}
if int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fenc) == int32(enc) & ^libc.Int32FromInt32(SQLITE_UTF16_ALIGNED) {
return (*Tsqlite3_value)(unsafe.Pointer(pVal)).Fz
} else {
return uintptr(0)
}
return r
}
// C documentation
//
// /* This function is only available internally, it is not part of the
// ** external API. It works in a similar way to sqlite3_value_text(),
// ** except the data returned is in the encoding specified by the second
// ** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or
// ** SQLITE_UTF8.
// **
// ** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED.
// ** If that is the case, then the result must be aligned on an even byte
// ** boundary.
// */
func _sqlite3ValueText(tls *libc.TLS, pVal uintptr, enc Tu8) (r uintptr) {
if !(pVal != 0) {
return uintptr(0)
}
if int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Term)) == libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Term) && int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fenc) == int32(enc) {
return (*Tsqlite3_value)(unsafe.Pointer(pVal)).Fz
}
if int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&int32(MEM_Null) != 0 {
return uintptr(0)
}
return _valueToText(tls, pVal, enc)
}
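// Illustrative sketch (not part of the generated amalgamation) of the
// SQLITE_UTF16_ALIGNED convention described above: a UTF-16 result must start
// on an even address, and _valueToText forces a writeable (re-allocated) copy
// when the current buffer is odd-aligned.
func exampleUTF16Aligned(p uintptr) bool {
// Bit 0 of the address decides the alignment, exactly like the
// int32(1)&int32(int64(pVal.Fz)) test in _valueToText.
return p&1 == 0
}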
// C documentation
//
// /* Return true if sqlite3_value object pVal is a string or blob value
// ** that uses the destructor specified in the second argument.
// **
// ** TODO: Maybe someday promote this interface into a published API so
// ** that third-party extensions can get access to it?
// */
func _sqlite3ValueIsOfClass(tls *libc.TLS, pVal uintptr, xFree uintptr) (r int32) {
if pVal != uintptr(0) && int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 && int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&int32(MEM_Dyn) != 0 && (*Tsqlite3_value)(unsafe.Pointer(pVal)).FxDel == xFree {
return int32(1)
} else {
return 0
}
return r
}
// C documentation
//
// /*
// ** Create a new sqlite3_value object.
// */
func _sqlite3ValueNew(tls *libc.TLS, db uintptr) (r uintptr) {
var p uintptr
_ = p
p = _sqlite3DbMallocZero(tls, db, uint64(56))
if p != 0 {
(*TMem)(unsafe.Pointer(p)).Fflags = uint16(MEM_Null)
(*TMem)(unsafe.Pointer(p)).Fdb = db
}
return p
}
/*
** Context object passed by sqlite3Stat4ProbeSetValue() through to
** valueNew(). See comments above valueNew() for details.
*/
type TValueNewStat4Ctx = struct {
FpParse uintptr
FpIdx uintptr
FppRec uintptr
FiVal int32
}
type ValueNewStat4Ctx = TValueNewStat4Ctx
// C documentation
//
// /*
// ** Allocate and return a pointer to a new sqlite3_value object. If
// ** the second argument to this function is NULL, the object is allocated
// ** by calling sqlite3ValueNew().
// **
// ** Otherwise, if the second argument is non-zero, then this function is
// ** being called indirectly by sqlite3Stat4ProbeSetValue(). If it has not
// ** already been allocated, allocate the UnpackedRecord structure that
// ** that function will return to its caller here. Then return a pointer to
// ** an sqlite3_value within the UnpackedRecord.a[] array.
// */
func _valueNew(tls *libc.TLS, db uintptr, p uintptr) (r uintptr) {
var i, nByte, nCol int32
var pIdx, pRec uintptr
_, _, _, _, _ = i, nByte, nCol, pIdx, pRec
if p != 0 {
pRec = *(*uintptr)(unsafe.Pointer((*TValueNewStat4Ctx)(unsafe.Pointer(p)).FppRec))
if pRec == uintptr(0) {
pIdx = (*TValueNewStat4Ctx)(unsafe.Pointer(p)).FpIdx /* Counter variable */
nCol = int32((*TIndex)(unsafe.Pointer(pIdx)).FnColumn) /* Number of index columns including rowid */
nByte = int32(uint64(56)*uint64(nCol) + (libc.Uint64FromInt64(40)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7)))
pRec = _sqlite3DbMallocZero(tls, db, uint64(nByte))
if pRec != 0 {
(*TUnpackedRecord)(unsafe.Pointer(pRec)).FpKeyInfo = _sqlite3KeyInfoOfIndex(tls, (*TValueNewStat4Ctx)(unsafe.Pointer(p)).FpParse, pIdx)
if (*TUnpackedRecord)(unsafe.Pointer(pRec)).FpKeyInfo != 0 {
(*TUnpackedRecord)(unsafe.Pointer(pRec)).FaMem = pRec + uintptr((libc.Uint64FromInt64(40)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7)))
i = 0
for {
if !(i < nCol) {
break
}
(*(*TMem)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(pRec)).FaMem + uintptr(i)*56))).Fflags = uint16(MEM_Null)
(*(*TMem)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(pRec)).FaMem + uintptr(i)*56))).Fdb = db
goto _1
_1:
;
i++
}
} else {
_sqlite3DbFreeNN(tls, db, pRec)
pRec = uintptr(0)
}
}
if pRec == uintptr(0) {
return uintptr(0)
}
*(*uintptr)(unsafe.Pointer((*TValueNewStat4Ctx)(unsafe.Pointer(p)).FppRec)) = pRec
}
(*TUnpackedRecord)(unsafe.Pointer(pRec)).FnField = uint16((*TValueNewStat4Ctx)(unsafe.Pointer(p)).FiVal + int32(1))
_sqlite3VdbeMemSetNull(tls, (*TUnpackedRecord)(unsafe.Pointer(pRec)).FaMem+uintptr((*TValueNewStat4Ctx)(unsafe.Pointer(p)).FiVal)*56)
return (*TUnpackedRecord)(unsafe.Pointer(pRec)).FaMem + uintptr((*TValueNewStat4Ctx)(unsafe.Pointer(p)).FiVal)*56
}
return _sqlite3ValueNew(tls, db)
}
// C documentation
//
// /*
// ** The expression object indicated by the second argument is guaranteed
// ** to be a scalar SQL function. If
// **
// ** * all function arguments are SQL literals,
// ** * one of the SQLITE_FUNC_CONSTANT or _SLOCHNG function flags is set, and
// ** * the SQLITE_FUNC_NEEDCOLL function flag is not set,
// **
// ** then this routine attempts to invoke the SQL function. Assuming no
// ** error occurs, output parameter (*ppVal) is set to point to a value
// ** object containing the result before returning SQLITE_OK.
// **
// ** Affinity aff is applied to the result of the function before returning.
// ** If the result is a text value, the sqlite3_value object uses encoding
// ** enc.
// **
// ** If the conditions above are not met, this function returns SQLITE_OK
// ** and sets (*ppVal) to NULL. Or, if an error occurs, (*ppVal) is set to
// ** NULL and an SQLite error code returned.
// */
func _valueFromFunction(tls *libc.TLS, db uintptr, p uintptr, enc Tu8, aff Tu8, ppVal uintptr, pCtx uintptr) (r int32) {
bp := tls.Alloc(80)
defer tls.Free(80)
var apVal, pFunc, pList, pVal uintptr
var i, nVal, rc int32
var _ /* ctx at bp+0 */ Tsqlite3_context
_, _, _, _, _, _, _ = apVal, i, nVal, pFunc, pList, pVal, rc /* Context object for function invocation */
apVal = uintptr(0) /* Function arguments */
nVal = 0 /* Size of apVal[] array */
pFunc = uintptr(0) /* Function definition */
pVal = uintptr(0) /* New value */
rc = SQLITE_OK /* Return code */
pList = uintptr(0) /* Iterator variable */
pList = *(*uintptr)(unsafe.Pointer(p + 32))
if pList != 0 {
nVal = (*TExprList)(unsafe.Pointer(pList)).FnExpr
}
pFunc = _sqlite3FindFunction(tls, db, *(*uintptr)(unsafe.Pointer(p + 8)), nVal, enc, uint8(0))
if (*TFuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&uint32(libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)|libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG)) == uint32(0) || (*TFuncDef)(unsafe.Pointer(pFunc)).FfuncFlags&uint32(libc.Int32FromInt32(SQLITE_FUNC_NEEDCOLL)|libc.Int32FromInt32(SQLITE_FUNC_RUNONLY)) != uint32(0) {
return SQLITE_OK
}
if pList != 0 {
apVal = _sqlite3DbMallocZero(tls, db, uint64(8)*uint64(nVal))
if apVal == uintptr(0) {
rc = int32(SQLITE_NOMEM)
goto value_from_function_out
}
i = 0
for {
if !(i < nVal) {
break
}
rc = _sqlite3ValueFromExpr(tls, db, (*(*TExprList_item)(unsafe.Pointer(pList + 8 + uintptr(i)*32))).FpExpr, enc, aff, apVal+uintptr(i)*8)
if *(*uintptr)(unsafe.Pointer(apVal + uintptr(i)*8)) == uintptr(0) || rc != SQLITE_OK {
goto value_from_function_out
}
goto _1
_1:
;
i++
}
}
pVal = _valueNew(tls, db, pCtx)
if pVal == uintptr(0) {
rc = int32(SQLITE_NOMEM)
goto value_from_function_out
}
libc.Xmemset(tls, bp, 0, uint64(56))
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpOut = pVal
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).FpFunc = pFunc
(*(*Tsqlite3_context)(unsafe.Pointer(bp))).Fenc = (*Tsqlite3)(unsafe.Pointer(db)).Fenc
(*(*func(*libc.TLS, uintptr, int32, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TFuncDef)(unsafe.Pointer(pFunc)).FxSFunc})))(tls, bp, nVal, apVal)
if (*(*Tsqlite3_context)(unsafe.Pointer(bp))).FisError != 0 {
rc = (*(*Tsqlite3_context)(unsafe.Pointer(bp))).FisError
_sqlite3ErrorMsg(tls, (*TValueNewStat4Ctx)(unsafe.Pointer(pCtx)).FpParse, __ccgo_ts+3797, libc.VaList(bp+64, Xsqlite3_value_text(tls, pVal)))
} else {
_sqlite3ValueApplyAffinity(tls, pVal, aff, uint8(SQLITE_UTF8))
rc = _sqlite3VdbeChangeEncoding(tls, pVal, int32(enc))
if rc == SQLITE_OK && _sqlite3VdbeMemTooBig(tls, pVal) != 0 {
rc = int32(SQLITE_TOOBIG)
(*TParse)(unsafe.Pointer((*TValueNewStat4Ctx)(unsafe.Pointer(pCtx)).FpParse)).FnErr++
}
}
goto value_from_function_out
value_from_function_out:
;
if rc != SQLITE_OK {
pVal = uintptr(0)
(*TParse)(unsafe.Pointer((*TValueNewStat4Ctx)(unsafe.Pointer(pCtx)).FpParse)).Frc = rc
}
if apVal != 0 {
i = 0
for {
if !(i < nVal) {
break
}
_sqlite3ValueFree(tls, *(*uintptr)(unsafe.Pointer(apVal + uintptr(i)*8)))
goto _2
_2:
;
i++
}
_sqlite3DbFreeNN(tls, db, apVal)
}
*(*uintptr)(unsafe.Pointer(ppVal)) = pVal
return rc
}
// C documentation
//
// /*
// ** Extract a value from the supplied expression in the manner described
// ** above sqlite3ValueFromExpr(). Allocate the sqlite3_value object
// ** using valueNew().
// **
// ** If pCtx is NULL and an error occurs after the sqlite3_value object
// ** has been allocated, it is freed before returning. Or, if pCtx is not
// ** NULL, it is assumed that the caller will free any allocated object
// ** in all cases.
// */
func _valueFromExpr(tls *libc.TLS, db uintptr, pExpr uintptr, enc Tu8, affinity Tu8, ppVal uintptr, pCtx uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var aff Tu8
var nVal, negInt, op, rc, v1, v2 int32
var zNeg, zVal, p3 uintptr
var _ /* pVal at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _ = aff, nVal, negInt, op, rc, zNeg, zVal, v1, v2, p3
zVal = uintptr(0)
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
negInt = int32(1)
zNeg = __ccgo_ts + 1650
rc = SQLITE_OK
for {
v1 = int32((*TExpr)(unsafe.Pointer(pExpr)).Fop)
op = v1
if !(v1 == int32(TK_UPLUS) || op == int32(TK_SPAN)) {
break
}
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
}
if op == int32(TK_REGISTER) {
op = int32((*TExpr)(unsafe.Pointer(pExpr)).Fop2)
}
/* Compressed expressions only appear when parsing the DEFAULT clause
** on a table column definition, and hence only when pCtx==0. This
** check ensures that an EP_TokenOnly expression is never passed down
** into valueFromFunction(). */
if op == int32(TK_CAST) {
aff = uint8(_sqlite3AffinityType(tls, *(*uintptr)(unsafe.Pointer(pExpr + 8)), uintptr(0)))
rc = _valueFromExpr(tls, db, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft, enc, aff, ppVal, pCtx)
if *(*uintptr)(unsafe.Pointer(ppVal)) != 0 {
if int32((*Tsqlite3_value)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(ppVal)))).Fflags)&int32(MEM_Zero) != 0 {
v2 = _sqlite3VdbeMemExpandBlob(tls, *(*uintptr)(unsafe.Pointer(ppVal)))
} else {
v2 = 0
}
rc = v2
_sqlite3VdbeMemCast(tls, *(*uintptr)(unsafe.Pointer(ppVal)), aff, enc)
_sqlite3ValueApplyAffinity(tls, *(*uintptr)(unsafe.Pointer(ppVal)), affinity, enc)
}
return rc
}
/* Handle negative integers in a single step. This is needed in the
** case when the value is -9223372036854775808.
*/
if op == int32(TK_UMINUS) && (int32((*TExpr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == int32(TK_INTEGER) || int32((*TExpr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(pExpr)).FpLeft)).Fop) == int32(TK_FLOAT)) {
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
op = int32((*TExpr)(unsafe.Pointer(pExpr)).Fop)
negInt = -int32(1)
zNeg = __ccgo_ts + 5076
}
if op == int32(TK_STRING) || op == int32(TK_FLOAT) || op == int32(TK_INTEGER) {
*(*uintptr)(unsafe.Pointer(bp)) = _valueNew(tls, db, pCtx)
if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) {
goto no_mem
}
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_IntValue)) != uint32(0) {
_sqlite3VdbeMemSetInt64(tls, *(*uintptr)(unsafe.Pointer(bp)), int64(*(*int32)(unsafe.Pointer(&(*TExpr)(unsafe.Pointer(pExpr)).Fu)))*int64(negInt))
} else {
zVal = _sqlite3MPrintf(tls, db, __ccgo_ts+5078, libc.VaList(bp+16, zNeg, *(*uintptr)(unsafe.Pointer(pExpr + 8))))
if zVal == uintptr(0) {
goto no_mem
}
_sqlite3ValueSetStr(tls, *(*uintptr)(unsafe.Pointer(bp)), -int32(1), zVal, uint8(SQLITE_UTF8), __ccgo_fp(_sqlite3OomClear))
}
if (op == int32(TK_INTEGER) || op == int32(TK_FLOAT)) && int32(affinity) == int32(SQLITE_AFF_BLOB) {
_sqlite3ValueApplyAffinity(tls, *(*uintptr)(unsafe.Pointer(bp)), uint8(SQLITE_AFF_NUMERIC), uint8(SQLITE_UTF8))
} else {
_sqlite3ValueApplyAffinity(tls, *(*uintptr)(unsafe.Pointer(bp)), affinity, uint8(SQLITE_UTF8))
}
if int32((*Tsqlite3_value)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)|libc.Int32FromInt32(MEM_Real)) != 0 {
p3 = *(*uintptr)(unsafe.Pointer(bp)) + 20
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^libc.Int32FromInt32(MEM_Str))
}
if int32(enc) != int32(SQLITE_UTF8) {
rc = _sqlite3VdbeChangeEncoding(tls, *(*uintptr)(unsafe.Pointer(bp)), int32(enc))
}
} else {
if op == int32(TK_UMINUS) {
/* This branch happens for multiple negative signs. Ex: -(-5) */
if SQLITE_OK == _valueFromExpr(tls, db, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft, enc, affinity, bp, pCtx) && *(*uintptr)(unsafe.Pointer(bp)) != uintptr(0) {
_sqlite3VdbeMemNumerify(tls, *(*uintptr)(unsafe.Pointer(bp)))
if int32((*Tsqlite3_value)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fflags)&int32(MEM_Real) != 0 {
*(*float64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))) = -*(*float64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp))))
} else {
if *(*Ti64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))) == int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)) {
*(*float64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))) = libc.Float64FromFloat64(9.223372036854776e+18)
(*Tsqlite3_value)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fflags = uint16(int32((*Tsqlite3_value)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Real))
} else {
*(*Ti64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))) = -*(*Ti64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp))))
}
}
_sqlite3ValueApplyAffinity(tls, *(*uintptr)(unsafe.Pointer(bp)), affinity, enc)
}
}
}
*(*uintptr)(unsafe.Pointer(ppVal)) = *(*uintptr)(unsafe.Pointer(bp))
return rc
no_mem:
;
if pCtx == uintptr(0) || (*TParse)(unsafe.Pointer((*TValueNewStat4Ctx)(unsafe.Pointer(pCtx)).FpParse)).FnErr == 0 {
_sqlite3OomFault(tls, db)
}
_sqlite3DbFree(tls, db, zVal)
if pCtx == uintptr(0) {
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
}
return int32(SQLITE_NOMEM)
}
// C documentation
//
// /*
// ** Extract the iCol-th column from the record in pRec.  Write
// ** the column value into *ppVal.  If *ppVal is initially NULL then a new
// ** sqlite3_value object is allocated.
// **
// ** If *ppVal is initially NULL then the caller is responsible for
// ** ensuring that the object written into *ppVal is eventually freed.
// */
func _sqlite3Stat4Column(tls *libc.TLS, db uintptr, pRec uintptr, nRec int32, iCol int32, ppVal uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var a, pMem, v4 uintptr
var i, iField, iHdr, szField, v3 int32
var _ /* t at bp+0 */ Tu32
var _ /* nHdr at bp+4 */ int32
_, _, _, _, _, _, _, _ = a, i, iField, iHdr, pMem, szField, v3, v4
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0) /* a column type code */
szField = 0                               /* Size of the current data field */
a = pRec                                  /* Typecast byte array */
pMem = *(*uintptr)(unsafe.Pointer(ppVal)) /* Write result into this Mem object */
iHdr = int32(_sqlite3GetVarint32(tls, a, bp+4))
if *(*int32)(unsafe.Pointer(bp + 4)) > nRec || iHdr >= *(*int32)(unsafe.Pointer(bp + 4)) {
return _sqlite3CorruptError(tls, int32(84303))
}
iField = *(*int32)(unsafe.Pointer(bp + 4))
i = 0
for {
if !(i <= iCol) {
break
}
if int32(*(*Tu8)(unsafe.Pointer(a + uintptr(iHdr)))) < int32(libc.Uint8FromInt32(0x80)) {
*(*Tu32)(unsafe.Pointer(bp)) = uint32(*(*Tu8)(unsafe.Pointer(a + uintptr(iHdr))))
v3 = libc.Int32FromInt32(1)
} else {
v3 = int32(_sqlite3GetVarint32(tls, a+uintptr(iHdr), bp))
}
iHdr += int32(uint8(v3))
if iHdr > *(*int32)(unsafe.Pointer(bp + 4)) {
return _sqlite3CorruptError(tls, int32(84309))
}
szField = int32(_sqlite3VdbeSerialTypeLen(tls, *(*Tu32)(unsafe.Pointer(bp))))
iField += szField
goto _2
_2:
;
i++
}
if iField > nRec {
return _sqlite3CorruptError(tls, int32(84315))
}
if pMem == uintptr(0) {
v4 = _sqlite3ValueNew(tls, db)
*(*uintptr)(unsafe.Pointer(ppVal)) = v4
pMem = v4
if pMem == uintptr(0) {
return int32(SQLITE_NOMEM)
}
}
_sqlite3VdbeSerialGet(tls, a+uintptr(iField-szField), *(*Tu32)(unsafe.Pointer(bp)), pMem)
(*TMem)(unsafe.Pointer(pMem)).Fenc = (*Tsqlite3)(unsafe.Pointer(db)).Fenc
return SQLITE_OK
}
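// Illustrative sketch (not part of the generated amalgamation): the loop above
// walks the record header one varint serial type at a time and sums each
// field's payload size. This helper restates the serial-type size rule from
// the SQLite record format that _sqlite3VdbeSerialTypeLen implements.
func exampleSerialTypeLen(t uint32) uint32 {
switch t {
case 0, 8, 9: // NULL, integer 0, integer 1 carry no payload bytes
return 0
case 1, 2, 3, 4: // 1-, 2-, 3- and 4-byte big-endian integers
return t
case 5: // 6-byte integer
return 6
case 6, 7: // 8-byte integer and 8-byte IEEE float
return 8
case 10, 11: // reserved for internal use
return 0
default: // BLOB (even) or TEXT (odd): size is (t-12)/2 or (t-13)/2
return (t - 12) / 2
}
}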
// C documentation
//
// /*
// ** Unless it is NULL, the argument must be an UnpackedRecord object returned
// ** by an earlier call to sqlite3Stat4ProbeSetValue(). This call deletes
// ** the object.
// */
func _sqlite3Stat4ProbeFree(tls *libc.TLS, pRec uintptr) {
var aMem, db uintptr
var i, nCol int32
_, _, _, _ = aMem, db, i, nCol
if pRec != 0 {
nCol = int32((*TKeyInfo)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(pRec)).FpKeyInfo)).FnAllField)
aMem = (*TUnpackedRecord)(unsafe.Pointer(pRec)).FaMem
db = (*(*TMem)(unsafe.Pointer(aMem))).Fdb
i = 0
for {
if !(i < nCol) {
break
}
_sqlite3VdbeMemRelease(tls, aMem+uintptr(i)*56)
goto _1
_1:
;
i++
}
_sqlite3KeyInfoUnref(tls, (*TUnpackedRecord)(unsafe.Pointer(pRec)).FpKeyInfo)
_sqlite3DbFreeNN(tls, db, pRec)
}
}
// C documentation
//
// /*
// ** Change the string value of an sqlite3_value object
// */
func _sqlite3ValueSetStr(tls *libc.TLS, v uintptr, n int32, z uintptr, enc Tu8, xDel uintptr) {
if v != 0 {
_sqlite3VdbeMemSetStr(tls, v, z, int64(n), enc, xDel)
}
}
// C documentation
//
// /*
// ** Free an sqlite3_value object
// */
func _sqlite3ValueFree(tls *libc.TLS, v uintptr) {
if !(v != 0) {
return
}
_sqlite3VdbeMemRelease(tls, v)
_sqlite3DbFreeNN(tls, (*TMem)(unsafe.Pointer(v)).Fdb, v)
}
// C documentation
//
// /*
// ** The sqlite3ValueBytes() routine returns the number of bytes in the
// ** sqlite3_value object assuming that it uses the encoding "enc".
// ** The valueBytes() routine is a helper function.
// */
func _valueBytes(tls *libc.TLS, pVal uintptr, enc Tu8) (r int32) {
var v1 int32
_ = v1
if _valueToText(tls, pVal, enc) != uintptr(0) {
v1 = (*Tsqlite3_value)(unsafe.Pointer(pVal)).Fn
} else {
v1 = 0
}
return v1
}
func _sqlite3ValueBytes(tls *libc.TLS, pVal uintptr, enc Tu8) (r int32) {
var p uintptr
_ = p
p = pVal
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Str) != 0 && int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fenc) == int32(enc) {
return (*TMem)(unsafe.Pointer(p)).Fn
}
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Str) != 0 && int32(enc) != int32(SQLITE_UTF8) && int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fenc) != int32(SQLITE_UTF8) {
return (*TMem)(unsafe.Pointer(p)).Fn
}
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Blob) != 0 {
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Zero) != 0 {
return (*TMem)(unsafe.Pointer(p)).Fn + *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(p)).Fu))
} else {
return (*TMem)(unsafe.Pointer(p)).Fn
}
}
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Null) != 0 {
return 0
}
return _valueBytes(tls, pVal, enc)
}
// C documentation
//
// /*
// ** Create a new virtual database engine.
// */
func _sqlite3VdbeCreate(tls *libc.TLS, pParse uintptr) (r uintptr) {
var db, p uintptr
_, _ = db, p
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
p = _sqlite3DbMallocRawNN(tls, db, uint64(304))
if p == uintptr(0) {
return uintptr(0)
}
libc.Xmemset(tls, p+136, 0, libc.Uint64FromInt64(304)-uint64(libc.UintptrFromInt32(0)+136))
(*TVdbe)(unsafe.Pointer(p)).Fdb = db
if (*Tsqlite3)(unsafe.Pointer(db)).FpVdbe != 0 {
(*TVdbe1)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FpVdbe)).FppVPrev = p + 16
}
(*TVdbe)(unsafe.Pointer(p)).FpVNext = (*Tsqlite3)(unsafe.Pointer(db)).FpVdbe
(*TVdbe)(unsafe.Pointer(p)).FppVPrev = db + 8
(*Tsqlite3)(unsafe.Pointer(db)).FpVdbe = p
(*TVdbe)(unsafe.Pointer(p)).FpParse = pParse
(*TParse)(unsafe.Pointer(pParse)).FpVdbe = p
_sqlite3VdbeAddOp2(tls, p, int32(OP_Init), 0, int32(1))
return p
}
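// Illustrative sketch (not part of the generated amalgamation) of the list
// linkage set up above: each Vdbe keeps a forward pointer and a pointer to
// the field that points at it (ppVPrev), so insertion at the head of the
// per-connection list needs no special cases.
type exampleNode struct {
next *exampleNode
prev **exampleNode // address of the pointer that refers to this node
}

func exampleLinkAtHead(head **exampleNode, n *exampleNode) {
if *head != nil {
(*head).prev = &n.next // the old head is now referenced through n.next
}
n.next = *head
n.prev = head
*head = n
}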
// C documentation
//
// /*
// ** Return the Parse object that owns a Vdbe object.
// */
func _sqlite3VdbeParser(tls *libc.TLS, p uintptr) (r uintptr) {
return (*TVdbe)(unsafe.Pointer(p)).FpParse
}
// C documentation
//
// /*
// ** Change the error string stored in Vdbe.zErrMsg
// */
func _sqlite3VdbeError(tls *libc.TLS, p uintptr, zFormat uintptr, va uintptr) {
var ap Tva_list
_ = ap
_sqlite3DbFree(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, (*TVdbe)(unsafe.Pointer(p)).FzErrMsg)
ap = va
(*TVdbe)(unsafe.Pointer(p)).FzErrMsg = _sqlite3VMPrintf(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, zFormat, ap)
_ = ap
}
// C documentation
//
// /*
// ** Remember the SQL string for a prepared statement.
// */
func _sqlite3VdbeSetSql(tls *libc.TLS, p uintptr, z uintptr, n int32, prepFlags Tu8) {
if p == uintptr(0) {
return
}
(*TVdbe)(unsafe.Pointer(p)).FprepFlags = prepFlags
if int32(prepFlags)&int32(SQLITE_PREPARE_SAVESQL) == 0 {
(*TVdbe)(unsafe.Pointer(p)).Fexpmask = uint32(0)
}
(*TVdbe)(unsafe.Pointer(p)).FzSql = _sqlite3DbStrNDup(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, z, uint64(n))
}
// C documentation
//
// /*
// ** Swap byte-code between two VDBE structures.
// **
// ** This happens after pB was previously run and returned
// ** SQLITE_SCHEMA. The statement was then reprepared in pA.
// ** This routine transfers the new bytecode in pA over to pB
// ** so that pB can be run again. The old pB byte code is
// ** moved back to pA so that it will be cleaned up when pA is
// ** finalized.
// */
func _sqlite3VdbeSwap(tls *libc.TLS, pA uintptr, pB uintptr) {
var pTmp, ppTmp, zTmp uintptr
var tmp TVdbe
_, _, _, _ = pTmp, ppTmp, tmp, zTmp
tmp = *(*TVdbe)(unsafe.Pointer(pA))
*(*TVdbe)(unsafe.Pointer(pA)) = *(*TVdbe)(unsafe.Pointer(pB))
*(*TVdbe)(unsafe.Pointer(pB)) = tmp
pTmp = (*TVdbe)(unsafe.Pointer(pA)).FpVNext
(*TVdbe)(unsafe.Pointer(pA)).FpVNext = (*TVdbe)(unsafe.Pointer(pB)).FpVNext
(*TVdbe)(unsafe.Pointer(pB)).FpVNext = pTmp
ppTmp = (*TVdbe)(unsafe.Pointer(pA)).FppVPrev
(*TVdbe)(unsafe.Pointer(pA)).FppVPrev = (*TVdbe)(unsafe.Pointer(pB)).FppVPrev
(*TVdbe)(unsafe.Pointer(pB)).FppVPrev = ppTmp
zTmp = (*TVdbe)(unsafe.Pointer(pA)).FzSql
(*TVdbe)(unsafe.Pointer(pA)).FzSql = (*TVdbe)(unsafe.Pointer(pB)).FzSql
(*TVdbe)(unsafe.Pointer(pB)).FzSql = zTmp
(*TVdbe)(unsafe.Pointer(pB)).Fexpmask = (*TVdbe)(unsafe.Pointer(pA)).Fexpmask
(*TVdbe)(unsafe.Pointer(pB)).FprepFlags = (*TVdbe)(unsafe.Pointer(pA)).FprepFlags
libc.Xmemcpy(tls, pB+212, pA+212, uint64(36))
*(*Tu32)(unsafe.Pointer(pB + 212 + 5*4))++
}
// C documentation
//
// /*
// ** Resize the Vdbe.aOp array so that it is at least nOp elements larger
// ** than its current size. nOp is guaranteed to be less than or equal
// ** to 1024/sizeof(Op).
// **
// ** If an out-of-memory error occurs while resizing the array, return
// ** SQLITE_NOMEM. In this case Vdbe.aOp and Vdbe.nOpAlloc remain
// ** unchanged (this is so that any opcodes already allocated can be
// ** correctly deallocated along with the rest of the Vdbe).
// */
func _growOpArray(tls *libc.TLS, v uintptr, nOp int32) (r int32) {
var nNew Tsqlite3_int64
var p, pNew uintptr
var v1 int64
var v2 int32
_, _, _, _, _ = nNew, p, pNew, v1, v2
p = (*TVdbe)(unsafe.Pointer(v)).FpParse
if (*TVdbe)(unsafe.Pointer(v)).FnOpAlloc != 0 {
v1 = int64(2) * int64((*TVdbe)(unsafe.Pointer(v)).FnOpAlloc)
} else {
v1 = int64(libc.Uint64FromInt32(1024) / libc.Uint64FromInt64(24))
}
/* The SQLITE_TEST_REALLOC_STRESS compile-time option is designed to force
** more frequent reallocs and hence provide more opportunities for
** simulated OOM faults. SQLITE_TEST_REALLOC_STRESS is generally used
** during testing only. With SQLITE_TEST_REALLOC_STRESS grow the op array
** by the minimum* amount required until the size reaches 512. Normal
** operation (without SQLITE_TEST_REALLOC_STRESS) is to double the current
** size of the op array or add 1KB of space, whichever is smaller. */
nNew = v1
_ = nOp
/* Ensure that the size of a VDBE does not grow too large */
if nNew > int64(*(*int32)(unsafe.Pointer((*TParse)(unsafe.Pointer(p)).Fdb + 136 + 5*4))) {
_sqlite3OomFault(tls, (*TParse)(unsafe.Pointer(p)).Fdb)
return int32(SQLITE_NOMEM)
}
pNew = _sqlite3DbRealloc(tls, (*TParse)(unsafe.Pointer(p)).Fdb, (*TVdbe)(unsafe.Pointer(v)).FaOp, uint64(nNew)*uint64(24))
if pNew != 0 {
(*TParse)(unsafe.Pointer(p)).FszOpAlloc = _sqlite3DbMallocSize(tls, (*TParse)(unsafe.Pointer(p)).Fdb, pNew)
(*TVdbe)(unsafe.Pointer(v)).FnOpAlloc = int32(uint64((*TParse)(unsafe.Pointer(p)).FszOpAlloc) / uint64(24))
(*TVdbe)(unsafe.Pointer(v)).FaOp = pNew
}
if pNew != 0 {
v2 = SQLITE_OK
} else {
v2 = int32(SQLITE_NOMEM)
}
return v2
}
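// Illustrative sketch (not part of the generated amalgamation) of the growth
// policy above: the opcode array starts at roughly 1KB worth of entries,
// doubles on each subsequent resize, and the request fails once it would
// exceed the configured per-statement limit.
func exampleGrowOpAlloc(cur, limit int) (int, bool) {
const opSize = 24 // assumed size of one generated Op entry on this target
next := 1024 / opSize
if cur != 0 {
next = 2 * cur
}
if next > limit {
return cur, false // caller reports SQLITE_NOMEM and keeps the old array
}
return next, true
}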
// C documentation
//
// /*
// ** Slow paths for sqlite3VdbeAddOp3() and sqlite3VdbeAddOp4Int() for the
// ** unusual case when we need to increase the size of the Vdbe.aOp[] array
// ** before adding the new opcode.
// */
func _growOp3(tls *libc.TLS, p uintptr, op int32, p1 int32, p2 int32, p3 int32) (r int32) {
if _growOpArray(tls, p, int32(1)) != 0 {
return int32(1)
}
return _sqlite3VdbeAddOp3(tls, p, op, p1, p2, p3)
}
func _addOp4IntSlow(tls *libc.TLS, p uintptr, op int32, p1 int32, p2 int32, p3 int32, p4 int32) (r int32) {
var addr int32
var pOp uintptr
_, _ = addr, pOp
addr = _sqlite3VdbeAddOp3(tls, p, op, p1, p2, p3)
if int32((*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FmallocFailed) == 0 {
pOp = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(addr)*24
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp4type = int8(-libc.Int32FromInt32(3))
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp4.Fi = p4
}
return addr
}
// C documentation
//
// /*
// ** Add a new instruction to the list of instructions current in the
// ** VDBE. Return the address of the new instruction.
// **
// ** Parameters:
// **
// ** p Pointer to the VDBE
// **
// ** op The opcode for this instruction
// **
// ** p1, p2, p3, p4 Operands
// */
func _sqlite3VdbeAddOp0(tls *libc.TLS, p uintptr, op int32) (r int32) {
return _sqlite3VdbeAddOp3(tls, p, op, 0, 0, 0)
}
func _sqlite3VdbeAddOp1(tls *libc.TLS, p uintptr, op int32, p1 int32) (r int32) {
return _sqlite3VdbeAddOp3(tls, p, op, p1, 0, 0)
}
func _sqlite3VdbeAddOp2(tls *libc.TLS, p uintptr, op int32, p1 int32, p2 int32) (r int32) {
return _sqlite3VdbeAddOp3(tls, p, op, p1, p2, 0)
}
func _sqlite3VdbeAddOp3(tls *libc.TLS, p uintptr, op int32, p1 int32, p2 int32, p3 int32) (r int32) {
var i int32
var pOp uintptr
_, _ = i, pOp
i = (*TVdbe)(unsafe.Pointer(p)).FnOp
if (*TVdbe)(unsafe.Pointer(p)).FnOpAlloc <= i {
return _growOp3(tls, p, op, p1, p2, p3)
}
(*TVdbe)(unsafe.Pointer(p)).FnOp++
pOp = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(i)*24
(*TVdbeOp)(unsafe.Pointer(pOp)).Fopcode = uint8(op)
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp5 = uint16(0)
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp1 = p1
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp2 = p2
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp3 = p3
*(*uintptr)(unsafe.Pointer(pOp + 16)) = uintptr(0)
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp4type = P4_NOTUSED
/* Replicate this logic in sqlite3VdbeAddOp4Int()
** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */
/* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
** Replicate in sqlite3VdbeAddOp4Int() */
return i
}
func _sqlite3VdbeAddOp4Int(tls *libc.TLS, p uintptr, op int32, p1 int32, p2 int32, p3 int32, p4 int32) (r int32) {
var i int32
var pOp uintptr
_, _ = i, pOp
i = (*TVdbe)(unsafe.Pointer(p)).FnOp
if (*TVdbe)(unsafe.Pointer(p)).FnOpAlloc <= i {
return _addOp4IntSlow(tls, p, op, p1, p2, p3, p4)
}
(*TVdbe)(unsafe.Pointer(p)).FnOp++
pOp = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(i)*24
(*TVdbeOp)(unsafe.Pointer(pOp)).Fopcode = uint8(op)
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp5 = uint16(0)
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp1 = p1
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp2 = p2
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp3 = p3
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp4.Fi = p4
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp4type = int8(-libc.Int32FromInt32(3))
/* Replicate this logic in sqlite3VdbeAddOp3()
** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */
/* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
** Replicate in sqlite3VdbeAddOp3() */
return i
}
// C documentation
//
// /* Generate code for an unconditional jump to instruction iDest
// */
func _sqlite3VdbeGoto(tls *libc.TLS, p uintptr, iDest int32) (r int32) {
return _sqlite3VdbeAddOp3(tls, p, int32(OP_Goto), 0, iDest, 0)
}
// C documentation
//
// /* Generate code to cause the string zStr to be loaded into
// ** register iDest
// */
func _sqlite3VdbeLoadString(tls *libc.TLS, p uintptr, iDest int32, zStr uintptr) (r int32) {
return _sqlite3VdbeAddOp4(tls, p, int32(OP_String8), 0, iDest, 0, zStr, 0)
}
// C documentation
//
// /*
// ** Generate code that initializes multiple registers to string or integer
// ** constants. The registers begin with iDest and increase consecutively.
// ** One register is initialized for each character in zTypes[]. For each
// ** "s" character in zTypes[], the register is a string if the argument is
// ** not NULL, or OP_Null if the value is a null pointer. For each "i" character
// ** in zTypes[], the register is initialized to an integer.
// **
// ** If the input string does not end with "X" then an OP_ResultRow instruction
// ** is generated for the values inserted.
// */
func _sqlite3VdbeMultiLoad(tls *libc.TLS, p uintptr, iDest int32, zTypes uintptr, va uintptr) {
var ap Tva_list
var c, v2 int8
var i, v3 int32
var z uintptr
_, _, _, _, _, _ = ap, c, i, z, v2, v3
ap = va
i = 0
for {
v2 = *(*int8)(unsafe.Pointer(zTypes + uintptr(i)))
c = v2
if !(int32(v2) != 0) {
break
}
if int32(c) == int32('s') {
z = libc.VaUintptr(&ap)
if z == uintptr(0) {
v3 = int32(OP_Null)
} else {
v3 = int32(OP_String8)
}
_sqlite3VdbeAddOp4(tls, p, v3, 0, iDest+i, 0, z, 0)
} else {
if int32(c) == int32('i') {
_sqlite3VdbeAddOp2(tls, p, int32(OP_Integer), libc.VaInt32(&ap), iDest+i)
} else {
goto skip_op_resultrow
}
}
goto _1
_1:
;
i++
}
_sqlite3VdbeAddOp2(tls, p, int32(OP_ResultRow), iDest, i)
goto skip_op_resultrow
skip_op_resultrow:
;
_ = ap
}
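// Illustrative sketch (not part of the generated amalgamation) of the zTypes
// mini-format handled above: one register per character, "s" for a string
// argument (a NULL pointer becomes an SQL NULL) and "i" for an integer; any
// other character (such as a trailing "X") stops the walk and suppresses the
// OP_ResultRow that would otherwise be emitted.
func exampleCountMultiLoadRegs(zTypes string) int {
n := 0
for _, c := range zTypes {
if c != 's' && c != 'i' {
break // e.g. the trailing "X" ends the list early
}
n++
}
return n
}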
// C documentation
//
// /*
// ** Add an opcode that includes the p4 value as a pointer.
// */
func _sqlite3VdbeAddOp4(tls *libc.TLS, p uintptr, op int32, p1 int32, p2 int32, p3 int32, zP4 uintptr, p4type int32) (r int32) {
var addr int32
_ = addr
addr = _sqlite3VdbeAddOp3(tls, p, op, p1, p2, p3)
_sqlite3VdbeChangeP4(tls, p, addr, zP4, p4type)
return addr
}
// C documentation
//
// /*
// ** Add an OP_Function or OP_PureFunc opcode.
// **
// ** The eCallCtx argument is information (typically taken from Expr.op2)
// ** that describes the calling context of the function. 0 means a general
// ** function call. NC_IsCheck means called by a check constraint,
// ** NC_IdxExpr means called as part of an index expression. NC_PartIdx
// ** means in the WHERE clause of a partial index. NC_GenCol means called
// ** while computing a generated column value. 0 is the usual case.
// */
func _sqlite3VdbeAddFunctionCall(tls *libc.TLS, pParse uintptr, p1 int32, p2 int32, p3 int32, nArg int32, pFunc uintptr, eCallCtx int32) (r int32) {
var addr, nByte, v1 int32
var pCtx, v uintptr
_, _, _, _, _ = addr, nByte, pCtx, v, v1
v = (*TParse)(unsafe.Pointer(pParse)).FpVdbe
nByte = int32(uint64(56) + uint64(nArg-libc.Int32FromInt32(1))*uint64(8))
pCtx = _sqlite3DbMallocRawNN(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, uint64(nByte))
if pCtx == uintptr(0) {
_freeEphemeralFunction(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pFunc)
return 0
}
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut = uintptr(0)
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpFunc = pFunc
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe = uintptr(0)
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError = 0
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).Fargc = uint8(nArg)
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FiOp = _sqlite3VdbeCurrentAddr(tls, v)
if eCallCtx != 0 {
v1 = int32(OP_PureFunc)
} else {
v1 = int32(OP_Function)
}
addr = _sqlite3VdbeAddOp4(tls, v, v1, p1, p2, p3, pCtx, -int32(15))
_sqlite3VdbeChangeP5(tls, v, uint16(eCallCtx&int32(NC_SelfRef)))
_sqlite3MayAbort(tls, pParse)
return addr
}
// C documentation
//
// /*
// ** Add an opcode that includes the p4 value with a P4_INT64 or
// ** P4_REAL type.
// */
func _sqlite3VdbeAddOp4Dup8(tls *libc.TLS, p uintptr, op int32, p1 int32, p2 int32, p3 int32, zP4 uintptr, p4type int32) (r int32) {
var p4copy uintptr
_ = p4copy
p4copy = _sqlite3DbMallocRawNN(tls, _sqlite3VdbeDb(tls, p), uint64(8))
if p4copy != 0 {
libc.Xmemcpy(tls, p4copy, zP4, uint64(8))
}
return _sqlite3VdbeAddOp4(tls, p, op, p1, p2, p3, p4copy, p4type)
}
// C documentation
//
// /*
// ** Return the address of the current EXPLAIN QUERY PLAN baseline.
// ** 0 means "none".
// */
func _sqlite3VdbeExplainParent(tls *libc.TLS, pParse uintptr) (r int32) {
var pOp uintptr
_ = pOp
if (*TParse)(unsafe.Pointer(pParse)).FaddrExplain == 0 {
return 0
}
pOp = _sqlite3VdbeGetOp(tls, (*TParse)(unsafe.Pointer(pParse)).FpVdbe, (*TParse)(unsafe.Pointer(pParse)).FaddrExplain)
return (*TVdbeOp)(unsafe.Pointer(pOp)).Fp2
}
/*
** Set a debugger breakpoint on the following routine in order to
** monitor the EXPLAIN QUERY PLAN code generation.
*/
// C documentation
//
// /*
// ** Add a new OP_Explain opcode.
// **
// ** If the bPush flag is true, then make this opcode the parent for
// ** subsequent Explains until sqlite3VdbeExplainPop() is called.
// */
func _sqlite3VdbeExplain(tls *libc.TLS, pParse uintptr, bPush Tu8, zFmt uintptr, va uintptr) (r int32) {
var addr, iThis int32
var ap Tva_list
var v, zMsg uintptr
_, _, _, _, _ = addr, ap, iThis, v, zMsg
addr = 0
/* Always include the OP_Explain opcodes if SQLITE_DEBUG is defined.
** But omit them (for performance) during production builds */
if int32((*TParse)(unsafe.Pointer(pParse)).Fexplain) == int32(2) || libc.Bool(0 != 0) {
ap = va
zMsg = _sqlite3VMPrintf(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, zFmt, ap)
_ = ap
v = (*TParse)(unsafe.Pointer(pParse)).FpVdbe
iThis = (*TVdbe)(unsafe.Pointer(v)).FnOp
addr = _sqlite3VdbeAddOp4(tls, v, int32(OP_Explain), iThis, (*TParse)(unsafe.Pointer(pParse)).FaddrExplain, 0, zMsg, -int32(6))
if bPush != 0 {
(*TParse)(unsafe.Pointer(pParse)).FaddrExplain = iThis
}
}
return addr
}
// C documentation
//
// /*
// ** Pop the EXPLAIN QUERY PLAN stack one level.
// */
func _sqlite3VdbeExplainPop(tls *libc.TLS, pParse uintptr) {
(*TParse)(unsafe.Pointer(pParse)).FaddrExplain = _sqlite3VdbeExplainParent(tls, pParse)
}
// C documentation
//
// /*
// ** Add an OP_ParseSchema opcode. This routine is broken out from
// ** sqlite3VdbeAddOp4() since it also needs to mark all btrees
// ** as having been used.
// **
// ** The zWhere string must have been obtained from sqlite3_malloc().
// ** This routine will take ownership of the allocated memory.
// */
func _sqlite3VdbeAddParseSchemaOp(tls *libc.TLS, p uintptr, iDb int32, zWhere uintptr, p5 Tu16) {
var j int32
_ = j
_sqlite3VdbeAddOp4(tls, p, int32(OP_ParseSchema), iDb, 0, 0, zWhere, -int32(6))
_sqlite3VdbeChangeP5(tls, p, p5)
j = 0
for {
if !(j < (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FnDb) {
break
}
_sqlite3VdbeUsesBtree(tls, p, j)
goto _1
_1:
;
j++
}
_sqlite3MayAbort(tls, (*TVdbe)(unsafe.Pointer(p)).FpParse)
}
// C documentation
//
// /* Insert the end of a co-routine
// */
func _sqlite3VdbeEndCoroutine(tls *libc.TLS, v uintptr, regYield int32) {
_sqlite3VdbeAddOp1(tls, v, int32(OP_EndCoroutine), regYield)
/* Clear the temporary register cache, thereby ensuring that each
** co-routine has its own independent set of registers, because co-routines
** might expect their registers to be preserved across an OP_Yield, and
** that could cause problems if two or more co-routines are using the same
** temporary register.
*/
(*TParse)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).FpParse)).FnTempReg = uint8(0)
(*TParse)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).FpParse)).FnRangeReg = 0
}
// C documentation
//
// /*
// ** Create a new symbolic label for an instruction that has yet to be
// ** coded. The symbolic label is really just a negative number. The
// ** label can be used as the P2 value of an operation. Later, when
// ** the label is resolved to a specific address, the VDBE will scan
// ** through its operation list and change all values of P2 which match
// ** the label into the resolved address.
// **
// ** The VDBE knows that a P2 value is a label because labels are
// ** always negative and P2 values are supposed to be non-negative.
// ** Hence, a negative P2 value is a label that has yet to be resolved.
// ** (Later:) This is only true for opcodes that have the OPFLG_JUMP
// ** property.
// **
// ** Variable usage notes:
// **
// ** Parse.aLabel[x] Stores the address that the x-th label resolves
// ** into. For testing (SQLITE_DEBUG), unresolved
// ** labels store -1, but that is not required.
// ** Parse.nLabelAlloc Number of slots allocated to Parse.aLabel[]
// ** Parse.nLabel The *negative* of the number of labels that have
// ** been issued. The negative is stored because
// ** that gives a performance improvement over storing
// ** the equivalent positive value.
// */
func _sqlite3VdbeMakeLabel(tls *libc.TLS, pParse uintptr) (r int32) {
var v1 int32
var v2 uintptr
_, _ = v1, v2
v2 = pParse + 68
*(*int32)(unsafe.Pointer(v2))--
v1 = *(*int32)(unsafe.Pointer(v2))
return v1
}
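// Typical label lifecycle (a sketch; v, pParse and the opcode choice are
// illustrative): obtain a negative label, use it as a jump target, then
// resolve it once the destination address is known:
//
//	lbl := _sqlite3VdbeMakeLabel(tls, pParse)
//	_sqlite3VdbeAddOp2(tls, v, int32(OP_Goto), 0, lbl)
//	/* ... code that the jump should land after ... */
//	_sqlite3VdbeResolveLabel(tls, v, lbl)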
// C documentation
//
// /*
// ** Resolve label "x" to be the address of the next instruction to
// ** be inserted. The parameter "x" must have been obtained from
// ** a prior call to sqlite3VdbeMakeLabel().
// */
func _resizeResolveLabel(tls *libc.TLS, p uintptr, v uintptr, j int32) {
var nNewSize int32
_ = nNewSize
nNewSize = int32(10) - (*TParse)(unsafe.Pointer(p)).FnLabel
(*TParse)(unsafe.Pointer(p)).FaLabel = _sqlite3DbReallocOrFree(tls, (*TParse)(unsafe.Pointer(p)).Fdb, (*TParse)(unsafe.Pointer(p)).FaLabel, uint64(nNewSize)*uint64(4))
if (*TParse)(unsafe.Pointer(p)).FaLabel == uintptr(0) {
(*TParse)(unsafe.Pointer(p)).FnLabelAlloc = 0
} else {
if nNewSize >= int32(100) && nNewSize/int32(100) > (*TParse)(unsafe.Pointer(p)).FnLabelAlloc/int32(100) {
_sqlite3ProgressCheck(tls, p)
}
(*TParse)(unsafe.Pointer(p)).FnLabelAlloc = nNewSize
*(*int32)(unsafe.Pointer((*TParse)(unsafe.Pointer(p)).FaLabel + uintptr(j)*4)) = (*TVdbe)(unsafe.Pointer(v)).FnOp
}
}
func _sqlite3VdbeResolveLabel(tls *libc.TLS, v uintptr, x int32) {
var j int32
var p uintptr
_, _ = j, p
p = (*TVdbe)(unsafe.Pointer(v)).FpParse
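/* Labels count down from -1, so label x occupies slot ^x == -x-1 of
** Parse.aLabel[]. */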
j = ^x
if (*TParse)(unsafe.Pointer(p)).FnLabelAlloc+(*TParse)(unsafe.Pointer(p)).FnLabel < 0 {
_resizeResolveLabel(tls, p, v, j)
} else {
/* Labels may only be resolved once */
*(*int32)(unsafe.Pointer((*TParse)(unsafe.Pointer(p)).FaLabel + uintptr(j)*4)) = (*TVdbe)(unsafe.Pointer(v)).FnOp
}
}
// C documentation
//
// /*
// ** Mark the VDBE as one that can only be run one time.
// */
func _sqlite3VdbeRunOnlyOnce(tls *libc.TLS, p uintptr) {
_sqlite3VdbeAddOp2(tls, p, int32(OP_Expire), int32(1), int32(1))
}
// C documentation
//
// /*
// ** Mark the VDBE as one that can be run multiple times.
// */
func _sqlite3VdbeReusable(tls *libc.TLS, p uintptr) {
var i int32
_ = i
i = int32(1)
for {
if !(i < (*TVdbe)(unsafe.Pointer(p)).FnOp) {
break
}
if int32((*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(i)*24))).Fopcode) == int32(OP_Expire) {
(*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp + 1*24))).Fopcode = uint8(OP_Noop)
break
}
goto _1
_1:
;
i++
}
}
// C documentation
//
// /*
// ** This routine is called after all opcodes have been inserted. It loops
// ** through all the opcodes and fixes up some details.
// **
// ** (1) For each jump instruction with a negative P2 value (a label)
// ** resolve the P2 value to an actual address.
// **
// ** (2) Compute the maximum number of arguments used by any SQL function
// ** and store that value in *pMaxFuncArgs.
// **
// ** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately
// ** indicate what the prepared statement actually does.
// **
// ** (4) (discontinued)
// **
// ** (5) Reclaim the memory allocated for storing labels.
// **
// ** This routine will only function correctly if the mkopcodeh.tcl generator
// ** script numbers the opcodes correctly. Changes to this routine must be
// ** coordinated with changes to mkopcodeh.tcl.
// */
func _resolveP2Values(tls *libc.TLS, p uintptr, pMaxFuncArgs uintptr) {
var aLabel, pOp, pParse uintptr
var n, nMaxArgs int32
_, _, _, _, _ = aLabel, n, nMaxArgs, pOp, pParse
nMaxArgs = *(*int32)(unsafe.Pointer(pMaxFuncArgs))
pParse = (*TVdbe)(unsafe.Pointer(p)).FpParse
aLabel = (*TParse)(unsafe.Pointer(pParse)).FaLabel
/* tag-20230419-1 */
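/* The two bit-field writes below are the C assignments p->readOnly = 1 and
** p->bIsReader = 0 (item 3 of the header comment): bit 6 of the flags
** halfword at offset 200 is readOnly and bit 7 is bIsReader. */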
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(1), 6, 0x40)
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 7, 0x80)
pOp = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr((*TVdbe)(unsafe.Pointer(p)).FnOp-int32(1))*24
for int32(1) != 0 {
/* Only JUMP opcodes and the short list of special opcodes in the switch
** below need to be considered. The mkopcodeh.tcl generator script groups
** all these opcodes together near the front of the opcode list. Skip
// ** any opcode that does not need processing by virtue of the fact that
** it is larger than SQLITE_MX_JUMP_OPCODE, as a performance optimization.
*/
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) <= int32(SQLITE_MX_JUMP_OPCODE) {
/* NOTE: Be sure to update mkopcodeh.tcl when adding or removing
** cases from this switch! */
switch int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) {
case int32(OP_Transaction):
if (*TOp)(unsafe.Pointer(pOp)).Fp2 != 0 {
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 6, 0x40)
}
fallthrough
case int32(OP_AutoCommit):
fallthrough
case OP_Savepoint:
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(1), 7, 0x80)
case int32(OP_Checkpoint):
fallthrough
case int32(OP_Vacuum):
fallthrough
case int32(OP_JournalMode):
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 6, 0x40)
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(1), 7, 0x80)
case int32(OP_Init):
goto resolve_p2_values_loop_exit
case int32(OP_VUpdate):
if (*TOp)(unsafe.Pointer(pOp)).Fp2 > nMaxArgs {
nMaxArgs = (*TOp)(unsafe.Pointer(pOp)).Fp2
}
case int32(OP_VFilter):
n = (*(*TOp)(unsafe.Pointer(pOp + uintptr(-libc.Int32FromInt32(1))*24))).Fp1
if n > nMaxArgs {
nMaxArgs = n
}
/* Fall through into the default case */
fallthrough
default:
if (*TOp)(unsafe.Pointer(pOp)).Fp2 < 0 {
/* The mkopcodeh.tcl script has so arranged things that the only
** non-jump opcodes less than SQLITE_MX_JUMP_OPCODE are guaranteed to
** have non-negative values for P2. */
/* True because of tag-20230419-1 */
(*TOp)(unsafe.Pointer(pOp)).Fp2 = *(*int32)(unsafe.Pointer(aLabel + uintptr(^(*TOp)(unsafe.Pointer(pOp)).Fp2)*4))
}
break
}
}
pOp -= 24
}
goto resolve_p2_values_loop_exit
resolve_p2_values_loop_exit:
;
if aLabel != 0 {
_sqlite3DbNNFreeNN(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, (*TParse)(unsafe.Pointer(pParse)).FaLabel)
(*TParse)(unsafe.Pointer(pParse)).FaLabel = uintptr(0)
}
(*TParse)(unsafe.Pointer(pParse)).FnLabel = 0
*(*int32)(unsafe.Pointer(pMaxFuncArgs)) = nMaxArgs
}
// C documentation
//
// /*
// ** Return the address of the next instruction to be inserted.
// */
func _sqlite3VdbeCurrentAddr(tls *libc.TLS, p uintptr) (r int32) {
return (*TVdbe)(unsafe.Pointer(p)).FnOp
}
/*
** Verify that at least N opcode slots are available in p without
** having to malloc for more space (except when compiled using
** SQLITE_TEST_REALLOC_STRESS). This interface is used during testing
** to verify that certain calls to sqlite3VdbeAddOpList() can never
// ** fail due to an OOM fault and hence that the return value from
** sqlite3VdbeAddOpList() will always be non-NULL.
*/
/*
** Verify that the VM passed as the only argument does not contain
** an OP_ResultRow opcode. Fail an assert() if it does. This is used
** by code in pragma.c to ensure that the implementation of certain
** pragmas comports with the flags specified in the mkpragmatab.tcl
** script.
*/
/*
** Generate code (a single OP_Abortable opcode) that will
** verify that the VDBE program can safely call Abort in the current
** context.
*/
// C documentation
//
// /*
// ** This function returns a pointer to the array of opcodes associated with
// ** the Vdbe passed as the first argument. It is the caller's responsibility
// ** to arrange for the returned array to be eventually freed using the
// ** vdbeFreeOpArray() function.
// **
// ** Before returning, *pnOp is set to the number of entries in the returned
// ** array. Also, *pnMaxArg is set to the larger of its current value and
// ** the number of entries in the Vdbe.apArg[] array required to execute the
// ** returned program.
// */
func _sqlite3VdbeTakeOpArray(tls *libc.TLS, p uintptr, pnOp uintptr, pnMaxArg uintptr) (r uintptr) {
var aOp uintptr
_ = aOp
aOp = (*TVdbe)(unsafe.Pointer(p)).FaOp
/* Check that sqlite3VdbeUsesBtree() was not called on this VM */
_resolveP2Values(tls, p, pnMaxArg)
*(*int32)(unsafe.Pointer(pnOp)) = (*TVdbe)(unsafe.Pointer(p)).FnOp
(*TVdbe)(unsafe.Pointer(p)).FaOp = uintptr(0)
return aOp
}
// C documentation
//
// /*
// ** Add a whole list of operations to the operation stack. Return a
// ** pointer to the first operation inserted.
// **
// ** Non-zero P2 arguments to jump instructions are automatically adjusted
// ** so that the jump target is relative to the first operation inserted.
// */
func _sqlite3VdbeAddOpList(tls *libc.TLS, p uintptr, nOp int32, aOp uintptr, iLineno int32) (r uintptr) {
var i int32
var pFirst, pOut, v1 uintptr
_, _, _, _ = i, pFirst, pOut, v1
if (*TVdbe)(unsafe.Pointer(p)).FnOp+nOp > (*TVdbe)(unsafe.Pointer(p)).FnOpAlloc && _growOpArray(tls, p, nOp) != 0 {
return uintptr(0)
}
v1 = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr((*TVdbe)(unsafe.Pointer(p)).FnOp)*24
pOut = v1
pFirst = v1
i = 0
for {
if !(i < nOp) {
break
}
(*TVdbeOp)(unsafe.Pointer(pOut)).Fopcode = (*TVdbeOpList)(unsafe.Pointer(aOp)).Fopcode
(*TVdbeOp)(unsafe.Pointer(pOut)).Fp1 = int32((*TVdbeOpList)(unsafe.Pointer(aOp)).Fp1)
(*TVdbeOp)(unsafe.Pointer(pOut)).Fp2 = int32((*TVdbeOpList)(unsafe.Pointer(aOp)).Fp2)
if int32(_sqlite3OpcodeProperty[(*TVdbeOpList)(unsafe.Pointer(aOp)).Fopcode])&int32(OPFLG_JUMP) != 0 && int32((*TVdbeOpList)(unsafe.Pointer(aOp)).Fp2) > 0 {
*(*int32)(unsafe.Pointer(pOut + 8)) += (*TVdbe)(unsafe.Pointer(p)).FnOp
}
(*TVdbeOp)(unsafe.Pointer(pOut)).Fp3 = int32((*TVdbeOpList)(unsafe.Pointer(aOp)).Fp3)
(*TVdbeOp)(unsafe.Pointer(pOut)).Fp4type = P4_NOTUSED
*(*uintptr)(unsafe.Pointer(pOut + 16)) = uintptr(0)
(*TVdbeOp)(unsafe.Pointer(pOut)).Fp5 = uint16(0)
_ = iLineno
goto _2
_2:
;
i++
aOp += 4
pOut += 24
}
*(*int32)(unsafe.Pointer(p + 144)) += nOp
return pFirst
}
// C documentation
//
// /*
// ** Change the value of the opcode, or P1, P2, P3, or P5 operands
// ** for a specific instruction.
// */
func _sqlite3VdbeChangeOpcode(tls *libc.TLS, p uintptr, addr int32, iNewOpcode Tu8) {
(*TVdbeOp)(unsafe.Pointer(_sqlite3VdbeGetOp(tls, p, addr))).Fopcode = iNewOpcode
}
func _sqlite3VdbeChangeP1(tls *libc.TLS, p uintptr, addr int32, val int32) {
(*TVdbeOp)(unsafe.Pointer(_sqlite3VdbeGetOp(tls, p, addr))).Fp1 = val
}
func _sqlite3VdbeChangeP2(tls *libc.TLS, p uintptr, addr int32, val int32) {
(*TVdbeOp)(unsafe.Pointer(_sqlite3VdbeGetOp(tls, p, addr))).Fp2 = val
}
func _sqlite3VdbeChangeP3(tls *libc.TLS, p uintptr, addr int32, val int32) {
(*TVdbeOp)(unsafe.Pointer(_sqlite3VdbeGetOp(tls, p, addr))).Fp3 = val
}
func _sqlite3VdbeChangeP5(tls *libc.TLS, p uintptr, p5 Tu16) {
if (*TVdbe)(unsafe.Pointer(p)).FnOp > 0 {
(*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr((*TVdbe)(unsafe.Pointer(p)).FnOp-int32(1))*24))).Fp5 = p5
}
}
// C documentation
//
// /*
// ** If the previous opcode is an OP_Column that delivers results
// ** into register iDest, then add the OPFLAG_TYPEOFARG flag to that
// ** opcode.
// */
func _sqlite3VdbeTypeofColumn(tls *libc.TLS, p uintptr, iDest int32) {
var pOp, p1 uintptr
_, _ = pOp, p1
pOp = _sqlite3VdbeGetLastOp(tls, p)
if (*TVdbeOp)(unsafe.Pointer(pOp)).Fp3 == iDest && int32((*TVdbeOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_Column) {
p1 = pOp + 2
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(OPFLAG_TYPEOFARG))
}
}
// C documentation
//
// /*
// ** Change the P2 operand of instruction addr so that it points to
// ** the address of the next instruction to be coded.
// */
func _sqlite3VdbeJumpHere(tls *libc.TLS, p uintptr, addr int32) {
_sqlite3VdbeChangeP2(tls, p, addr, (*TVdbe)(unsafe.Pointer(p)).FnOp)
}
// C documentation
//
// /*
// ** Change the P2 operand of the jump instruction at addr so that
// ** the jump lands on the next opcode. Or if the jump instruction was
// ** the previous opcode (and is thus a no-op) then simply back up
// ** the next instruction counter by one slot so that the jump is
// ** overwritten by the next inserted opcode.
// **
// ** This routine is an optimization of sqlite3VdbeJumpHere() that
// ** strives to omit useless byte-code like this:
// **
// ** 7 Once 0 8 0
// ** 8 ...
// */
func _sqlite3VdbeJumpHereOrPopInst(tls *libc.TLS, p uintptr, addr int32) {
if addr == (*TVdbe)(unsafe.Pointer(p)).FnOp-int32(1) {
(*TVdbe)(unsafe.Pointer(p)).FnOp--
} else {
_sqlite3VdbeChangeP2(tls, p, addr, (*TVdbe)(unsafe.Pointer(p)).FnOp)
}
}
// C documentation
//
// /*
// ** If the input FuncDef structure is ephemeral, then free it. If
// ** the FuncDef is not ephemeral, then do nothing.
// */
func _freeEphemeralFunction(tls *libc.TLS, db uintptr, pDef uintptr) {
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(SQLITE_FUNC_EPHEM) != uint32(0) {
_sqlite3DbNNFreeNN(tls, db, pDef)
}
}
// C documentation
//
// /*
// ** Delete a P4 value if necessary.
// */
func _freeP4Mem(tls *libc.TLS, db uintptr, p uintptr) {
if (*TMem)(unsafe.Pointer(p)).FszMalloc != 0 {
_sqlite3DbFree(tls, db, (*TMem)(unsafe.Pointer(p)).FzMalloc)
}
_sqlite3DbNNFreeNN(tls, db, p)
}
func _freeP4FuncCtx(tls *libc.TLS, db uintptr, p uintptr) {
_freeEphemeralFunction(tls, db, (*Tsqlite3_context)(unsafe.Pointer(p)).FpFunc)
_sqlite3DbNNFreeNN(tls, db, p)
}
func _freeP4(tls *libc.TLS, db uintptr, p4type int32, p4 uintptr) {
switch p4type {
case -int32(15):
_freeP4FuncCtx(tls, db, p4)
case -int32(12):
fallthrough
case -int32(13):
fallthrough
case -int32(6):
fallthrough
case -int32(14):
if p4 != 0 {
_sqlite3DbNNFreeNN(tls, db, p4)
}
case -int32(8):
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed == uintptr(0) {
_sqlite3KeyInfoUnref(tls, p4)
}
case -int32(7):
_freeEphemeralFunction(tls, db, p4)
case -int32(10):
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed == uintptr(0) {
_sqlite3ValueFree(tls, p4)
} else {
_freeP4Mem(tls, db, p4)
}
case -int32(11):
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed == uintptr(0) {
_sqlite3VtabUnlock(tls, p4)
}
case -int32(16):
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed == uintptr(0) {
_sqlite3DeleteTable(tls, db, p4)
}
break
}
}
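// The literal case values above are the inlined P4_* type codes from the C
// source: -6 P4_DYNAMIC, -7 P4_FUNCDEF, -8 P4_KEYINFO, -10 P4_MEM,
// -11 P4_VTAB, -12 P4_REAL, -13 P4_INT64, -14 P4_INTARRAY, -15 P4_FUNCCTX,
// -16 P4_TABLEREF.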
// C documentation
//
// /*
// ** Free the space allocated for aOp and any p4 values allocated for the
// ** opcodes contained within. If aOp is not NULL it is assumed to contain
// ** nOp entries.
// */
func _vdbeFreeOpArray(tls *libc.TLS, db uintptr, aOp uintptr, nOp int32) {
var pOp uintptr
_ = pOp
if aOp != 0 {
pOp = aOp + uintptr(nOp-int32(1))*24
for int32(1) != 0 { /* Exit via break */
if int32((*TOp)(unsafe.Pointer(pOp)).Fp4type) <= -int32(6) {
_freeP4(tls, db, int32((*TOp)(unsafe.Pointer(pOp)).Fp4type), *(*uintptr)(unsafe.Pointer(pOp + 16)))
}
if pOp == aOp {
break
}
pOp -= 24
}
_sqlite3DbNNFreeNN(tls, db, aOp)
}
}
// C documentation
//
// /*
// ** Link the SubProgram object passed as the second argument into the linked
// ** list at Vdbe.pSubProgram. This list is used to delete all sub-program
// ** objects when the VM is no longer required.
// */
func _sqlite3VdbeLinkSubProgram(tls *libc.TLS, pVdbe uintptr, p uintptr) {
(*TSubProgram)(unsafe.Pointer(p)).FpNext = (*TVdbe)(unsafe.Pointer(pVdbe)).FpProgram
(*TVdbe)(unsafe.Pointer(pVdbe)).FpProgram = p
}
// C documentation
//
// /*
// ** Return true if the given Vdbe has any SubPrograms.
// */
func _sqlite3VdbeHasSubProgram(tls *libc.TLS, pVdbe uintptr) (r int32) {
return libc.BoolInt32((*TVdbe)(unsafe.Pointer(pVdbe)).FpProgram != uintptr(0))
}
// C documentation
//
// /*
// ** Change the opcode at addr into OP_Noop
// */
func _sqlite3VdbeChangeToNoop(tls *libc.TLS, p uintptr, addr int32) (r int32) {
var pOp uintptr
_ = pOp
if (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FmallocFailed != 0 {
return 0
}
pOp = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(addr)*24
_freeP4(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, int32((*TVdbeOp)(unsafe.Pointer(pOp)).Fp4type), *(*uintptr)(unsafe.Pointer(pOp + 16)))
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp4type = P4_NOTUSED
*(*uintptr)(unsafe.Pointer(pOp + 16)) = uintptr(0)
(*TVdbeOp)(unsafe.Pointer(pOp)).Fopcode = uint8(OP_Noop)
return int32(1)
}
// C documentation
//
// /*
// ** If the last opcode is "op" and it is not a jump destination,
// ** then remove it. Return true if and only if an opcode was removed.
// */
func _sqlite3VdbeDeletePriorOpcode(tls *libc.TLS, p uintptr, op Tu8) (r int32) {
if (*TVdbe)(unsafe.Pointer(p)).FnOp > 0 && int32((*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr((*TVdbe)(unsafe.Pointer(p)).FnOp-int32(1))*24))).Fopcode) == int32(op) {
return _sqlite3VdbeChangeToNoop(tls, p, (*TVdbe)(unsafe.Pointer(p)).FnOp-int32(1))
} else {
return 0
}
return r
}
// C documentation
//
// /*
// ** Change the value of the P4 operand for a specific instruction.
// ** This routine is useful when a large program is loaded from a
// ** static array using sqlite3VdbeAddOpList but we want to make a
// ** few minor changes to the program.
// **
// ** If n>=0 then the P4 operand is dynamic, meaning that a copy of
// ** the string is made into memory obtained from sqlite3_malloc().
// ** A value of n==0 means copy bytes of zP4 up to and including the
// ** first null byte. If n>0 then copy n+1 bytes of zP4.
// **
// ** Other values of n (P4_STATIC, P4_COLLSEQ etc.) indicate that zP4 points
// ** to a string or structure that is guaranteed to exist for the lifetime of
// ** the Vdbe. In these cases we can just copy the pointer.
// **
// ** If addr<0 then change P4 on the most recently inserted instruction.
// */
func _vdbeChangeP4Full(tls *libc.TLS, p uintptr, pOp uintptr, zP4 uintptr, n int32) {
if (*TOp)(unsafe.Pointer(pOp)).Fp4type != 0 {
(*TOp)(unsafe.Pointer(pOp)).Fp4type = 0
*(*uintptr)(unsafe.Pointer(pOp + 16)) = uintptr(0)
}
if n < 0 {
_sqlite3VdbeChangeP4(tls, p, int32((int64(pOp)-int64((*TVdbe)(unsafe.Pointer(p)).FaOp))/24), zP4, n)
} else {
if n == 0 {
n = _sqlite3Strlen30(tls, zP4)
}
*(*uintptr)(unsafe.Pointer(pOp + 16)) = _sqlite3DbStrNDup(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, zP4, uint64(n))
(*TOp)(unsafe.Pointer(pOp)).Fp4type = int8(-libc.Int32FromInt32(6))
}
}
func _sqlite3VdbeChangeP4(tls *libc.TLS, p uintptr, addr int32, _zP4 uintptr, n int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
*(*uintptr)(unsafe.Pointer(bp)) = _zP4
var db, pOp uintptr
_, _ = db, pOp
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
if n != -int32(11) {
_freeP4(tls, db, n, *(*uintptr)(unsafe.Pointer(bp)))
}
return
}
if addr < 0 {
addr = (*TVdbe)(unsafe.Pointer(p)).FnOp - int32(1)
}
pOp = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(addr)*24
if n >= 0 || (*TOp)(unsafe.Pointer(pOp)).Fp4type != 0 {
_vdbeChangeP4Full(tls, p, pOp, *(*uintptr)(unsafe.Pointer(bp)), n)
return
}
if n == -int32(3) {
/* Note: this cast is safe, because the origin data point was an int
** that was cast to a (const char *). */
(*TOp)(unsafe.Pointer(pOp)).Fp4.Fi = int32(int64(*(*uintptr)(unsafe.Pointer(bp))))
(*TOp)(unsafe.Pointer(pOp)).Fp4type = int8(-libc.Int32FromInt32(3))
} else {
if *(*uintptr)(unsafe.Pointer(bp)) != uintptr(0) {
*(*uintptr)(unsafe.Pointer(pOp + 16)) = *(*uintptr)(unsafe.Pointer(bp))
(*TOp)(unsafe.Pointer(pOp)).Fp4type = int8(n)
if n == -int32(11) {
_sqlite3VtabLock(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
}
}
}
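// Call shapes for the n argument (a sketch; addr and the buffer names are
// hypothetical):
//
//	_sqlite3VdbeChangeP4(tls, p, -int32(1), zLabel, 0)     /* most recent op; copy zLabel up to and including its NUL */
//	_sqlite3VdbeChangeP4(tls, p, addr, zBuf, int32(3))     /* copy 3 bytes of zBuf plus a terminator */
//	_sqlite3VdbeChangeP4(tls, p, addr, zStatic, -int32(1)) /* P4_STATIC: store the pointer without copying */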
// C documentation
//
// /*
// ** Change the P4 operand of the most recently coded instruction
// ** to the value defined by the arguments. This is a high-speed
// ** version of sqlite3VdbeChangeP4().
// **
// ** The P4 operand must not have been previously defined. And the new
// ** P4 must not be P4_INT32. Use sqlite3VdbeChangeP4() in either of
// ** those cases.
// */
func _sqlite3VdbeAppendP4(tls *libc.TLS, p uintptr, pP4 uintptr, n int32) {
var pOp uintptr
_ = pOp
if (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FmallocFailed != 0 {
_freeP4(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, n, pP4)
} else {
pOp = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr((*TVdbe)(unsafe.Pointer(p)).FnOp-int32(1))*24
(*TVdbeOp)(unsafe.Pointer(pOp)).Fp4type = int8(n)
*(*uintptr)(unsafe.Pointer(pOp + 16)) = pP4
}
}
// C documentation
//
// /*
// ** Set the P4 on the most recently added opcode to the KeyInfo for the
// ** index given.
// */
func _sqlite3VdbeSetP4KeyInfo(tls *libc.TLS, pParse uintptr, pIdx uintptr) {
var pKeyInfo, v uintptr
_, _ = pKeyInfo, v
v = (*TParse)(unsafe.Pointer(pParse)).FpVdbe
pKeyInfo = _sqlite3KeyInfoOfIndex(tls, pParse, pIdx)
if pKeyInfo != 0 {
_sqlite3VdbeAppendP4(tls, v, pKeyInfo, -int32(8))
}
}
// C documentation
//
// /*
// ** Return the opcode for a given address. The address must be non-negative.
// ** See sqlite3VdbeGetLastOp() to get the most recently added opcode.
// **
// ** If a memory allocation error has occurred prior to the calling of this
// ** routine, then a pointer to a dummy VdbeOp will be returned. That opcode
// ** is readable but not writable, though it is cast to a writable value.
// ** The return of a dummy opcode allows the call to continue functioning
// ** after an OOM fault without having to check to see if the return from
// ** this routine is a valid pointer. But because the dummy.opcode is 0,
// ** dummy will never be written to. This is verified by code inspection and
// ** by running with Valgrind.
// */
func _sqlite3VdbeGetOp(tls *libc.TLS, p uintptr, addr int32) (r uintptr) {
if (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FmallocFailed != 0 {
return uintptr(unsafe.Pointer(&_dummy))
} else {
return (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(addr)*24
}
return r
}
/* C89 specifies that the constant "dummy" will be initialized to all
** zeros, which is correct. MSVC generates a warning, nevertheless. */
var _dummy TVdbeOp
// C documentation
//
// /* Return the most recently added opcode
// */
func _sqlite3VdbeGetLastOp(tls *libc.TLS, p uintptr) (r uintptr) {
return _sqlite3VdbeGetOp(tls, p, (*TVdbe)(unsafe.Pointer(p)).FnOp-int32(1))
}
// C documentation
//
// /*
// ** Compute a string that describes the P4 parameter for an opcode.
// ** Use zTemp for any required temporary buffer space.
// */
func _sqlite3VdbeDisplayP4(tls *libc.TLS, db uintptr, pOp uintptr) (r uintptr) {
bp := tls.Alloc(64)
defer tls.Free(64)
var ai, pColl, pColl1, pDef, pDef1, pKeyInfo, pMem, pVtab, zColl, zP4, v2, v3, v4 uintptr
var i, n Tu32
var j, v6 int32
var _ /* x at bp+0 */ TStrAccum
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = ai, i, j, n, pColl, pColl1, pDef, pDef1, pKeyInfo, pMem, pVtab, zColl, zP4, v2, v3, v4, v6
zP4 = uintptr(0)
_sqlite3StrAccumInit(tls, bp, uintptr(0), uintptr(0), 0, int32(SQLITE_MAX_LENGTH))
switch int32((*TOp)(unsafe.Pointer(pOp)).Fp4type) {
case -int32(8):
pKeyInfo = *(*uintptr)(unsafe.Pointer(pOp + 16))
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5083, libc.VaList(bp+40, int32((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FnKeyField)))
j = 0
for {
if !(j < int32((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FnKeyField)) {
break
}
pColl = *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(j)*8))
if pColl != 0 {
v2 = (*TCollSeq)(unsafe.Pointer(pColl)).FzName
} else {
v2 = __ccgo_ts + 1650
}
zColl = v2
if libc.Xstrcmp(tls, zColl, __ccgo_ts+5088) == 0 {
zColl = __ccgo_ts + 5095
}
if int32(*(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FaSortFlags + uintptr(j))))&int32(KEYINFO_ORDER_DESC) != 0 {
v3 = __ccgo_ts + 5076
} else {
v3 = __ccgo_ts + 1650
}
if int32(*(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FaSortFlags + uintptr(j))))&int32(KEYINFO_ORDER_BIGNULL) != 0 {
v4 = __ccgo_ts + 5097
} else {
v4 = __ccgo_ts + 1650
}
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5100, libc.VaList(bp+40, v3, v4, zColl))
goto _1
_1:
;
j++
}
Xsqlite3_str_append(tls, bp, __ccgo_ts+5108, int32(1))
case -int32(2):
pColl1 = *(*uintptr)(unsafe.Pointer(pOp + 16))
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5124, libc.VaList(bp+40, (*TCollSeq)(unsafe.Pointer(pColl1)).FzName, _encnames[(*TCollSeq)(unsafe.Pointer(pColl1)).Fenc]))
case -int32(7):
pDef = *(*uintptr)(unsafe.Pointer(pOp + 16))
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5133, libc.VaList(bp+40, (*TFuncDef)(unsafe.Pointer(pDef)).FzName, int32((*TFuncDef)(unsafe.Pointer(pDef)).FnArg)))
case -int32(15):
pDef1 = (*Tsqlite3_context)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16)))).FpFunc
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5133, libc.VaList(bp+40, (*TFuncDef)(unsafe.Pointer(pDef1)).FzName, int32((*TFuncDef)(unsafe.Pointer(pDef1)).FnArg)))
case -int32(13):
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+1406, libc.VaList(bp+40, *(*Ti64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16))))))
case -int32(3):
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5140, libc.VaList(bp+40, (*TOp)(unsafe.Pointer(pOp)).Fp4.Fi))
case -int32(12):
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+1373, libc.VaList(bp+40, *(*float64)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16))))))
case -int32(10):
pMem = *(*uintptr)(unsafe.Pointer(pOp + 16))
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Str) != 0 {
zP4 = (*TMem)(unsafe.Pointer(pMem)).Fz
} else {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+1406, libc.VaList(bp+40, *(*Ti64)(unsafe.Pointer(pMem))))
} else {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Real) != 0 {
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+1373, libc.VaList(bp+40, *(*float64)(unsafe.Pointer(pMem))))
} else {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Null) != 0 {
zP4 = __ccgo_ts + 1651
} else {
zP4 = __ccgo_ts + 5143
}
}
}
}
case -int32(11):
pVtab = (*TVTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16)))).FpVtab
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5150, libc.VaList(bp+40, pVtab))
case -int32(14):
ai = *(*uintptr)(unsafe.Pointer(pOp + 16))
n = *(*Tu32)(unsafe.Pointer(ai)) /* The first element of an INTARRAY is always the
** count of the number of elements to follow */
i = uint32(1)
for {
if !(i <= n) {
break
}
if i == uint32(1) {
v6 = int32('[')
} else {
v6 = int32(',')
}
Xsqlite3_str_appendf(tls, bp, __ccgo_ts+5158, libc.VaList(bp+40, v6, *(*Tu32)(unsafe.Pointer(ai + uintptr(i)*4))))
goto _5
_5:
;
i++
}
Xsqlite3_str_append(tls, bp, __ccgo_ts+5163, int32(1))
case -int32(4):
zP4 = __ccgo_ts + 5165
case -int32(5):
zP4 = (*TTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16)))).FzName
default:
zP4 = *(*uintptr)(unsafe.Pointer(pOp + 16))
}
if zP4 != 0 {
Xsqlite3_str_appendall(tls, bp, zP4)
}
if int32((*(*TStrAccum)(unsafe.Pointer(bp))).FaccError)&int32(SQLITE_NOMEM) != 0 {
_sqlite3OomFault(tls, db)
}
return _sqlite3StrAccumFinish(tls, bp)
}
var _encnames = [4]uintptr{
0: __ccgo_ts + 5110,
1: __ccgo_ts + 5112,
2: __ccgo_ts + 5114,
3: __ccgo_ts + 5119,
}
// C documentation
//
// /*
// ** Declare to the Vdbe that the BTree object at db->aDb[i] is used.
// **
// ** The prepared statements need to know in advance the complete set of
// ** attached databases that will be used. A mask of these databases
// ** is maintained in p->btreeMask. The p->lockMask value is the subset of
// ** p->btreeMask of databases that will require a lock.
// */
func _sqlite3VdbeUsesBtree(tls *libc.TLS, p uintptr, i int32) {
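/* Offset 204 is Vdbe.btreeMask and offset 208 is Vdbe.lockMask; the two
** stores below are the C statements DbMaskSet(p->btreeMask, i) and
** DbMaskSet(p->lockMask, i). */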
*(*TyDbMask)(unsafe.Pointer(p + 204)) |= libc.Uint32FromInt32(1) << i
if i != int32(1) && _sqlite3BtreeSharable(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FaDb + uintptr(i)*32))).FpBt) != 0 {
*(*TyDbMask)(unsafe.Pointer(p + 208)) |= libc.Uint32FromInt32(1) << i
}
}
// C documentation
//
// /*
// ** If SQLite is compiled to support shared-cache mode and to be threadsafe,
// ** this routine obtains the mutex associated with each BtShared structure
// ** that may be accessed by the VM passed as an argument. In doing so it also
// ** sets the BtShared.db member of each of the BtShared structures, ensuring
// ** that the correct busy-handler callback is invoked if required.
// **
// ** If SQLite is not threadsafe but does support shared-cache mode, then
// ** sqlite3BtreeEnter() is invoked to set the BtShared.db variables
// ** of all of BtShared structures accessible via the database handle
// ** associated with the VM.
// **
// ** If SQLite is not threadsafe and does not support shared-cache mode, this
// ** function is a no-op.
// **
// ** The p->btreeMask field is a bitmask of all btrees that the prepared
// ** statement p will ever use. Let N be the number of bits in p->btreeMask
// ** corresponding to btrees that use shared cache. Then the runtime of
// ** this routine is N*N. But as N is rarely more than 1, this should not
// ** be a problem.
// */
func _sqlite3VdbeEnter(tls *libc.TLS, p uintptr) {
var aDb, db uintptr
var i, nDb int32
_, _, _, _ = aDb, db, i, nDb
if (*TVdbe)(unsafe.Pointer(p)).FlockMask == uint32(0) {
return
} /* The common case */
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
aDb = (*Tsqlite3)(unsafe.Pointer(db)).FaDb
nDb = (*Tsqlite3)(unsafe.Pointer(db)).FnDb
i = 0
for {
if !(i < nDb) {
break
}
if i != int32(1) && (*TVdbe)(unsafe.Pointer(p)).FlockMask&(libc.Uint32FromInt32(1)<<i) != uint32(0) && (*(*TDb)(unsafe.Pointer(aDb + uintptr(i)*32))).FpBt != uintptr(0) {
_sqlite3BtreeEnter(tls, (*(*TDb)(unsafe.Pointer(aDb + uintptr(i)*32))).FpBt)
}
goto _1
_1:
;
i++
}
}
// C documentation
//
// /*
// ** Initialize an array of N Mem element. Only the fields that must be
// ** initialized (Mem.flags, Mem.db and Mem.szMalloc) are set here; all
// ** other fields are initialized before use.
// */
func _initMemArray(tls *libc.TLS, p uintptr, N int32, db uintptr, flags Tu16) {
var v1 int32
_ = v1
if N > 0 {
for {
(*TMem)(unsafe.Pointer(p)).Fflags = flags
(*TMem)(unsafe.Pointer(p)).Fdb = db
(*TMem)(unsafe.Pointer(p)).FszMalloc = 0
p += 56
goto _2
_2:
;
N--
v1 = N
if !(v1 > 0) {
break
}
}
}
}
// C documentation
//
// /*
// ** Release auxiliary memory held in an array of N Mem elements.
// **
// ** After this routine returns, all Mem elements in the array will still
// ** be valid. Those Mem elements that were not holding auxiliary resources
// ** will be unchanged. Mem elements which had something freed will be
// ** set to MEM_Undefined.
// */
func _releaseMemArray(tls *libc.TLS, p uintptr, N int32) {
var db, pEnd, v1, v3 uintptr
_, _, _, _ = db, pEnd, v1, v3
if p != 0 && N != 0 {
pEnd = p + uintptr(N)*56
db = (*TMem)(unsafe.Pointer(p)).Fdb
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed != 0 {
for {
if (*TMem)(unsafe.Pointer(p)).FszMalloc != 0 {
_sqlite3DbFree(tls, db, (*TMem)(unsafe.Pointer(p)).FzMalloc)
}
goto _2
_2:
;
p += 56
v1 = p
if !(v1 < pEnd) {
break
}
}
return
}
for {
/* This block is really an inlined version of sqlite3VdbeMemRelease()
** that takes advantage of the fact that the memory cell value is
** being set to NULL after releasing any dynamic resources.
**
** The justification for duplicating code is that according to
** callgrind, this causes a certain test case to hit the CPU 4.7
** percent less (x86 linux, gcc version 4.1.2, -O6) than if
** sqlite3MemRelease() were called from here. With -O2, this jumps
** to 6.6 percent. The test case is inserting 1000 rows into a table
** with no indexes using a single prepared INSERT statement, bind()
** and reset(). Inserts are grouped into a transaction.
*/
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_sqlite3VdbeMemRelease(tls, p)
(*TMem)(unsafe.Pointer(p)).Fflags = uint16(MEM_Undefined)
} else {
if (*TMem)(unsafe.Pointer(p)).FszMalloc != 0 {
_sqlite3DbNNFreeNN(tls, db, (*TMem)(unsafe.Pointer(p)).FzMalloc)
(*TMem)(unsafe.Pointer(p)).FszMalloc = 0
(*TMem)(unsafe.Pointer(p)).Fflags = uint16(MEM_Undefined)
}
}
goto _4
_4:
;
p += 56
v3 = p
if !(v3 < pEnd) {
break
}
}
}
}
// C documentation
//
// /*
// ** This is a destructor on a Mem object (which is really an sqlite3_value)
// ** that deletes the Frame object that is attached to it as a blob.
// **
// ** This routine does not delete the Frame right away. It merely adds the
// ** frame to a list of frames to be deleted when the Vdbe halts.
// */
func _sqlite3VdbeFrameMemDel(tls *libc.TLS, pArg uintptr) {
var pFrame uintptr
_ = pFrame
pFrame = pArg
(*TVdbeFrame)(unsafe.Pointer(pFrame)).FpParent = (*TVdbe)(unsafe.Pointer((*TVdbeFrame)(unsafe.Pointer(pFrame)).Fv)).FpDelFrame
(*TVdbe)(unsafe.Pointer((*TVdbeFrame)(unsafe.Pointer(pFrame)).Fv)).FpDelFrame = pFrame
}
// C documentation
//
// /*
// ** Locate the next opcode to be displayed in EXPLAIN or EXPLAIN
// ** QUERY PLAN output.
// **
// ** Return SQLITE_ROW on success. Return SQLITE_DONE if there are no
// ** more opcodes to be displayed.
// */
func _sqlite3VdbeNextOpcode(tls *libc.TLS, p uintptr, pSub uintptr, eMode int32, piPc uintptr, piAddr uintptr, paOp uintptr) (r int32) {
var aOp, apSub uintptr
var i, iPc, j, j1, nByte, nRow, nSub, rc, v2, v5 int32
_, _, _, _, _, _, _, _, _, _, _, _ = aOp, apSub, i, iPc, j, j1, nByte, nRow, nSub, rc, v2, v5 /* Stop when row count reaches this */
nSub = 0 /* Number of sub-vdbes seen so far */
apSub = uintptr(0) /* Next instruction address */
rc = SQLITE_OK /* Result code */
aOp = uintptr(0) /* Rowid. Copy of value in *piPc */
/* When the number of output rows reaches nRow, that means the
** listing has finished and sqlite3_step() should return SQLITE_DONE.
** nRow is the sum of the number of rows in the main program, plus
** the sum of the number of rows in all trigger subprograms encountered
** so far. The nRow value will increase as new trigger subprograms are
** encountered, but p->pc will eventually catch up to nRow.
*/
nRow = (*TVdbe)(unsafe.Pointer(p)).FnOp
if pSub != uintptr(0) {
if int32((*TMem)(unsafe.Pointer(pSub)).Fflags)&int32(MEM_Blob) != 0 {
/* pSub is initially NULL. It is initialized to a BLOB by
** the P4_SUBPROGRAM processing logic below */
nSub = int32(uint64((*TMem)(unsafe.Pointer(pSub)).Fn) / uint64(8))
apSub = (*TMem)(unsafe.Pointer(pSub)).Fz
}
i = 0
for {
if !(i < nSub) {
break
}
nRow += (*TSubProgram)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(apSub + uintptr(i)*8)))).FnOp
goto _1
_1:
;
i++
}
}
iPc = *(*int32)(unsafe.Pointer(piPc))
for int32(1) != 0 { /* Loop exits via break */
v2 = iPc
iPc++
i = v2
if i >= nRow {
(*TVdbe)(unsafe.Pointer(p)).Frc = SQLITE_OK
rc = int32(SQLITE_DONE)
break
}
if i < (*TVdbe)(unsafe.Pointer(p)).FnOp {
/* The rowid is small enough that we are still in the
** main program. */
aOp = (*TVdbe)(unsafe.Pointer(p)).FaOp
} else {
i -= (*TVdbe)(unsafe.Pointer(p)).FnOp
j = 0
for {
if !(i >= (*TSubProgram)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(apSub + uintptr(j)*8)))).FnOp) {
break
}
i -= (*TSubProgram)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(apSub + uintptr(j)*8)))).FnOp
goto _3
_3:
;
j++
}
aOp = (*TSubProgram)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(apSub + uintptr(j)*8)))).FaOp
}
/* When an OP_Program opcode is encountered (the only opcode that has
** a P4_SUBPROGRAM argument), expand the size of the array of subprograms
** kept in p->aMem[9].z to hold the new program - assuming this subprogram
** has not already been seen.
*/
if pSub != uintptr(0) && int32((*(*TOp)(unsafe.Pointer(aOp + uintptr(i)*24))).Fp4type) == -int32(4) {
nByte = int32(uint64(nSub+libc.Int32FromInt32(1)) * uint64(8))
j1 = 0
for {
if !(j1 < nSub) {
break
}
if *(*uintptr)(unsafe.Pointer(apSub + uintptr(j1)*8)) == *(*uintptr)(unsafe.Pointer(aOp + uintptr(i)*24 + 16)) {
break
}
goto _4
_4:
;
j1++
}
if j1 == nSub {
(*TVdbe)(unsafe.Pointer(p)).Frc = _sqlite3VdbeMemGrow(tls, pSub, nByte, libc.BoolInt32(nSub != 0))
if (*TVdbe)(unsafe.Pointer(p)).Frc != SQLITE_OK {
rc = int32(SQLITE_ERROR)
break
}
apSub = (*TMem)(unsafe.Pointer(pSub)).Fz
v5 = nSub
nSub++
*(*uintptr)(unsafe.Pointer(apSub + uintptr(v5)*8)) = *(*uintptr)(unsafe.Pointer(aOp + uintptr(i)*24 + 16))
(*TMem)(unsafe.Pointer(pSub)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pSub)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Blob))
(*TMem)(unsafe.Pointer(pSub)).Fn = int32(uint64(nSub) * uint64(8))
nRow += (*TSubProgram)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(aOp + uintptr(i)*24 + 16)))).FnOp
}
}
if eMode == 0 {
break
}
if int32((*(*TOp)(unsafe.Pointer(aOp + uintptr(i)*24))).Fopcode) == int32(OP_Explain) {
break
}
if int32((*(*TOp)(unsafe.Pointer(aOp + uintptr(i)*24))).Fopcode) == int32(OP_Init) && iPc > int32(1) {
break
}
}
*(*int32)(unsafe.Pointer(piPc)) = iPc
*(*int32)(unsafe.Pointer(piAddr)) = i
*(*uintptr)(unsafe.Pointer(paOp)) = aOp
return rc
}
// C documentation
//
// /*
// ** Delete a VdbeFrame object and its contents. VdbeFrame objects are
// ** allocated by the OP_Program opcode in sqlite3VdbeExec().
// */
func _sqlite3VdbeFrameDelete(tls *libc.TLS, p uintptr) {
var aMem, apCsr uintptr
var i int32
_, _, _ = aMem, apCsr, i
aMem = p + uintptr((libc.Uint64FromInt64(112)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7)))
apCsr = aMem + uintptr((*TVdbeFrame)(unsafe.Pointer(p)).FnChildMem)*56
i = 0
for {
if !(i < (*TVdbeFrame)(unsafe.Pointer(p)).FnChildCsr) {
break
}
if *(*uintptr)(unsafe.Pointer(apCsr + uintptr(i)*8)) != 0 {
_sqlite3VdbeFreeCursorNN(tls, (*TVdbeFrame)(unsafe.Pointer(p)).Fv, *(*uintptr)(unsafe.Pointer(apCsr + uintptr(i)*8)))
}
goto _1
_1:
;
i++
}
_releaseMemArray(tls, aMem, (*TVdbeFrame)(unsafe.Pointer(p)).FnChildMem)
_sqlite3VdbeDeleteAuxData(tls, (*TVdbe)(unsafe.Pointer((*TVdbeFrame)(unsafe.Pointer(p)).Fv)).Fdb, p+64, -int32(1), 0)
_sqlite3DbFree(tls, (*TVdbe)(unsafe.Pointer((*TVdbeFrame)(unsafe.Pointer(p)).Fv)).Fdb, p)
}
// C documentation
//
// /*
// ** Give a listing of the program in the virtual machine.
// **
// ** The interface is the same as sqlite3VdbeExec(). But instead of
// ** running the code, it invokes the callback once for each instruction.
// ** This feature is used to implement "EXPLAIN".
// **
// ** When p->explain==1, each instruction is listed. When
// ** p->explain==2, only OP_Explain instructions are listed and these
// ** are shown in a different format. p->explain==2 is used to implement
// ** EXPLAIN QUERY PLAN.
// ** 2018-04-24: In p->explain==2 mode, the OP_Init opcodes of triggers
// ** are also shown, so that the boundaries between the main program and
// ** each trigger are clear.
// **
// ** When p->explain==1, first the main program is listed, then each of
// ** the trigger subprograms are listed one by one.
// */
func _sqlite3VdbeList(tls *libc.TLS, p uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var bListSubprogs, rc int32
var db, pMem, pOp, pSub, zP4 uintptr
var _ /* aOp at bp+8 */ uintptr
var _ /* i at bp+0 */ int32
_, _, _, _, _, _, _ = bListSubprogs, db, pMem, pOp, pSub, rc, zP4
pSub = uintptr(0) /* Memory cell holding array of subprogs */
db = (*TVdbe)(unsafe.Pointer(p)).Fdb /* Loop counter */
rc = SQLITE_OK /* Return code */
pMem = (*TVdbe)(unsafe.Pointer(p)).FaMem + 1*56 /* First Mem of result set */
bListSubprogs = libc.BoolInt32(int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2)) == int32(1) || (*Tsqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_TriggerEQP) != uint64(0)) /* Current opcode */
/* Even though this opcode does not use dynamic strings for
** the result, result columns may become dynamic if the user calls
** sqlite3_column_text16(), causing a translation to UTF-16 encoding.
*/
_releaseMemArray(tls, pMem, int32(8))
if (*TVdbe)(unsafe.Pointer(p)).Frc == int32(SQLITE_NOMEM) {
/* This happens if a malloc() inside a call to sqlite3_column_text() or
** sqlite3_column_text16() failed. */
_sqlite3OomFault(tls, db)
return int32(SQLITE_ERROR)
}
if bListSubprogs != 0 {
/* The first 8 memory cells are used for the result set. So we will
** commandeer the 9th cell to use as storage for an array of pointers
** to trigger subprograms. The VDBE is guaranteed to have at least 9
** cells. */
pSub = (*TVdbe)(unsafe.Pointer(p)).FaMem + 9*56
} else {
pSub = uintptr(0)
}
/* Figure out which opcode is next to display */
rc = _sqlite3VdbeNextOpcode(tls, p, pSub, libc.BoolInt32(int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2)) == int32(2)), p+48, bp, bp+8)
if rc == SQLITE_OK {
pOp = *(*uintptr)(unsafe.Pointer(bp + 8)) + uintptr(*(*int32)(unsafe.Pointer(bp)))*24
if libc.AtomicLoadPInt32(db+432) != 0 {
(*TVdbe)(unsafe.Pointer(p)).Frc = int32(SQLITE_INTERRUPT)
rc = int32(SQLITE_ERROR)
_sqlite3VdbeError(tls, p, _sqlite3ErrStr(tls, (*TVdbe)(unsafe.Pointer(p)).Frc), 0)
} else {
zP4 = _sqlite3VdbeDisplayP4(tls, db, pOp)
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2)) == int32(2) {
_sqlite3VdbeMemSetInt64(tls, pMem, int64((*TOp)(unsafe.Pointer(pOp)).Fp1))
_sqlite3VdbeMemSetInt64(tls, pMem+uintptr(1)*56, int64((*TOp)(unsafe.Pointer(pOp)).Fp2))
_sqlite3VdbeMemSetInt64(tls, pMem+uintptr(2)*56, int64((*TOp)(unsafe.Pointer(pOp)).Fp3))
_sqlite3VdbeMemSetStr(tls, pMem+uintptr(3)*56, zP4, int64(-int32(1)), uint8(SQLITE_UTF8), __ccgo_fp(Xsqlite3_free))
} else {
_sqlite3VdbeMemSetInt64(tls, pMem+uintptr(0)*56, int64(*(*int32)(unsafe.Pointer(bp))))
_sqlite3VdbeMemSetStr(tls, pMem+uintptr(1)*56, _sqlite3OpcodeName(tls, int32((*TOp)(unsafe.Pointer(pOp)).Fopcode)), int64(-int32(1)), uint8(SQLITE_UTF8), libc.UintptrFromInt32(0))
_sqlite3VdbeMemSetInt64(tls, pMem+uintptr(2)*56, int64((*TOp)(unsafe.Pointer(pOp)).Fp1))
_sqlite3VdbeMemSetInt64(tls, pMem+uintptr(3)*56, int64((*TOp)(unsafe.Pointer(pOp)).Fp2))
_sqlite3VdbeMemSetInt64(tls, pMem+uintptr(4)*56, int64((*TOp)(unsafe.Pointer(pOp)).Fp3))
/* pMem+5 for p4 is done last */
_sqlite3VdbeMemSetInt64(tls, pMem+uintptr(6)*56, int64((*TOp)(unsafe.Pointer(pOp)).Fp5))
_sqlite3VdbeMemSetNull(tls, pMem+uintptr(7)*56)
_sqlite3VdbeMemSetStr(tls, pMem+uintptr(5)*56, zP4, int64(-int32(1)), uint8(SQLITE_UTF8), __ccgo_fp(Xsqlite3_free))
}
(*TVdbe)(unsafe.Pointer(p)).FpResultRow = pMem
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
(*TVdbe)(unsafe.Pointer(p)).Frc = int32(SQLITE_NOMEM)
rc = int32(SQLITE_ERROR)
} else {
(*TVdbe)(unsafe.Pointer(p)).Frc = SQLITE_OK
rc = int32(SQLITE_ROW)
}
}
}
return rc
}
/* An instance of this object describes bulk memory available for use
** by subcomponents of a prepared statement. Space is allocated out
** of a ReusableSpace object by the allocSpace() routine below.
*/
type TReusableSpace = struct {
FpSpace uintptr
FnFree Tsqlite3_int64
FnNeeded Tsqlite3_int64
}
type ReusableSpace = TReusableSpace
// C documentation
//
// /* Try to allocate nByte bytes of 8-byte aligned bulk memory for pBuf
// ** from the ReusableSpace object. Return a pointer to the allocated
// ** memory on success. If insufficient memory is available in the
// ** ReusableSpace object, increase the ReusableSpace.nNeeded
// ** value by the amount needed and return NULL.
// **
// ** If pBuf is not initially NULL, that means that the memory has already
// ** been allocated by a prior call to this routine, so just return a copy
// ** of pBuf and leave ReusableSpace unchanged.
// **
// ** This allocator is employed to repurpose unused slots at the end of the
// ** opcode array of a prepared statement for other memory needs of the prepared
// ** statement.
// */
func _allocSpace(tls *libc.TLS, p uintptr, pBuf uintptr, nByte Tsqlite3_int64) (r uintptr) {
if pBuf == uintptr(0) {
nByte = nByte
if nByte <= (*TReusableSpace)(unsafe.Pointer(p)).FnFree {
*(*Tsqlite3_int64)(unsafe.Pointer(p + 8)) -= nByte
pBuf = (*TReusableSpace)(unsafe.Pointer(p)).FpSpace + uintptr((*TReusableSpace)(unsafe.Pointer(p)).FnFree)
} else {
*(*Tsqlite3_int64)(unsafe.Pointer(p + 16)) += nByte
}
}
return pBuf
}
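// Two-pass usage pattern (a sketch; x, nMemBytes and aMem are hypothetical
// locals mirroring sqlite3VdbeMakeReady below). Pass 1 hands out space from
// the opcode-array tail and records any shortfall in x.FnNeeded; if there is
// a shortfall, one fresh allocation is made and the same calls are repeated,
// leaving buffers already satisfied in pass 1 untouched:
//
//	x.FnNeeded = 0
//	aMem := _allocSpace(tls, uintptr(unsafe.Pointer(&x)), uintptr(0), nMemBytes)
//	if x.FnNeeded != 0 {
//		x.FpSpace = _sqlite3DbMallocRawNN(tls, db, uint64(x.FnNeeded))
//		x.FnFree = x.FnNeeded
//		aMem = _allocSpace(tls, uintptr(unsafe.Pointer(&x)), aMem, nMemBytes)
//	}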
// C documentation
//
// /*
// ** Rewind the VDBE back to the beginning in preparation for
// ** running it.
// */
func _sqlite3VdbeRewind(tls *libc.TLS, p uintptr) {
/* There should be at least one opcode.
*/
(*TVdbe)(unsafe.Pointer(p)).FeVdbeState = uint8(VDBE_READY_STATE)
(*TVdbe)(unsafe.Pointer(p)).Fpc = -int32(1)
(*TVdbe)(unsafe.Pointer(p)).Frc = SQLITE_OK
(*TVdbe)(unsafe.Pointer(p)).FerrorAction = uint8(OE_Abort)
(*TVdbe)(unsafe.Pointer(p)).FnChange = 0
(*TVdbe)(unsafe.Pointer(p)).FcacheCtr = uint32(1)
(*TVdbe)(unsafe.Pointer(p)).FminWriteFileFormat = uint8(255)
(*TVdbe)(unsafe.Pointer(p)).FiStatement = 0
(*TVdbe)(unsafe.Pointer(p)).FnFkConstraint = 0
}
// C documentation
//
// /*
// ** Prepare a virtual machine for execution for the first time after
// ** creating the virtual machine. This involves things such
// ** as allocating registers and initializing the program counter.
// ** After the VDBE has been prepped, it can be executed by one or more
// ** calls to sqlite3VdbeExec().
// **
// ** This function may be called exactly once on each virtual machine.
// ** After this routine is called the VM has been "packaged" and is ready
// ** to run. After this routine is called, further calls to
// ** sqlite3VdbeAddOp() functions are prohibited. This routine disconnects
// ** the Vdbe from the Parse object that helped generate it so that the
// ** the Vdbe becomes an independent entity and the Parse object can be
// ** destroyed.
// **
// ** Use the sqlite3VdbeRewind() procedure to restore a virtual machine back
// ** to its initial state after it has been run.
// */
func _sqlite3VdbeMakeReady(tls *libc.TLS, p uintptr, pParse uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var db, v1 uintptr
var n, nCursor, nMem, nVar int32
var _ /* nArg at bp+0 */ int32
var _ /* x at bp+8 */ TReusableSpace
_, _, _, _, _, _ = db, n, nCursor, nMem, nVar, v1 /* Reusable bulk memory */
(*TVdbe)(unsafe.Pointer(p)).FpVList = (*TParse)(unsafe.Pointer(pParse)).FpVList
(*TParse)(unsafe.Pointer(pParse)).FpVList = uintptr(0)
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
nVar = int32((*TParse)(unsafe.Pointer(pParse)).FnVar)
nMem = (*TParse)(unsafe.Pointer(pParse)).FnMem
nCursor = (*TParse)(unsafe.Pointer(pParse)).FnTab
*(*int32)(unsafe.Pointer(bp)) = (*TParse)(unsafe.Pointer(pParse)).FnMaxArg
/* Each cursor uses a memory cell. The first cursor (cursor 0) can
** use aMem[0] which is not otherwise used by the VDBE program. Allocate
** space at the end of aMem[] for cursors 1 and greater.
** See also: allocateCursor().
*/
nMem += nCursor
if nCursor == 0 && nMem > 0 {
nMem++
} /* Space for aMem[0] even if not used */
/* Figure out how much reusable memory is available at the end of the
** opcode array. This extra memory will be reallocated for other elements
** of the prepared statement.
*/
n = int32(libc.Uint64FromInt64(24) * uint64((*TVdbe)(unsafe.Pointer(p)).FnOp)) /* Bytes of opcode memory used */
(*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FpSpace = (*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(n) /* Unused opcode memory */
(*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FnFree = int64(((*TParse)(unsafe.Pointer(pParse)).FszOpAlloc - n) & ^libc.Int32FromInt32(7)) /* Bytes of unused memory */
_resolveP2Values(tls, p, bp)
libc.SetBitFieldPtr16Uint32(p+200, uint32(libc.BoolUint8((*TParse)(unsafe.Pointer(pParse)).FisMultiWrite != 0 && (*TParse)(unsafe.Pointer(pParse)).FmayAbort != 0)), 5, 0x20)
if (*TParse)(unsafe.Pointer(pParse)).Fexplain != 0 {
if nMem < int32(10) {
nMem = int32(10)
}
libc.SetBitFieldPtr16Uint32(p+200, uint32((*TParse)(unsafe.Pointer(pParse)).Fexplain), 2, 0xc)
(*TVdbe)(unsafe.Pointer(p)).FnResColumn = uint16(int32(12) - int32(4)*int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2)))
}
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 0, 0x3)
/* Memory for registers, parameters, cursor, etc, is allocated in one or two
** passes. On the first pass, we try to reuse unused memory at the
** end of the opcode array. If we are unable to satisfy all memory
** requirements by reusing the opcode array tail, then the second
** pass will fill in the remainder using a fresh memory allocation.
**
// ** This two-pass approach reuses as much memory as possible from
** the leftover memory at the end of the opcode array. This can significantly
** reduce the amount of memory held by a prepared statement.
*/
(*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FnNeeded = 0
(*TVdbe)(unsafe.Pointer(p)).FaMem = _allocSpace(tls, bp+8, uintptr(0), int64(uint64(nMem)*uint64(56)))
(*TVdbe)(unsafe.Pointer(p)).FaVar = _allocSpace(tls, bp+8, uintptr(0), int64(uint64(nVar)*uint64(56)))
(*TVdbe)(unsafe.Pointer(p)).FapArg = _allocSpace(tls, bp+8, uintptr(0), int64(uint64(*(*int32)(unsafe.Pointer(bp)))*uint64(8)))
(*TVdbe)(unsafe.Pointer(p)).FapCsr = _allocSpace(tls, bp+8, uintptr(0), int64(uint64(nCursor)*uint64(8)))
if (*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FnNeeded != 0 {
v1 = _sqlite3DbMallocRawNN(tls, db, uint64((*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FnNeeded))
(*TVdbe)(unsafe.Pointer(p)).FpFree = v1
(*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FpSpace = v1
(*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FnFree = (*(*TReusableSpace)(unsafe.Pointer(bp + 8))).FnNeeded
if !((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) {
(*TVdbe)(unsafe.Pointer(p)).FaMem = _allocSpace(tls, bp+8, (*TVdbe)(unsafe.Pointer(p)).FaMem, int64(uint64(nMem)*uint64(56)))
(*TVdbe)(unsafe.Pointer(p)).FaVar = _allocSpace(tls, bp+8, (*TVdbe)(unsafe.Pointer(p)).FaVar, int64(uint64(nVar)*uint64(56)))
(*TVdbe)(unsafe.Pointer(p)).FapArg = _allocSpace(tls, bp+8, (*TVdbe)(unsafe.Pointer(p)).FapArg, int64(uint64(*(*int32)(unsafe.Pointer(bp)))*uint64(8)))
(*TVdbe)(unsafe.Pointer(p)).FapCsr = _allocSpace(tls, bp+8, (*TVdbe)(unsafe.Pointer(p)).FapCsr, int64(uint64(nCursor)*uint64(8)))
}
}
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
(*TVdbe)(unsafe.Pointer(p)).FnVar = 0
(*TVdbe)(unsafe.Pointer(p)).FnCursor = 0
(*TVdbe)(unsafe.Pointer(p)).FnMem = 0
} else {
(*TVdbe)(unsafe.Pointer(p)).FnCursor = nCursor
(*TVdbe)(unsafe.Pointer(p)).FnVar = int16(nVar)
_initMemArray(tls, (*TVdbe)(unsafe.Pointer(p)).FaVar, nVar, db, uint16(MEM_Null))
(*TVdbe)(unsafe.Pointer(p)).FnMem = nMem
_initMemArray(tls, (*TVdbe)(unsafe.Pointer(p)).FaMem, nMem, db, uint16(MEM_Undefined))
libc.Xmemset(tls, (*TVdbe)(unsafe.Pointer(p)).FapCsr, 0, uint64(nCursor)*uint64(8))
}
_sqlite3VdbeRewind(tls, p)
}
// C documentation
//
// /*
// ** Close a VDBE cursor and release all the resources that cursor
// ** happens to hold.
// */
func _sqlite3VdbeFreeCursor(tls *libc.TLS, p uintptr, pCx uintptr) {
if pCx != 0 {
_sqlite3VdbeFreeCursorNN(tls, p, pCx)
}
}
func _freeCursorWithCache(tls *libc.TLS, p uintptr, pCx uintptr) {
var pCache uintptr
_ = pCache
pCache = (*TVdbeCursor)(unsafe.Pointer(pCx)).FpCache
libc.SetBitFieldPtr8Uint32(pCx+8, libc.Uint32FromInt32(0), 4, 0x10)
(*TVdbeCursor)(unsafe.Pointer(pCx)).FpCache = uintptr(0)
if (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue != 0 {
_sqlite3RCStrUnref(tls, (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue)
(*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue = uintptr(0)
}
_sqlite3DbFree(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, pCache)
_sqlite3VdbeFreeCursorNN(tls, p, pCx)
}
func _sqlite3VdbeFreeCursorNN(tls *libc.TLS, p uintptr, pCx uintptr) {
var pModule, pVCur uintptr
_, _ = pModule, pVCur
if int32(TBool(*(*uint8)(unsafe.Pointer(pCx + 8))&0x10>>4)) != 0 {
_freeCursorWithCache(tls, p, pCx)
return
}
switch int32((*TVdbeCursor)(unsafe.Pointer(pCx)).FeCurType) {
case int32(CURTYPE_SORTER):
_sqlite3VdbeSorterClose(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, pCx)
case CURTYPE_BTREE:
_sqlite3BtreeCloseCursor(tls, *(*uintptr)(unsafe.Pointer(pCx + 48)))
case int32(CURTYPE_VTAB):
pVCur = *(*uintptr)(unsafe.Pointer(pCx + 48))
pModule = (*Tsqlite3_vtab)(unsafe.Pointer((*Tsqlite3_vtab_cursor)(unsafe.Pointer(pVCur)).FpVtab)).FpModule
(*Tsqlite3_vtab)(unsafe.Pointer((*Tsqlite3_vtab_cursor)(unsafe.Pointer(pVCur)).FpVtab)).FnRef--
(*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_module)(unsafe.Pointer(pModule)).FxClose})))(tls, pVCur)
break
}
}
// C documentation
//
// /*
// ** Close all cursors in the current frame.
// */
func _closeCursorsInFrame(tls *libc.TLS, p uintptr) {
var i int32
var pC uintptr
_, _ = i, pC
i = 0
for {
if !(i < (*TVdbe)(unsafe.Pointer(p)).FnCursor) {
break
}
pC = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr(i)*8))
if pC != 0 {
_sqlite3VdbeFreeCursorNN(tls, p, pC)
*(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr(i)*8)) = uintptr(0)
}
goto _1
_1:
;
i++
}
}
// C documentation
//
// /*
// ** Copy the values stored in the VdbeFrame structure to its Vdbe. This
// ** is used, for example, when a trigger sub-program is halted to restore
// ** control to the main program.
// */
func _sqlite3VdbeFrameRestore(tls *libc.TLS, pFrame uintptr) (r int32) {
var v uintptr
_ = v
v = (*TVdbeFrame)(unsafe.Pointer(pFrame)).Fv
_closeCursorsInFrame(tls, v)
(*TVdbe)(unsafe.Pointer(v)).FaOp = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FaOp
(*TVdbe)(unsafe.Pointer(v)).FnOp = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FnOp
(*TVdbe)(unsafe.Pointer(v)).FaMem = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FaMem
(*TVdbe)(unsafe.Pointer(v)).FnMem = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FnMem
(*TVdbe)(unsafe.Pointer(v)).FapCsr = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FapCsr
(*TVdbe)(unsafe.Pointer(v)).FnCursor = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FnCursor
(*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).Fdb)).FlastRowid = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FlastRowid
(*TVdbe)(unsafe.Pointer(v)).FnChange = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FnChange
(*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).Fdb)).FnChange = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FnDbChange
_sqlite3VdbeDeleteAuxData(tls, (*TVdbe)(unsafe.Pointer(v)).Fdb, v+296, -int32(1), 0)
(*TVdbe)(unsafe.Pointer(v)).FpAuxData = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FpAuxData
(*TVdbeFrame)(unsafe.Pointer(pFrame)).FpAuxData = uintptr(0)
return (*TVdbeFrame)(unsafe.Pointer(pFrame)).Fpc
}
// C documentation
//
// /*
// ** Close all cursors.
// **
// ** Also release any dynamic memory held by the VM in the Vdbe.aMem memory
// ** cell array. This is necessary as the memory cell array may contain
// ** pointers to VdbeFrame objects, which may in turn contain pointers to
// ** open cursors.
// */
func _closeAllCursors(tls *libc.TLS, p uintptr) {
var pDel, pFrame uintptr
_, _ = pDel, pFrame
if (*TVdbe)(unsafe.Pointer(p)).FpFrame != 0 {
pFrame = (*TVdbe)(unsafe.Pointer(p)).FpFrame
for {
if !((*TVdbeFrame)(unsafe.Pointer(pFrame)).FpParent != 0) {
break
}
goto _1
_1:
;
pFrame = (*TVdbeFrame)(unsafe.Pointer(pFrame)).FpParent
}
_sqlite3VdbeFrameRestore(tls, pFrame)
(*TVdbe)(unsafe.Pointer(p)).FpFrame = uintptr(0)
(*TVdbe)(unsafe.Pointer(p)).FnFrame = 0
}
_closeCursorsInFrame(tls, p)
_releaseMemArray(tls, (*TVdbe)(unsafe.Pointer(p)).FaMem, (*TVdbe)(unsafe.Pointer(p)).FnMem)
for (*TVdbe)(unsafe.Pointer(p)).FpDelFrame != 0 {
pDel = (*TVdbe)(unsafe.Pointer(p)).FpDelFrame
(*TVdbe)(unsafe.Pointer(p)).FpDelFrame = (*TVdbeFrame)(unsafe.Pointer(pDel)).FpParent
_sqlite3VdbeFrameDelete(tls, pDel)
}
/* Delete any auxdata allocations made by the VM */
if (*TVdbe)(unsafe.Pointer(p)).FpAuxData != 0 {
_sqlite3VdbeDeleteAuxData(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, p+296, -int32(1), 0)
}
}
// C documentation
//
// /*
// ** Set the number of result columns that will be returned by this SQL
// ** statement. This is now set at compile time, rather than during
// ** execution of the vdbe program so that sqlite3_column_count() can
// ** be called on an SQL statement before sqlite3_step().
// */
func _sqlite3VdbeSetNumCols(tls *libc.TLS, p uintptr, nResColumn int32) {
var db uintptr
var n int32
var v1 Tu16
_, _, _ = db, n, v1
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
if (*TVdbe)(unsafe.Pointer(p)).FnResAlloc != 0 {
_releaseMemArray(tls, (*TVdbe)(unsafe.Pointer(p)).FaColName, int32((*TVdbe)(unsafe.Pointer(p)).FnResAlloc)*int32(COLNAME_N))
_sqlite3DbFree(tls, db, (*TVdbe)(unsafe.Pointer(p)).FaColName)
}
n = nResColumn * int32(COLNAME_N)
v1 = uint16(nResColumn)
(*TVdbe)(unsafe.Pointer(p)).FnResAlloc = v1
(*TVdbe)(unsafe.Pointer(p)).FnResColumn = v1
(*TVdbe)(unsafe.Pointer(p)).FaColName = _sqlite3DbMallocRawNN(tls, db, uint64(56)*uint64(n))
if (*TVdbe)(unsafe.Pointer(p)).FaColName == uintptr(0) {
return
}
_initMemArray(tls, (*TVdbe)(unsafe.Pointer(p)).FaColName, n, db, uint16(MEM_Null))
}
// C documentation
//
// /*
// ** Set the name of the idx'th column to be returned by the SQL statement.
// ** zName must be a pointer to a nul terminated string.
// **
// ** This call must be made after a call to sqlite3VdbeSetNumCols().
// **
// ** The final parameter, xDel, must be one of SQLITE_DYNAMIC, SQLITE_STATIC
// ** or SQLITE_TRANSIENT. If it is SQLITE_DYNAMIC, then the buffer pointed
// ** to by zName will be freed by sqlite3DbFree() when the vdbe is destroyed.
// */
func _sqlite3VdbeSetColName(tls *libc.TLS, p uintptr, idx int32, var1 int32, zName uintptr, xDel uintptr) (r int32) {
var pColName uintptr
var rc int32
_, _ = pColName, rc
if (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FmallocFailed != 0 {
return int32(SQLITE_NOMEM)
}
pColName = (*TVdbe)(unsafe.Pointer(p)).FaColName + uintptr(idx+var1*int32((*TVdbe)(unsafe.Pointer(p)).FnResAlloc))*56
rc = _sqlite3VdbeMemSetStr(tls, pColName, zName, int64(-int32(1)), uint8(SQLITE_UTF8), xDel)
return rc
}
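// The column-name area allocated by _sqlite3VdbeSetNumCols above holds
// nResAlloc*COLNAME_N Mem cells of 56 bytes each, grouped by kind of metadata
// rather than by column. The helper below is an illustrative sketch (not part of
// the generated amalgamation) of the slot arithmetic used by
// _sqlite3VdbeSetColName; the COLNAME_* kind values named in the comment are an
// assumption taken from the SQLite sources.
func exampleColNameSlot(idx int32, kind int32, nResAlloc int32) int32 {
	// All entries of one metadata kind (name, declared type, database, table,
	// column) are stored contiguously, so the Mem cell for column idx and kind
	// "kind" lives at index idx + kind*nResAlloc within aColName.
	return idx + kind*nResAlloc
}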
// C documentation
//
// /*
// ** A read or write transaction may or may not be active on database handle
// ** db. If a transaction is active, commit it. If there is a
// ** write-transaction spanning more than one database file, this routine
// ** takes care of the super-journal trickery.
// */
func _vdbeCommit(tls *libc.TLS, db uintptr, p uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var i, nMainFile, nTrans, needXcommit, rc, retryCount, v5 int32
var offset Ti64
var pBt, pBt1, pBt2, pBt3, pBt4, pBt5, pPager, pVfs, zFile, zMainFile, zSuper uintptr
var v6 bool
var _ /* iRandom at bp+12 */ Tu32
var _ /* pSuperJrnl at bp+0 */ uintptr
var _ /* res at bp+8 */ int32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = i, nMainFile, nTrans, needXcommit, offset, pBt, pBt1, pBt2, pBt3, pBt4, pBt5, pPager, pVfs, rc, retryCount, zFile, zMainFile, zSuper, v5, v6
nTrans = 0 /* Number of databases with an active write-transaction
** that are candidates for a two-phase commit using a
** super-journal */
rc = SQLITE_OK
needXcommit = 0
/* Before doing anything else, call the xSync() callback for any
** virtual module tables written in this transaction. This has to
** be done before determining whether a super-journal file is
** required, as an xSync() callback may add an attached database
** to the transaction.
*/
rc = _sqlite3VtabSync(tls, db, p)
/* This loop determines (a) if the commit hook should be invoked and
** (b) how many database files have open write transactions, not
** including the temp database. (b) is important because if more than
** one database file has an open write transaction, a super-journal
** file is required for an atomic commit.
*/
i = 0
for {
if !(rc == SQLITE_OK && i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
pBt = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if _sqlite3BtreeTxnState(tls, pBt) == int32(SQLITE_TXN_WRITE) {
needXcommit = int32(1)
_sqlite3BtreeEnter(tls, pBt)
pPager = _sqlite3BtreePager(tls, pBt)
if int32((*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).Fsafety_level) != int32(PAGER_SYNCHRONOUS_OFF) && _aMJNeeded[_sqlite3PagerGetJournalMode(tls, pPager)] != 0 && _sqlite3PagerIsMemdb(tls, pPager) == 0 {
nTrans++
}
rc = _sqlite3PagerExclusiveLock(tls, pPager)
_sqlite3BtreeLeave(tls, pBt)
}
goto _1
_1:
;
i++
}
if rc != SQLITE_OK {
return rc
}
/* If there are any write-transactions at all, invoke the commit hook */
if needXcommit != 0 && (*Tsqlite3)(unsafe.Pointer(db)).FxCommitCallback != 0 {
rc = (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxCommitCallback})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpCommitArg)
if rc != 0 {
return libc.Int32FromInt32(SQLITE_CONSTRAINT) | libc.Int32FromInt32(2)< int32(100) {
Xsqlite3_log(tls, int32(SQLITE_FULL), __ccgo_ts+5185, libc.VaList(bp+24, zSuper))
_sqlite3OsDelete(tls, pVfs, zSuper, 0)
break
} else {
if retryCount == int32(1) {
Xsqlite3_log(tls, int32(SQLITE_FULL), __ccgo_ts+5199, libc.VaList(bp+24, zSuper))
}
}
}
retryCount++
Xsqlite3_randomness(tls, int32(4), bp+12)
Xsqlite3_snprintf(tls, int32(13), zSuper+uintptr(nMainFile), __ccgo_ts+5214, libc.VaList(bp+24, *(*Tu32)(unsafe.Pointer(bp + 12))>>libc.Int32FromInt32(8)&uint32(0xffffff), *(*Tu32)(unsafe.Pointer(bp + 12))&uint32(0xff)))
/* The antipenultimate character of the super-journal name must
** be "9" to avoid name collisions when using 8+3 filenames. */
rc = _sqlite3OsAccess(tls, pVfs, zSuper, SQLITE_ACCESS_EXISTS, bp+8)
}
if rc == SQLITE_OK {
/* Open the super-journal. */
rc = _sqlite3OsOpenMalloc(tls, pVfs, zSuper, bp, libc.Int32FromInt32(SQLITE_OPEN_READWRITE)|libc.Int32FromInt32(SQLITE_OPEN_CREATE)|libc.Int32FromInt32(SQLITE_OPEN_EXCLUSIVE)|libc.Int32FromInt32(SQLITE_OPEN_SUPER_JOURNAL), uintptr(0))
}
if rc != SQLITE_OK {
_sqlite3DbFree(tls, db, zSuper-uintptr(4))
return rc
}
/* Write the name of each database file in the transaction into the new
** super-journal file. If an error occurs at this point close
** and delete the super-journal file. All the individual journal files
** still have 'null' as the super-journal pointer, so they will roll
** back independently if a failure occurs.
*/
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
pBt3 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if _sqlite3BtreeTxnState(tls, pBt3) == int32(SQLITE_TXN_WRITE) {
zFile = _sqlite3BtreeGetJournalname(tls, pBt3)
if zFile == uintptr(0) {
goto _4 /* Ignore TEMP and :memory: databases */
}
rc = _sqlite3OsWrite(tls, *(*uintptr)(unsafe.Pointer(bp)), zFile, _sqlite3Strlen30(tls, zFile)+int32(1), offset)
offset += int64(_sqlite3Strlen30(tls, zFile) + int32(1))
if rc != SQLITE_OK {
_sqlite3OsCloseFree(tls, *(*uintptr)(unsafe.Pointer(bp)))
_sqlite3OsDelete(tls, pVfs, zSuper, 0)
_sqlite3DbFree(tls, db, zSuper-uintptr(4))
return rc
}
}
goto _4
_4:
;
i++
}
/* Sync the super-journal file. If the IOCAP_SEQUENTIAL device
** flag is set this is not required.
*/
if v6 = 0 == _sqlite3OsDeviceCharacteristics(tls, *(*uintptr)(unsafe.Pointer(bp)))&int32(SQLITE_IOCAP_SEQUENTIAL); v6 {
v5 = _sqlite3OsSync(tls, *(*uintptr)(unsafe.Pointer(bp)), int32(SQLITE_SYNC_NORMAL))
rc = v5
}
if v6 && SQLITE_OK != v5 {
_sqlite3OsCloseFree(tls, *(*uintptr)(unsafe.Pointer(bp)))
_sqlite3OsDelete(tls, pVfs, zSuper, 0)
_sqlite3DbFree(tls, db, zSuper-uintptr(4))
return rc
}
/* Sync all the db files involved in the transaction. The same call
** sets the super-journal pointer in each individual journal. If
** an error occurs here, do not delete the super-journal file.
**
** If the error occurs during the first call to
** sqlite3BtreeCommitPhaseOne(), then there is a chance that the
** super-journal file will be orphaned. But we cannot delete it,
** in case the super-journal file name was written into the journal
** file before the failure occurred.
*/
i = 0
for {
if !(rc == SQLITE_OK && i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
pBt4 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if pBt4 != 0 {
rc = _sqlite3BtreeCommitPhaseOne(tls, pBt4, zSuper)
}
goto _7
_7:
;
i++
}
_sqlite3OsCloseFree(tls, *(*uintptr)(unsafe.Pointer(bp)))
if rc != SQLITE_OK {
_sqlite3DbFree(tls, db, zSuper-uintptr(4))
return rc
}
/* Delete the super-journal file. This commits the transaction. After
** doing this the directory is synced again before any individual
** transaction files are deleted.
*/
rc = _sqlite3OsDelete(tls, pVfs, zSuper, int32(1))
_sqlite3DbFree(tls, db, zSuper-uintptr(4))
zSuper = uintptr(0)
if rc != 0 {
return rc
}
/* All files and directories have already been synced, so the following
** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and
** deleting or truncating journals. If something goes wrong while
** this is happening we don't really care. The integrity of the
** transaction is already guaranteed, but some stray 'cold' journals
** may be lying around. Returning an error code won't help matters.
*/
_sqlite3BeginBenignMalloc(tls)
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
pBt5 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if pBt5 != 0 {
_sqlite3BtreeCommitPhaseTwo(tls, pBt5, int32(1))
}
goto _8
_8:
;
i++
}
_sqlite3EndBenignMalloc(tls)
_sqlite3VtabCommit(tls, db)
}
return rc
}
/* Whether or not a database might need a super-journal depends upon
** its journal mode (among other things). This matrix determines which
** journal modes use a super-journal and which do not */
var _aMJNeeded = [6]Tu8{
0: uint8(1),
1: uint8(1),
3: uint8(1),
}
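// _aMJNeeded is indexed by the journal-mode code returned from
// _sqlite3PagerGetJournalMode in _vdbeCommit above. Going by the SQLite sources
// (an assumption here, since the PAGER_JOURNALMODE_* constants are defined
// elsewhere in this file), index 0 is DELETE, 1 is PERSIST, 2 is OFF, 3 is
// TRUNCATE, 4 is MEMORY and 5 is WAL, so only DELETE, PERSIST and TRUNCATE ever
// require a super-journal for a multi-database commit. An illustrative
// restatement:
func exampleNeedsSuperJournal(journalMode int32) bool {
	switch journalMode {
	case 0, 1, 3: // DELETE, PERSIST, TRUNCATE (assumed constant values)
		return true
	default: // OFF, MEMORY, WAL
		return false
	}
}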
/*
** This routine checks that the sqlite3.nVdbeActive count variable
** matches the number of vdbe's in the list sqlite3.pVdbe that are
** currently active. An assertion fails if the two counts do not match.
** This is an internal self-check only - it is not an essential processing
** step.
**
** This is a no-op if NDEBUG is defined.
*/
// C documentation
//
// /*
// ** If the Vdbe passed as the first argument opened a statement-transaction,
// ** close it now. Argument eOp must be either SAVEPOINT_ROLLBACK or
// ** SAVEPOINT_RELEASE. If it is SAVEPOINT_ROLLBACK, then the statement
// ** transaction is rolled back. If eOp is SAVEPOINT_RELEASE, then the
// ** statement transaction is committed.
// **
// ** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned.
// ** Otherwise SQLITE_OK.
// */
func _vdbeCloseStatement(tls *libc.TLS, p uintptr, eOp int32) (r int32) {
var db, pBt uintptr
var i, iSavepoint, rc, rc2 int32
_, _, _, _, _, _ = db, i, iSavepoint, pBt, rc, rc2
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
rc = SQLITE_OK
iSavepoint = (*TVdbe)(unsafe.Pointer(p)).FiStatement - int32(1)
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
rc2 = SQLITE_OK
pBt = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if pBt != 0 {
if eOp == int32(SAVEPOINT_ROLLBACK) {
rc2 = _sqlite3BtreeSavepoint(tls, pBt, int32(SAVEPOINT_ROLLBACK), iSavepoint)
}
if rc2 == SQLITE_OK {
rc2 = _sqlite3BtreeSavepoint(tls, pBt, int32(SAVEPOINT_RELEASE), iSavepoint)
}
if rc == SQLITE_OK {
rc = rc2
}
}
goto _1
_1:
;
i++
}
(*Tsqlite3)(unsafe.Pointer(db)).FnStatement--
(*TVdbe)(unsafe.Pointer(p)).FiStatement = 0
if rc == SQLITE_OK {
if eOp == int32(SAVEPOINT_ROLLBACK) {
rc = _sqlite3VtabSavepoint(tls, db, int32(SAVEPOINT_ROLLBACK), iSavepoint)
}
if rc == SQLITE_OK {
rc = _sqlite3VtabSavepoint(tls, db, int32(SAVEPOINT_RELEASE), iSavepoint)
}
}
/* If the statement transaction is being rolled back, also restore the
** database handle's deferred constraint counter to the value it had when
** the statement transaction was opened. */
if eOp == int32(SAVEPOINT_ROLLBACK) {
(*Tsqlite3)(unsafe.Pointer(db)).FnDeferredCons = (*TVdbe)(unsafe.Pointer(p)).FnStmtDefCons
(*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons = (*TVdbe)(unsafe.Pointer(p)).FnStmtDefImmCons
}
return rc
}
func _sqlite3VdbeCloseStatement(tls *libc.TLS, p uintptr, eOp int32) (r int32) {
if (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FnStatement != 0 && (*TVdbe)(unsafe.Pointer(p)).FiStatement != 0 {
return _vdbeCloseStatement(tls, p, eOp)
}
return SQLITE_OK
}
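// _vdbeCloseStatement above closes a statement sub-transaction by driving the
// btree savepoint machinery: a rollback first applies SAVEPOINT_ROLLBACK and
// then SAVEPOINT_RELEASE for the same savepoint index, while a commit applies
// only the release. The sketch below restates that sequencing; rollbackTo and
// release are stand-ins for _sqlite3BtreeSavepoint with the two opcodes, not
// real functions from this file.
func exampleCloseStatement(isRollback bool, iSavepoint int32, rollbackTo, release func(int32) error) error {
	if isRollback {
		// Undo the statement's changes first...
		if err := rollbackTo(iSavepoint); err != nil {
			return err
		}
	}
	// ...then always discard the savepoint itself.
	return release(iSavepoint)
}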
// C documentation
//
// /*
// ** This function is called when a transaction opened by the database
// ** handle associated with the VM passed as an argument is about to be
// ** committed. If there are outstanding deferred foreign key constraint
// ** violations, return SQLITE_ERROR. Otherwise, SQLITE_OK.
// **
// ** If there are outstanding FK violations and this function returns
// ** SQLITE_ERROR, set the result of the VM to SQLITE_CONSTRAINT_FOREIGNKEY
// ** and write an error message to it. Then return SQLITE_ERROR.
// */
func _sqlite3VdbeCheckFk(tls *libc.TLS, p uintptr, deferred int32) (r int32) {
var db uintptr
_ = db
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
if deferred != 0 && (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredCons+(*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons > 0 || !(deferred != 0) && (*TVdbe)(unsafe.Pointer(p)).FnFkConstraint > 0 {
(*TVdbe)(unsafe.Pointer(p)).Frc = libc.Int32FromInt32(SQLITE_CONSTRAINT) | libc.Int32FromInt32(3)<>7)) != 0 { /* Primary error code from p->rc */
eStatementOp = 0 /* Set to true if a 'special' error */
/* Lock all btrees used by the statement */
_sqlite3VdbeEnter(tls, p)
/* Check for one of the special errors */
if (*TVdbe)(unsafe.Pointer(p)).Frc != 0 {
mrc = (*TVdbe)(unsafe.Pointer(p)).Frc & int32(0xff)
isSpecialError = libc.BoolInt32(mrc == int32(SQLITE_NOMEM) || mrc == int32(SQLITE_IOERR) || mrc == int32(SQLITE_INTERRUPT) || mrc == int32(SQLITE_FULL))
} else {
v1 = libc.Int32FromInt32(0)
isSpecialError = v1
mrc = v1
}
if isSpecialError != 0 {
/* If the query was read-only and the error code is SQLITE_INTERRUPT,
** no rollback is necessary. Otherwise, at least a savepoint
** transaction must be rolled back to restore the database to a
** consistent state.
**
** Even if the statement is read-only, it is important to perform
** a statement or transaction rollback operation. If the error
** occurred while writing to the journal, sub-journal or database
** file as part of an effort to free up cache space (see function
** pagerStress() in pager.c), the rollback is required to restore
** the pager to a consistent state.
*/
if !(int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x40>>6)) != 0) || mrc != int32(SQLITE_INTERRUPT) {
if (mrc == int32(SQLITE_NOMEM) || mrc == int32(SQLITE_FULL)) && int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x20>>5)) != 0 {
eStatementOp = int32(SAVEPOINT_ROLLBACK)
} else {
/* We are forced to roll back the active transaction. Before doing
** so, abort any other statements this handle currently has active.
*/
_sqlite3RollbackAll(tls, db, libc.Int32FromInt32(SQLITE_ABORT)|libc.Int32FromInt32(2)< 0 && (*Tsqlite3)(unsafe.Pointer(db)).FaVTrans == uintptr(0)) && (*Tsqlite3)(unsafe.Pointer(db)).FautoCommit != 0 && (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeWrite == libc.BoolInt32(int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x40>>6)) == 0) {
if (*TVdbe)(unsafe.Pointer(p)).Frc == SQLITE_OK || int32((*TVdbe)(unsafe.Pointer(p)).FerrorAction) == int32(OE_Fail) && !(isSpecialError != 0) {
rc = _sqlite3VdbeCheckFk(tls, p, int32(1))
if rc != SQLITE_OK {
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x40>>6)) != 0 {
_sqlite3VdbeLeave(tls, p)
return int32(SQLITE_ERROR)
}
rc = libc.Int32FromInt32(SQLITE_CONSTRAINT) | libc.Int32FromInt32(3)<>6)) != 0 {
_sqlite3VdbeLeave(tls, p)
return int32(SQLITE_BUSY)
} else {
if rc != SQLITE_OK {
_sqlite3SystemError(tls, db, rc)
(*TVdbe)(unsafe.Pointer(p)).Frc = rc
_sqlite3RollbackAll(tls, db, SQLITE_OK)
(*TVdbe)(unsafe.Pointer(p)).FnChange = 0
} else {
(*Tsqlite3)(unsafe.Pointer(db)).FnDeferredCons = 0
(*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons = 0
*(*Tu64)(unsafe.Pointer(db + 48)) &= ^libc.Uint64FromInt32(SQLITE_DeferFKs)
_sqlite3CommitInternalChanges(tls, db)
}
}
} else {
if (*TVdbe)(unsafe.Pointer(p)).Frc == int32(SQLITE_SCHEMA) && (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeActive > int32(1) {
(*TVdbe)(unsafe.Pointer(p)).FnChange = 0
} else {
_sqlite3RollbackAll(tls, db, SQLITE_OK)
(*TVdbe)(unsafe.Pointer(p)).FnChange = 0
}
}
(*Tsqlite3)(unsafe.Pointer(db)).FnStatement = 0
} else {
if eStatementOp == 0 {
if (*TVdbe)(unsafe.Pointer(p)).Frc == SQLITE_OK || int32((*TVdbe)(unsafe.Pointer(p)).FerrorAction) == int32(OE_Fail) {
eStatementOp = int32(SAVEPOINT_RELEASE)
} else {
if int32((*TVdbe)(unsafe.Pointer(p)).FerrorAction) == int32(OE_Abort) {
eStatementOp = int32(SAVEPOINT_ROLLBACK)
} else {
_sqlite3RollbackAll(tls, db, libc.Int32FromInt32(SQLITE_ABORT)|libc.Int32FromInt32(2)<>4)) != 0 {
if eStatementOp != int32(SAVEPOINT_ROLLBACK) {
_sqlite3VdbeSetChanges(tls, db, (*TVdbe)(unsafe.Pointer(p)).FnChange)
} else {
_sqlite3VdbeSetChanges(tls, db, 0)
}
(*TVdbe)(unsafe.Pointer(p)).FnChange = 0
}
/* Release the locks */
_sqlite3VdbeLeave(tls, p)
}
/* We have successfully halted and closed the VM. Record this fact. */
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeActive--
if !(int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x40>>6)) != 0) {
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeWrite--
}
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x80>>7)) != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeRead--
}
(*TVdbe)(unsafe.Pointer(p)).FeVdbeState = uint8(VDBE_HALT_STATE)
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
(*TVdbe)(unsafe.Pointer(p)).Frc = int32(SQLITE_NOMEM)
}
/* If the auto-commit flag is set to true, then any locks that were held
** by connection db have now been released. Call sqlite3ConnectionUnlocked()
** to invoke any required unlock-notify callbacks.
*/
if (*Tsqlite3)(unsafe.Pointer(db)).FautoCommit != 0 {
_sqlite3ConnectionUnlocked(tls, db)
}
if (*TVdbe)(unsafe.Pointer(p)).Frc == int32(SQLITE_BUSY) {
v2 = int32(SQLITE_BUSY)
} else {
v2 = SQLITE_OK
}
return v2
}
// C documentation
//
// /*
// ** Each VDBE holds the result of the most recent sqlite3_step() call
// ** in p->rc. This routine sets that result back to SQLITE_OK.
// */
func _sqlite3VdbeResetStepResult(tls *libc.TLS, p uintptr) {
(*TVdbe)(unsafe.Pointer(p)).Frc = SQLITE_OK
}
// C documentation
//
// /*
// ** Copy the error code and error message belonging to the VDBE passed
// ** as the first argument to its database handle (so that they will be
// ** returned by calls to sqlite3_errcode() and sqlite3_errmsg()).
// **
// ** This function does not clear the VDBE error code or message, just
// ** copies them to the database handle.
// */
func _sqlite3VdbeTransferError(tls *libc.TLS, p uintptr) (r int32) {
var db uintptr
var rc int32
_, _ = db, rc
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
rc = (*TVdbe)(unsafe.Pointer(p)).Frc
if (*TVdbe)(unsafe.Pointer(p)).FzErrMsg != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FbBenignMalloc++
_sqlite3BeginBenignMalloc(tls)
if (*Tsqlite3)(unsafe.Pointer(db)).FpErr == uintptr(0) {
(*Tsqlite3)(unsafe.Pointer(db)).FpErr = _sqlite3ValueNew(tls, db)
}
_sqlite3ValueSetStr(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpErr, -int32(1), (*TVdbe)(unsafe.Pointer(p)).FzErrMsg, uint8(SQLITE_UTF8), uintptr(-libc.Int32FromInt32(1)))
_sqlite3EndBenignMalloc(tls)
(*Tsqlite3)(unsafe.Pointer(db)).FbBenignMalloc--
} else {
if (*Tsqlite3)(unsafe.Pointer(db)).FpErr != 0 {
_sqlite3ValueSetNull(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpErr)
}
}
(*Tsqlite3)(unsafe.Pointer(db)).FerrCode = rc
(*Tsqlite3)(unsafe.Pointer(db)).FerrByteOffset = -int32(1)
return rc
}
// C documentation
//
// /*
// ** Clean up a VDBE after execution but do not delete the VDBE just yet.
// ** Any error message is copied to the database handle. Return the result code.
// **
// ** After this routine is run, the VDBE should be ready to be executed
// ** again.
// **
// ** To look at it another way, this routine resets the state of the
// ** virtual machine from VDBE_RUN_STATE or VDBE_HALT_STATE back to
// ** VDBE_READY_STATE.
// */
func _sqlite3VdbeReset(tls *libc.TLS, p uintptr) (r int32) {
var db uintptr
_ = db
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
/* If the VM did not run to completion or if it encountered an
** error, then it might not have been halted properly. So halt
** it now.
*/
if int32((*TVdbe)(unsafe.Pointer(p)).FeVdbeState) == int32(VDBE_RUN_STATE) {
_sqlite3VdbeHalt(tls, p)
}
/* If the VDBE has been run even partially, then transfer the error code
** and error message from the VDBE into the main database structure. But
** if the VDBE has just been set to run but has not actually executed any
** instructions yet, leave the main database error information unchanged.
*/
if (*TVdbe)(unsafe.Pointer(p)).Fpc >= 0 {
if (*Tsqlite3)(unsafe.Pointer(db)).FpErr != 0 || (*TVdbe)(unsafe.Pointer(p)).FzErrMsg != 0 {
_sqlite3VdbeTransferError(tls, p)
} else {
(*Tsqlite3)(unsafe.Pointer(db)).FerrCode = (*TVdbe)(unsafe.Pointer(p)).Frc
}
}
/* Reset register contents and reclaim error message memory.
*/
if (*TVdbe)(unsafe.Pointer(p)).FzErrMsg != 0 {
_sqlite3DbFree(tls, db, (*TVdbe)(unsafe.Pointer(p)).FzErrMsg)
(*TVdbe)(unsafe.Pointer(p)).FzErrMsg = uintptr(0)
}
(*TVdbe)(unsafe.Pointer(p)).FpResultRow = uintptr(0)
/* Save profiling information from this VDBE run.
*/
return (*TVdbe)(unsafe.Pointer(p)).Frc & (*Tsqlite3)(unsafe.Pointer(db)).FerrMask
}
// C documentation
//
// /*
// ** Clean up and delete a VDBE after execution. Return an integer which is
// ** the result code. Any error message is transferred to the database handle.
// */
func _sqlite3VdbeFinalize(tls *libc.TLS, p uintptr) (r int32) {
var rc int32
_ = rc
rc = SQLITE_OK
if int32((*TVdbe)(unsafe.Pointer(p)).FeVdbeState) >= int32(VDBE_READY_STATE) {
rc = _sqlite3VdbeReset(tls, p)
}
_sqlite3VdbeDelete(tls, p)
return rc
}
// C documentation
//
// /*
// ** If parameter iOp is less than zero, then invoke the destructor for
// ** all auxiliary data pointers currently cached by the VM passed as
// ** the first argument.
// **
// ** Or, if iOp is greater than or equal to zero, then the destructor is
// ** only invoked for those auxiliary data pointers created by the user
// ** function invoked by the OP_Function opcode at instruction iOp of
// ** VM pVdbe, and only then if:
// **
// ** * the associated function parameter is the 32nd or later (counting
// ** from left to right), or
// **
// ** * the corresponding bit in argument mask is clear (where the first
// ** function parameter corresponds to bit 0 etc.).
// */
func _sqlite3VdbeDeleteAuxData(tls *libc.TLS, db uintptr, pp uintptr, iOp int32, mask int32) {
var pAux uintptr
_ = pAux
for *(*uintptr)(unsafe.Pointer(pp)) != 0 {
pAux = *(*uintptr)(unsafe.Pointer(pp))
if iOp < 0 || (*TAuxData)(unsafe.Pointer(pAux)).FiAuxOp == iOp && (*TAuxData)(unsafe.Pointer(pAux)).FiAuxArg >= 0 && ((*TAuxData)(unsafe.Pointer(pAux)).FiAuxArg > int32(31) || !(uint32(mask)&(libc.Uint32FromInt32(1)<<(*TAuxData)(unsafe.Pointer(pAux)).FiAuxArg) != 0)) {
if (*TAuxData)(unsafe.Pointer(pAux)).FxDeleteAux != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TAuxData)(unsafe.Pointer(pAux)).FxDeleteAux})))(tls, (*TAuxData)(unsafe.Pointer(pAux)).FpAux)
}
*(*uintptr)(unsafe.Pointer(pp)) = (*TAuxData)(unsafe.Pointer(pAux)).FpNextAux
_sqlite3DbFree(tls, db, pAux)
} else {
pp = pAux + 24
}
}
}
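// The deletion predicate used in _sqlite3VdbeDeleteAuxData above, restated as an
// illustrative helper: with a non-negative iOp, an aux-data entry is destroyed
// only when its opcode matches and either its argument index is 32 or greater or
// the corresponding bit in mask is clear. iAuxOp and iAuxArg mirror the field
// names used in the loop above.
func exampleShouldDeleteAuxData(iOp, mask, iAuxOp, iAuxArg int32) bool {
	if iOp < 0 {
		return true // negative iOp: destroy every cached aux-data entry
	}
	if iAuxOp != iOp || iAuxArg < 0 {
		return false
	}
	// The 32nd and later parameters are never preserved; earlier ones survive
	// only if their bit is set in mask.
	return iAuxArg > 31 || uint32(mask)&(uint32(1)<<uint32(iAuxArg)) == 0
}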
// C documentation
//
// /*
// ** Free all memory associated with the Vdbe passed as the second argument,
// ** except for the object itself, which is preserved.
// **
// ** The difference between this function and sqlite3VdbeDelete() is that
// ** VdbeDelete() also unlinks the Vdbe from the list of VMs associated with
// ** the database connection and frees the object itself.
// */
func _sqlite3VdbeClearObject(tls *libc.TLS, db uintptr, p uintptr) {
var pNext, pSub uintptr
_, _ = pNext, pSub
if (*TVdbe)(unsafe.Pointer(p)).FaColName != 0 {
_releaseMemArray(tls, (*TVdbe)(unsafe.Pointer(p)).FaColName, int32((*TVdbe)(unsafe.Pointer(p)).FnResAlloc)*int32(COLNAME_N))
_sqlite3DbNNFreeNN(tls, db, (*TVdbe)(unsafe.Pointer(p)).FaColName)
}
pSub = (*TVdbe)(unsafe.Pointer(p)).FpProgram
for {
if !(pSub != 0) {
break
}
pNext = (*TSubProgram)(unsafe.Pointer(pSub)).FpNext
_vdbeFreeOpArray(tls, db, (*TSubProgram)(unsafe.Pointer(pSub)).FaOp, (*TSubProgram)(unsafe.Pointer(pSub)).FnOp)
_sqlite3DbFree(tls, db, pSub)
goto _1
_1:
;
pSub = pNext
}
if int32((*TVdbe)(unsafe.Pointer(p)).FeVdbeState) != VDBE_INIT_STATE {
_releaseMemArray(tls, (*TVdbe)(unsafe.Pointer(p)).FaVar, int32((*TVdbe)(unsafe.Pointer(p)).FnVar))
if (*TVdbe)(unsafe.Pointer(p)).FpVList != 0 {
_sqlite3DbNNFreeNN(tls, db, (*TVdbe)(unsafe.Pointer(p)).FpVList)
}
if (*TVdbe)(unsafe.Pointer(p)).FpFree != 0 {
_sqlite3DbNNFreeNN(tls, db, (*TVdbe)(unsafe.Pointer(p)).FpFree)
}
}
_vdbeFreeOpArray(tls, db, (*TVdbe)(unsafe.Pointer(p)).FaOp, (*TVdbe)(unsafe.Pointer(p)).FnOp)
if (*TVdbe)(unsafe.Pointer(p)).FzSql != 0 {
_sqlite3DbNNFreeNN(tls, db, (*TVdbe)(unsafe.Pointer(p)).FzSql)
}
}
// C documentation
//
// /*
// ** Delete an entire VDBE.
// */
func _sqlite3VdbeDelete(tls *libc.TLS, p uintptr) {
var db uintptr
_ = db
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
_sqlite3VdbeClearObject(tls, db, p)
if (*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed == uintptr(0) {
*(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FppVPrev)) = (*TVdbe)(unsafe.Pointer(p)).FpVNext
if (*TVdbe)(unsafe.Pointer(p)).FpVNext != 0 {
(*TVdbe)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FpVNext)).FppVPrev = (*TVdbe)(unsafe.Pointer(p)).FppVPrev
}
}
_sqlite3DbNNFreeNN(tls, db, p)
}
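// _sqlite3VdbeDelete unlinks the Vdbe from the connection's list of prepared
// statements using the "pointer to the previous next-pointer" idiom: FppVPrev
// holds the address of whatever pointer currently refers to this Vdbe, so the
// head of the list needs no special case. A generic sketch of the same idiom
// (illustrative only, unrelated to the generated types above):
type exampleListNode struct {
	next     *exampleListNode
	prevNext **exampleListNode // address of the pointer that points at this node
}

func exampleUnlink(n *exampleListNode) {
	*n.prevNext = n.next // splice this node out of the chain
	if n.next != nil {
		n.next.prevNext = n.prevNext // successor now hangs off our predecessor
	}
}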
// C documentation
//
// /*
// ** The cursor "p" has a pending seek operation that has not yet been
// ** carried out. Seek the cursor now. If an error occurs, return
// ** the appropriate error code.
// */
func _sqlite3VdbeFinishMoveto(tls *libc.TLS, p uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* res at bp+0 */ int32
_ = rc
rc = _sqlite3BtreeTableMoveto(tls, *(*uintptr)(unsafe.Pointer(p + 48)), (*TVdbeCursor)(unsafe.Pointer(p)).FmovetoTarget, 0, bp)
if rc != 0 {
return rc
}
if *(*int32)(unsafe.Pointer(bp)) != 0 {
return _sqlite3CorruptError(tls, int32(88164))
}
(*TVdbeCursor)(unsafe.Pointer(p)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(p)).FcacheStatus = uint32(CACHE_STALE)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Something has moved cursor "p" out of place. Maybe the row it was
// ** pointing to was deleted out from under it. Or maybe the btree was
// ** rebalanced. Whatever the cause, try to restore "p" to the place it
// ** is supposed to be pointing. If the row was deleted out from under the
// ** cursor, set the cursor to point to a NULL row.
// */
func _sqlite3VdbeHandleMovedCursor(tls *libc.TLS, p uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* isDifferentRow at bp+0 */ int32
_ = rc
rc = _sqlite3BtreeCursorRestore(tls, *(*uintptr)(unsafe.Pointer(p + 48)), bp)
(*TVdbeCursor)(unsafe.Pointer(p)).FcacheStatus = uint32(CACHE_STALE)
if *(*int32)(unsafe.Pointer(bp)) != 0 {
(*TVdbeCursor)(unsafe.Pointer(p)).FnullRow = uint8(1)
}
return rc
}
// C documentation
//
// /*
// ** Check to ensure that the cursor is valid. Restore the cursor
// ** if need be. Return any I/O error from the restore operation.
// */
func _sqlite3VdbeCursorRestore(tls *libc.TLS, p uintptr) (r int32) {
if _sqlite3BtreeCursorHasMoved(tls, *(*uintptr)(unsafe.Pointer(p + 48))) != 0 {
return _sqlite3VdbeHandleMovedCursor(tls, p)
}
return SQLITE_OK
}
/*
** The following functions:
**
** sqlite3VdbeSerialType()
** sqlite3VdbeSerialTypeLen()
** sqlite3VdbeSerialLen()
** sqlite3VdbeSerialPut() <--- in-lined into OP_MakeRecord as of 2022-04-02
** sqlite3VdbeSerialGet()
**
** encapsulate the code that serializes values for storage in SQLite
** data and index records. Each serialized value consists of a
** 'serial-type' and a blob of data. The serial type is an 8-byte unsigned
** integer, stored as a varint.
**
** In an SQLite index record, the serial type is stored directly before
** the blob of data that it corresponds to. In a table record, all serial
** types are stored at the start of the record, and the blobs of data at
** the end. Hence these functions allow the caller to handle the
** serial-type and data blob separately.
**
** The following table describes the various storage classes for data:
**
** serial type bytes of data type
** -------------- --------------- ---------------
** 0 0 NULL
** 1 1 signed integer
** 2 2 signed integer
** 3 3 signed integer
** 4 4 signed integer
** 5 6 signed integer
** 6 8 signed integer
** 7 8 IEEE float
** 8 0 Integer constant 0
** 9 0 Integer constant 1
** 10,11 reserved for expansion
** N>=12 and even (N-12)/2 BLOB
** N>=13 and odd (N-13)/2 text
**
** The 8 and 9 types were added in 3.3.0, file format 4. Prior versions
** of SQLite will not understand those serial types.
*/
// C documentation
//
// /*
// ** The sizes for serial types less than 128
// */
var _sqlite3SmallTypeSizes = [128]Tu8{
1: uint8(1),
2: uint8(2),
3: uint8(3),
4: uint8(4),
5: uint8(6),
6: uint8(8),
7: uint8(8),
14: uint8(1),
15: uint8(1),
16: uint8(2),
17: uint8(2),
18: uint8(3),
19: uint8(3),
20: uint8(4),
21: uint8(4),
22: uint8(5),
23: uint8(5),
24: uint8(6),
25: uint8(6),
26: uint8(7),
27: uint8(7),
28: uint8(8),
29: uint8(8),
30: uint8(9),
31: uint8(9),
32: uint8(10),
33: uint8(10),
34: uint8(11),
35: uint8(11),
36: uint8(12),
37: uint8(12),
38: uint8(13),
39: uint8(13),
40: uint8(14),
41: uint8(14),
42: uint8(15),
43: uint8(15),
44: uint8(16),
45: uint8(16),
46: uint8(17),
47: uint8(17),
48: uint8(18),
49: uint8(18),
50: uint8(19),
51: uint8(19),
52: uint8(20),
53: uint8(20),
54: uint8(21),
55: uint8(21),
56: uint8(22),
57: uint8(22),
58: uint8(23),
59: uint8(23),
60: uint8(24),
61: uint8(24),
62: uint8(25),
63: uint8(25),
64: uint8(26),
65: uint8(26),
66: uint8(27),
67: uint8(27),
68: uint8(28),
69: uint8(28),
70: uint8(29),
71: uint8(29),
72: uint8(30),
73: uint8(30),
74: uint8(31),
75: uint8(31),
76: uint8(32),
77: uint8(32),
78: uint8(33),
79: uint8(33),
80: uint8(34),
81: uint8(34),
82: uint8(35),
83: uint8(35),
84: uint8(36),
85: uint8(36),
86: uint8(37),
87: uint8(37),
88: uint8(38),
89: uint8(38),
90: uint8(39),
91: uint8(39),
92: uint8(40),
93: uint8(40),
94: uint8(41),
95: uint8(41),
96: uint8(42),
97: uint8(42),
98: uint8(43),
99: uint8(43),
100: uint8(44),
101: uint8(44),
102: uint8(45),
103: uint8(45),
104: uint8(46),
105: uint8(46),
106: uint8(47),
107: uint8(47),
108: uint8(48),
109: uint8(48),
110: uint8(49),
111: uint8(49),
112: uint8(50),
113: uint8(50),
114: uint8(51),
115: uint8(51),
116: uint8(52),
117: uint8(52),
118: uint8(53),
119: uint8(53),
120: uint8(54),
121: uint8(54),
122: uint8(55),
123: uint8(55),
124: uint8(56),
125: uint8(56),
126: uint8(57),
127: uint8(57),
}
// C documentation
//
// /*
// ** Return the length of the data corresponding to the supplied serial-type.
// */
func _sqlite3VdbeSerialTypeLen(tls *libc.TLS, serial_type Tu32) (r Tu32) {
if serial_type >= uint32(128) {
return (serial_type - uint32(12)) / uint32(2)
} else {
return uint32(_sqlite3SmallTypeSizes[serial_type])
}
return r
}
func _sqlite3VdbeOneByteSerialTypeLen(tls *libc.TLS, serial_type Tu8) (r Tu8) {
return _sqlite3SmallTypeSizes[serial_type]
}
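// A worked restatement of the serial-type length rule implemented above and laid
// out in the table preceding _sqlite3SmallTypeSizes: for serial types N >= 12,
// even values are BLOBs of (N-12)/2 bytes and odd values are text of (N-13)/2
// bytes (the same integer division covers both), while the small types use the
// fixed sizes from the table. Illustrative only; it mirrors, but is not used by,
// _sqlite3VdbeSerialTypeLen.
func exampleSerialTypeLen(serialType uint32) uint32 {
	if serialType >= 12 {
		// e.g. type 44 is a 16-byte BLOB, type 45 is 16 bytes of text.
		return (serialType - 12) / 2
	}
	// Types 0, 8, 9, 10 and 11 carry no data bytes at all.
	small := [12]uint32{0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0}
	return small[serialType]
}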
/*
** If we are on an architecture with mixed-endian floating
** points (ex: ARM7) then swap the lower 4 bytes with the
** upper 4 bytes. Return the result.
**
** For most architectures, this is a no-op.
**
** (later): It is reported to me that the mixed-endian problem
** on ARM7 is an issue with GCC, not with the ARM7 chip. It seems
** that early versions of GCC stored the two words of a 64-bit
** float in the wrong order. And that error has been propagated
** ever since. The blame is not necessarily with GCC, though.
** GCC might have just copied the problem from a prior compiler.
** I am also told that newer versions of GCC that follow a different
** ABI get the byte order right.
**
** Developers using SQLite on an ARM7 should compile and run their
** application using -DSQLITE_DEBUG=1 at least once. With DEBUG
** enabled, some asserts below will ensure that the byte order of
** floating point values is correct.
**
** (2007-08-30) Frank van Vugt has studied this problem closely
** and has sent his findings to the SQLite developers. Frank
** writes that some Linux kernels offer floating point hardware
** emulation that uses only 32-bit mantissas instead of a full
** 48 bits as required by the IEEE standard. (This is the
** CONFIG_FPE_FASTFPE option.) On such systems, floating point
** byte swapping becomes very complicated. To avoid problems,
** the necessary byte swapping is carried out using a 64-bit integer
** rather than a 64-bit float. Frank assures us that the code here
** works for him. We, the developers, have no way to independently
** verify this, but Frank seems to know what he is talking about
** so we trust him.
*/
/* Input "x" is a sequence of unsigned characters that represent a
** big-endian integer. Return the equivalent native integer
*/
// C documentation
//
// /*
// ** Deserialize the data blob pointed to by buf as serial type serial_type
// ** and store the result in pMem.
// **
// ** This function is implemented as two separate routines for performance.
// ** The few cases that require local variables are broken out into a separate
// ** routine so that in most cases the overhead of moving the stack pointer
// ** is avoided.
// */
func _serialGet(tls *libc.TLS, buf uintptr, serial_type Tu32, pMem uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var y Tu32
var v1 int32
var _ /* x at bp+0 */ Tu64
_, _ = y, v1
*(*Tu64)(unsafe.Pointer(bp)) = uint64(uint32(*(*uint8)(unsafe.Pointer(buf)))<flags = 0; // sqlite3VdbeSerialGet() will set this for us */
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = 0
(*TMem)(unsafe.Pointer(pMem)).Fz = uintptr(0)
_sqlite3VdbeSerialGet(tls, aKey+uintptr(d), *(*Tu32)(unsafe.Pointer(bp + 4)), pMem)
d += _sqlite3VdbeSerialTypeLen(tls, *(*Tu32)(unsafe.Pointer(bp + 4)))
pMem += 56
u++
v3 = u
if int32(v3) >= int32((*TUnpackedRecord)(unsafe.Pointer(p)).FnField) {
break
}
}
if d > uint32(nKey) && u != 0 {
/* In a corrupt record entry, the last pMem might have been set up using
** uninitialized memory. Overwrite its value with NULL, to prevent
** warnings from MSAN. */
_sqlite3VdbeMemSetNull(tls, pMem-uintptr(1)*56)
}
(*TUnpackedRecord)(unsafe.Pointer(p)).FnField = u
}
// C documentation
//
// /*
// ** Both *pMem1 and *pMem2 contain string values. Compare the two values
// ** using the collation sequence pColl. As usual, return a negative, zero,
// ** or positive value if *pMem1 is less than, equal to, or greater than
// ** *pMem2, respectively. Similar in spirit to "rc = (*pMem1) - (*pMem2);".
// */
func _vdbeCompareMemString(tls *libc.TLS, pMem1 uintptr, pMem2 uintptr, pColl uintptr, prcErr uintptr) (r int32) {
bp := tls.Alloc(112)
defer tls.Free(112)
var rc int32
var v1, v2 uintptr
var _ /* c1 at bp+0 */ TMem
var _ /* c2 at bp+56 */ TMem
_, _, _ = rc, v1, v2
if int32((*TMem)(unsafe.Pointer(pMem1)).Fenc) == int32((*TCollSeq)(unsafe.Pointer(pColl)).Fenc) {
/* The strings are already in the correct encoding. Call the
** comparison function directly */
return (*(*func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*TCollSeq)(unsafe.Pointer(pColl)).FxCmp})))(tls, (*TCollSeq)(unsafe.Pointer(pColl)).FpUser, (*TMem)(unsafe.Pointer(pMem1)).Fn, (*TMem)(unsafe.Pointer(pMem1)).Fz, (*TMem)(unsafe.Pointer(pMem2)).Fn, (*TMem)(unsafe.Pointer(pMem2)).Fz)
} else {
_sqlite3VdbeMemInit(tls, bp, (*TMem)(unsafe.Pointer(pMem1)).Fdb, uint16(MEM_Null))
_sqlite3VdbeMemInit(tls, bp+56, (*TMem)(unsafe.Pointer(pMem1)).Fdb, uint16(MEM_Null))
_sqlite3VdbeMemShallowCopy(tls, bp, pMem1, int32(MEM_Ephem))
_sqlite3VdbeMemShallowCopy(tls, bp+56, pMem2, int32(MEM_Ephem))
v1 = _sqlite3ValueText(tls, bp, (*TCollSeq)(unsafe.Pointer(pColl)).Fenc)
v2 = _sqlite3ValueText(tls, bp+56, (*TCollSeq)(unsafe.Pointer(pColl)).Fenc)
if v1 == uintptr(0) || v2 == uintptr(0) {
if prcErr != 0 {
*(*Tu8)(unsafe.Pointer(prcErr)) = uint8(SQLITE_NOMEM)
}
rc = 0
} else {
rc = (*(*func(*libc.TLS, uintptr, int32, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*TCollSeq)(unsafe.Pointer(pColl)).FxCmp})))(tls, (*TCollSeq)(unsafe.Pointer(pColl)).FpUser, (*(*TMem)(unsafe.Pointer(bp))).Fn, v1, (*(*TMem)(unsafe.Pointer(bp + 56))).Fn, v2)
}
_sqlite3VdbeMemReleaseMalloc(tls, bp)
_sqlite3VdbeMemReleaseMalloc(tls, bp+56)
return rc
}
return r
}
// C documentation
//
// /*
// ** The input buffer z (of n bytes) holds the content of a Blob that is not
// ** marked with MEM_Zero. Return true if it could be a zero-blob.
// */
func _isAllZero(tls *libc.TLS, z uintptr, n int32) (r int32) {
var i int32
_ = i
i = 0
for {
if !(i < n) {
break
}
if *(*int8)(unsafe.Pointer(z + uintptr(i))) != 0 {
return 0
}
goto _1
_1:
;
i++
}
return int32(1)
}
// C documentation
//
// /*
// ** Compare two blobs. Return negative, zero, or positive if the first
// ** is less than, equal to, or greater than the second, respectively.
// ** If one blob is a prefix of the other, then the shorter is the lesser.
// */
func _sqlite3BlobCompare(tls *libc.TLS, pB1 uintptr, pB2 uintptr) (r int32) {
var c, n1, n2, v1 int32
_, _, _, _ = c, n1, n2, v1
n1 = (*TMem)(unsafe.Pointer(pB1)).Fn
n2 = (*TMem)(unsafe.Pointer(pB2)).Fn
/* It is possible to have a Blob value that has some non-zero content
** followed by zero content. But that only comes up for Blobs formed
** by the OP_MakeRecord opcode, and such Blobs never get passed into
** sqlite3MemCompare(). */
if (int32((*TMem)(unsafe.Pointer(pB1)).Fflags)|int32((*TMem)(unsafe.Pointer(pB2)).Fflags))&int32(MEM_Zero) != 0 {
if int32((*TMem)(unsafe.Pointer(pB1)).Fflags)&int32((*TMem)(unsafe.Pointer(pB2)).Fflags)&int32(MEM_Zero) != 0 {
return *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pB1)).Fu)) - *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pB2)).Fu))
} else {
if int32((*TMem)(unsafe.Pointer(pB1)).Fflags)&int32(MEM_Zero) != 0 {
if !(_isAllZero(tls, (*TMem)(unsafe.Pointer(pB2)).Fz, (*TMem)(unsafe.Pointer(pB2)).Fn) != 0) {
return -int32(1)
}
return *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pB1)).Fu)) - n2
} else {
if !(_isAllZero(tls, (*TMem)(unsafe.Pointer(pB1)).Fz, (*TMem)(unsafe.Pointer(pB1)).Fn) != 0) {
return +libc.Int32FromInt32(1)
}
return n1 - *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pB2)).Fu))
}
}
}
if n1 > n2 {
v1 = n2
} else {
v1 = n1
}
c = libc.Xmemcmp(tls, (*TMem)(unsafe.Pointer(pB1)).Fz, (*TMem)(unsafe.Pointer(pB2)).Fz, uint64(v1))
if c != 0 {
return c
}
return n1 - n2
}
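// The ordering implemented by _sqlite3BlobCompare above, restated without the
// MEM_Zero (zero-blob) special cases: bytewise comparison over the common
// prefix, with the shorter blob ranking lower when one is a prefix of the other.
// Illustrative only.
func exampleBlobOrder(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return int(a[i]) - int(b[i]) // first differing byte decides
		}
	}
	return len(a) - len(b) // equal prefix: the shorter blob sorts first
}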
/* The following two functions are used only within testcase() to prove
** test coverage. These functions do not exist in production builds.
** We must use separate SQLITE_NOINLINE functions here, since otherwise
** optimizer code movement causes gcov to become very confused.
*/
// C documentation
//
// /*
// ** Do a comparison between a 64-bit signed integer and a 64-bit floating-point
// ** number. Return negative, zero, or positive if the first (i64) is less than,
// ** equal to, or greater than the second (double).
// */
func _sqlite3IntFloatCompare(tls *libc.TLS, i Ti64, r float64) (r1 int32) {
var s, x float64
var y Ti64
var v1, v2 int32
_, _, _, _, _ = s, x, y, v1, v2
if _sqlite3IsNaN(tls, r) != 0 {
/* SQLite considers NaN to be a NULL. And all integer values are greater
** than NULL */
return int32(1)
}
if _sqlite3Config.FbUseLongDouble != 0 {
x = float64(i)
if x < r {
v1 = -int32(1)
} else {
v1 = libc.BoolInt32(x > r)
}
return v1
} else {
if r < -libc.Float64FromFloat64(9.223372036854776e+18) {
return +libc.Int32FromInt32(1)
}
if r >= float64(9.223372036854776e+18) {
return -int32(1)
}
y = int64(r)
if i < y {
return -int32(1)
}
if i > y {
return +libc.Int32FromInt32(1)
}
s = float64(i)
if s < r {
v2 = -int32(1)
} else {
v2 = libc.BoolInt32(s > r)
}
return v2
}
return r1
}
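// Why _sqlite3IntFloatCompare does not simply convert the integer to a double:
// above 2^53 a float64 cannot represent every integer, so a naive conversion can
// report two distinct values as equal. The sketch below follows the same plan as
// the non-long-double branch above (NaN is handled by the caller's check before
// this point): range-check the double, compare integer parts, then let the
// fractional part break the tie. Illustrative only.
func exampleIntFloatCompare(i int64, r float64) int {
	if r < -9.223372036854776e+18 {
		return +1 // every int64 exceeds a double below the int64 range
	}
	if r >= 9.223372036854776e+18 {
		return -1 // every int64 is below a double at or above the range
	}
	y := int64(r) // truncation is exact inside the checked range
	if i < y {
		return -1
	}
	if i > y {
		return +1
	}
	// Same integer part: a positive fraction in r makes r the larger value.
	s := float64(i)
	if s < r {
		return -1
	}
	if s > r {
		return +1
	}
	return 0
}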
// C documentation
//
// /*
// ** Compare the values contained by the two memory cells, returning
// ** negative, zero or positive if pMem1 is less than, equal to, or greater
// ** than pMem2. Sorting order is NULL's first, followed by numbers (integers
// ** and reals) sorted numerically, followed by text ordered by the collating
// ** sequence pColl and finally blob's ordered by memcmp().
// **
// ** Two NULL values are considered equal by this function.
// */
func _sqlite3MemCompare(tls *libc.TLS, pMem1 uintptr, pMem2 uintptr, pColl uintptr) (r int32) {
var combined_flags, f1, f2 int32
_, _, _ = combined_flags, f1, f2
f1 = int32((*TMem)(unsafe.Pointer(pMem1)).Fflags)
f2 = int32((*TMem)(unsafe.Pointer(pMem2)).Fflags)
combined_flags = f1 | f2
/* If one value is NULL, it is less than the other. If both values
** are NULL, return 0.
*/
if combined_flags&int32(MEM_Null) != 0 {
return f2&int32(MEM_Null) - f1&int32(MEM_Null)
}
/* At least one of the two values is a number
*/
if combined_flags&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
if f1&f2&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
if *(*Ti64)(unsafe.Pointer(pMem1)) < *(*Ti64)(unsafe.Pointer(pMem2)) {
return -int32(1)
}
if *(*Ti64)(unsafe.Pointer(pMem1)) > *(*Ti64)(unsafe.Pointer(pMem2)) {
return +libc.Int32FromInt32(1)
}
return 0
}
if f1&f2&int32(MEM_Real) != 0 {
if *(*float64)(unsafe.Pointer(pMem1)) < *(*float64)(unsafe.Pointer(pMem2)) {
return -int32(1)
}
if *(*float64)(unsafe.Pointer(pMem1)) > *(*float64)(unsafe.Pointer(pMem2)) {
return +libc.Int32FromInt32(1)
}
return 0
}
if f1&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
if f2&int32(MEM_Real) != 0 {
return _sqlite3IntFloatCompare(tls, *(*Ti64)(unsafe.Pointer(pMem1)), *(*float64)(unsafe.Pointer(pMem2)))
} else {
if f2&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
if *(*Ti64)(unsafe.Pointer(pMem1)) < *(*Ti64)(unsafe.Pointer(pMem2)) {
return -int32(1)
}
if *(*Ti64)(unsafe.Pointer(pMem1)) > *(*Ti64)(unsafe.Pointer(pMem2)) {
return +libc.Int32FromInt32(1)
}
return 0
} else {
return -int32(1)
}
}
}
if f1&int32(MEM_Real) != 0 {
if f2&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
return -_sqlite3IntFloatCompare(tls, *(*Ti64)(unsafe.Pointer(pMem2)), *(*float64)(unsafe.Pointer(pMem1)))
} else {
return -int32(1)
}
}
return +libc.Int32FromInt32(1)
}
/* If one value is a string and the other is a blob, the string is less.
** If both are strings, compare using the collating functions.
*/
if combined_flags&int32(MEM_Str) != 0 {
if f1&int32(MEM_Str) == 0 {
return int32(1)
}
if f2&int32(MEM_Str) == 0 {
return -int32(1)
}
/* The collation sequence must be defined at this point, even if
** the user deletes the collation sequence after the vdbe program is
** compiled (this was not always the case).
*/
if pColl != 0 {
return _vdbeCompareMemString(tls, pMem1, pMem2, pColl, uintptr(0))
}
/* If a NULL pointer was passed as the collate function, fall through
** to the blob case and use memcmp(). */
}
/* Both values must be blobs. Compare using memcmp(). */
return _sqlite3BlobCompare(tls, pMem1, pMem2)
}
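// The overall ordering applied by _sqlite3MemCompare above: NULLs first, then
// numeric values (integers and reals compared numerically with each other), then
// text ordered by the collating sequence, then blobs ordered by memcmp(). The
// helper below restates only that type ranking, not the per-type value
// comparison. Illustrative only.
func exampleMemTypeRank(isNull, isNumeric, isText bool) int {
	switch {
	case isNull:
		return 0 // NULL sorts before everything else; two NULLs compare equal
	case isNumeric:
		return 1 // integer and real values
	case isText:
		return 2 // strings, compared with the collating sequence
	default:
		return 3 // blobs, compared with memcmp()
	}
}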
// C documentation
//
// /*
// ** The first argument passed to this function is a serial-type that
// ** corresponds to an integer - all values between 1 and 9 inclusive
// ** except 7. The second points to a buffer containing an integer value
// ** serialized according to serial_type. This function deserializes
// ** and returns the value.
// */
func _vdbeRecordDecodeInt(tls *libc.TLS, serial_type Tu32, aKey uintptr) (r Ti64) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* x at bp+8 */ Tu64
var _ /* y at bp+0 */ Tu32
switch serial_type {
case uint32(0):
fallthrough
case uint32(1):
return int64(int8(*(*Tu8)(unsafe.Pointer(aKey))))
case uint32(2):
return int64(libc.Int32FromInt32(256)*int32(int8(*(*Tu8)(unsafe.Pointer(aKey)))) | int32(*(*Tu8)(unsafe.Pointer(aKey + 1))))
case uint32(3):
return int64(libc.Int32FromInt32(65536)*int32(int8(*(*Tu8)(unsafe.Pointer(aKey)))) | int32(*(*Tu8)(unsafe.Pointer(aKey + 1)))<default_rc is
// ** returned.
// **
// ** If database corruption is discovered, set pPKey2->errCode to
// ** SQLITE_CORRUPT and return 0. If an OOM error is encountered,
// ** pPKey2->errCode is set to SQLITE_NOMEM and, if it is not NULL, the
// ** malloc-failed flag set on database handle (pPKey2->pKeyInfo->db).
// */
func _sqlite3VdbeRecordCompareWithSkip(tls *libc.TLS, nKey1 int32, pKey1 uintptr, pPKey2 uintptr, bSkip int32) (r int32) {
bp := tls.Alloc(80)
defer tls.Free(80)
var aKey1, pKeyInfo, pRhs, v4 uintptr
var d1, idx1, v1 Tu32
var i, nCmp, nCmp1, nStr, rc, sortFlags, v2, v3, v6, v7 int32
var lhs, rhs Ti64
var v5 bool
var _ /* mem1 at bp+8 */ TMem
var _ /* s1 at bp+64 */ Tu32
var _ /* serial_type at bp+68 */ Tu32
var _ /* szHdr1 at bp+0 */ Tu32
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = aKey1, d1, i, idx1, lhs, nCmp, nCmp1, nStr, pKeyInfo, pRhs, rc, rhs, sortFlags, v1, v2, v3, v4, v5, v6, v7 /* Offset of first type in header */
rc = 0 /* Return value */
pRhs = (*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FaMem
aKey1 = pKey1
/* If bSkip is true, then the caller has already determined that the first
** two elements in the keys are equal. Fix the various stack variables so
** that this routine begins comparing at the second field. */
if bSkip != 0 {
*(*Tu32)(unsafe.Pointer(bp + 64)) = uint32(*(*uint8)(unsafe.Pointer(aKey1 + 1)))
if *(*Tu32)(unsafe.Pointer(bp + 64)) < uint32(0x80) {
idx1 = uint32(2)
} else {
idx1 = uint32(int32(1) + int32(_sqlite3GetVarint32(tls, aKey1+1, bp+64)))
}
*(*Tu32)(unsafe.Pointer(bp)) = uint32(*(*uint8)(unsafe.Pointer(aKey1)))
d1 = *(*Tu32)(unsafe.Pointer(bp)) + _sqlite3VdbeSerialTypeLen(tls, *(*Tu32)(unsafe.Pointer(bp + 64)))
i = int32(1)
pRhs += 56
} else {
v1 = uint32(*(*uint8)(unsafe.Pointer(aKey1)))
*(*Tu32)(unsafe.Pointer(bp)) = v1
if v1 < uint32(0x80) {
idx1 = uint32(1)
} else {
idx1 = uint32(_sqlite3GetVarint32(tls, aKey1, bp))
}
d1 = *(*Tu32)(unsafe.Pointer(bp))
i = 0
}
if d1 > uint32(nKey1) {
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = uint8(_sqlite3CorruptError(tls, int32(89131)))
return 0 /* Corruption */
}
/* Only needed by assert() statements */
for int32(1) != 0 {
/* RHS is an integer */
if int32((*TMem)(unsafe.Pointer(pRhs)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
*(*Tu32)(unsafe.Pointer(bp + 68)) = uint32(*(*uint8)(unsafe.Pointer(aKey1 + uintptr(idx1))))
if *(*Tu32)(unsafe.Pointer(bp + 68)) >= uint32(10) {
if *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(10) {
v2 = -int32(1)
} else {
v2 = +libc.Int32FromInt32(1)
}
rc = v2
} else {
if *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(0) {
rc = -int32(1)
} else {
if *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(7) {
_serialGet7(tls, aKey1+uintptr(d1), bp+8)
rc = -_sqlite3IntFloatCompare(tls, *(*Ti64)(unsafe.Pointer(pRhs)), *(*float64)(unsafe.Pointer(bp + 8)))
} else {
lhs = _vdbeRecordDecodeInt(tls, *(*Tu32)(unsafe.Pointer(bp + 68)), aKey1+uintptr(d1))
rhs = *(*Ti64)(unsafe.Pointer(pRhs))
if lhs < rhs {
rc = -int32(1)
} else {
if lhs > rhs {
rc = +libc.Int32FromInt32(1)
}
}
}
}
}
} else {
if int32((*TMem)(unsafe.Pointer(pRhs)).Fflags)&int32(MEM_Real) != 0 {
*(*Tu32)(unsafe.Pointer(bp + 68)) = uint32(*(*uint8)(unsafe.Pointer(aKey1 + uintptr(idx1))))
if *(*Tu32)(unsafe.Pointer(bp + 68)) >= uint32(10) {
/* Serial types 12 or greater are strings and blobs (greater than
** numbers). Types 10 and 11 are currently "reserved for future
** use", so it doesn't really matter what the results of comparing
** them to numeric values are. */
if *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(10) {
v3 = -int32(1)
} else {
v3 = +libc.Int32FromInt32(1)
}
rc = v3
} else {
if *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(0) {
rc = -int32(1)
} else {
if *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(7) {
if _serialGet7(tls, aKey1+uintptr(d1), bp+8) != 0 {
rc = -int32(1) /* mem1 is a NaN */
} else {
if *(*float64)(unsafe.Pointer(bp + 8)) < *(*float64)(unsafe.Pointer(pRhs)) {
rc = -int32(1)
} else {
if *(*float64)(unsafe.Pointer(bp + 8)) > *(*float64)(unsafe.Pointer(pRhs)) {
rc = +libc.Int32FromInt32(1)
} else {
}
}
}
} else {
_sqlite3VdbeSerialGet(tls, aKey1+uintptr(d1), *(*Tu32)(unsafe.Pointer(bp + 68)), bp+8)
rc = _sqlite3IntFloatCompare(tls, *(*Ti64)(unsafe.Pointer(bp + 8)), *(*float64)(unsafe.Pointer(pRhs)))
}
}
}
} else {
if int32((*TMem)(unsafe.Pointer(pRhs)).Fflags)&int32(MEM_Str) != 0 {
*(*Tu32)(unsafe.Pointer(bp + 68)) = uint32(*(*uint8)(unsafe.Pointer(aKey1 + uintptr(idx1))))
if *(*Tu32)(unsafe.Pointer(bp + 68)) >= uint32(0x80) {
_sqlite3GetVarint32(tls, aKey1+uintptr(idx1), bp+68)
}
if *(*Tu32)(unsafe.Pointer(bp + 68)) < uint32(12) {
rc = -int32(1)
} else {
if !(*(*Tu32)(unsafe.Pointer(bp + 68))&libc.Uint32FromInt32(0x01) != 0) {
rc = +libc.Int32FromInt32(1)
} else {
(*(*TMem)(unsafe.Pointer(bp + 8))).Fn = int32((*(*Tu32)(unsafe.Pointer(bp + 68)) - uint32(12)) / uint32(2))
if v5 = d1+uint32((*(*TMem)(unsafe.Pointer(bp + 8))).Fn) > uint32(nKey1); !v5 {
v4 = (*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo
pKeyInfo = v4
}
if v5 || int32((*TKeyInfo)(unsafe.Pointer(v4)).FnAllField) <= i {
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = uint8(_sqlite3CorruptError(tls, int32(89212)))
return 0 /* Corruption */
} else {
if *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)) != 0 {
(*(*TMem)(unsafe.Pointer(bp + 8))).Fenc = (*TKeyInfo)(unsafe.Pointer(pKeyInfo)).Fenc
(*(*TMem)(unsafe.Pointer(bp + 8))).Fdb = (*TKeyInfo)(unsafe.Pointer(pKeyInfo)).Fdb
(*(*TMem)(unsafe.Pointer(bp + 8))).Fflags = uint16(MEM_Str)
(*(*TMem)(unsafe.Pointer(bp + 8))).Fz = aKey1 + uintptr(d1)
rc = _vdbeCompareMemString(tls, bp+8, pRhs, *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8)), pPKey2+31)
} else {
if (*(*TMem)(unsafe.Pointer(bp + 8))).Fn < (*TMem)(unsafe.Pointer(pRhs)).Fn {
v6 = (*(*TMem)(unsafe.Pointer(bp + 8))).Fn
} else {
v6 = (*TMem)(unsafe.Pointer(pRhs)).Fn
}
nCmp = v6
rc = libc.Xmemcmp(tls, aKey1+uintptr(d1), (*TMem)(unsafe.Pointer(pRhs)).Fz, uint64(nCmp))
if rc == 0 {
rc = (*(*TMem)(unsafe.Pointer(bp + 8))).Fn - (*TMem)(unsafe.Pointer(pRhs)).Fn
}
}
}
}
}
} else {
if int32((*TMem)(unsafe.Pointer(pRhs)).Fflags)&int32(MEM_Blob) != 0 {
*(*Tu32)(unsafe.Pointer(bp + 68)) = uint32(*(*uint8)(unsafe.Pointer(aKey1 + uintptr(idx1))))
if *(*Tu32)(unsafe.Pointer(bp + 68)) >= uint32(0x80) {
_sqlite3GetVarint32(tls, aKey1+uintptr(idx1), bp+68)
}
if *(*Tu32)(unsafe.Pointer(bp + 68)) < uint32(12) || *(*Tu32)(unsafe.Pointer(bp + 68))&uint32(0x01) != 0 {
rc = -int32(1)
} else {
nStr = int32((*(*Tu32)(unsafe.Pointer(bp + 68)) - uint32(12)) / uint32(2))
if d1+uint32(nStr) > uint32(nKey1) {
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = uint8(_sqlite3CorruptError(tls, int32(89242)))
return 0 /* Corruption */
} else {
if int32((*TMem)(unsafe.Pointer(pRhs)).Fflags)&int32(MEM_Zero) != 0 {
if !(_isAllZero(tls, aKey1+uintptr(d1), nStr) != 0) {
rc = int32(1)
} else {
rc = nStr - *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pRhs)).Fu))
}
} else {
if nStr < (*TMem)(unsafe.Pointer(pRhs)).Fn {
v7 = nStr
} else {
v7 = (*TMem)(unsafe.Pointer(pRhs)).Fn
}
nCmp1 = v7
rc = libc.Xmemcmp(tls, aKey1+uintptr(d1), (*TMem)(unsafe.Pointer(pRhs)).Fz, uint64(nCmp1))
if rc == 0 {
rc = nStr - (*TMem)(unsafe.Pointer(pRhs)).Fn
}
}
}
}
} else {
*(*Tu32)(unsafe.Pointer(bp + 68)) = uint32(*(*uint8)(unsafe.Pointer(aKey1 + uintptr(idx1))))
if *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(0) || *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(10) || *(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(7) && _serialGet7(tls, aKey1+uintptr(d1), bp+8) != 0 {
} else {
rc = int32(1)
}
}
}
}
}
if rc != 0 {
sortFlags = int32(*(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FpKeyInfo)).FaSortFlags + uintptr(i))))
if sortFlags != 0 {
if sortFlags&int32(KEYINFO_ORDER_BIGNULL) == 0 || sortFlags&int32(KEYINFO_ORDER_DESC) != libc.BoolInt32(*(*Tu32)(unsafe.Pointer(bp + 68)) == uint32(0) || int32((*TMem)(unsafe.Pointer(pRhs)).Fflags)&int32(MEM_Null) != 0) {
rc = -rc
}
}
/* See comment below */
return rc
}
i++
if i == int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FnField) {
break
}
pRhs += 56
d1 += _sqlite3VdbeSerialTypeLen(tls, *(*Tu32)(unsafe.Pointer(bp + 68)))
if d1 > uint32(nKey1) {
break
}
idx1 += uint32(_sqlite3VarintLen(tls, uint64(*(*Tu32)(unsafe.Pointer(bp + 68)))))
if idx1 >= *(*Tu32)(unsafe.Pointer(bp)) {
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = uint8(_sqlite3CorruptError(tls, int32(89293)))
return 0 /* Corrupt index */
}
}
/* No memory allocation is ever used on mem1. Prove this using
** the following assert(). If the assert() fails, it indicates a
** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */
/* rc==0 here means that one or both of the keys ran out of fields and
** all the fields up to that point were equal. Return the default_rc
** value. */
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FeqSeen = uint8(1)
return int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fdefault_rc)
}
func _sqlite3VdbeRecordCompare(tls *libc.TLS, nKey1 int32, pKey1 uintptr, pPKey2 uintptr) (r int32) {
return _sqlite3VdbeRecordCompareWithSkip(tls, nKey1, pKey1, pPKey2, 0)
}
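// Record layout assumed by the comparison routines above: a record starts with a
// varint giving the header size in bytes, followed by one serial-type varint per
// field, followed by the data blobs in the same order. The sketch below decodes
// only the single-byte-varint case (all values < 128), which is the fast path
// taken at the top of _sqlite3VdbeRecordCompareWithSkip; it reuses the
// illustrative exampleSerialTypeLen helper defined earlier.
func exampleRecordFieldOffsets(rec []byte) (serialTypes, dataOffsets []uint32) {
	if len(rec) == 0 || rec[0] >= 0x80 {
		return nil, nil // multi-byte header-size varint: not handled in this sketch
	}
	szHdr := uint32(rec[0])
	d := szHdr // data for the first field begins immediately after the header
	for idx := uint32(1); idx < szHdr && int(idx) < len(rec); idx++ {
		t := uint32(rec[idx])
		if t >= 0x80 {
			return nil, nil // multi-byte serial-type varint: not handled here
		}
		serialTypes = append(serialTypes, t)
		dataOffsets = append(dataOffsets, d)
		d += exampleSerialTypeLen(t)
	}
	return serialTypes, dataOffsets
}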
// C documentation
//
// /*
// ** This function is an optimized version of sqlite3VdbeRecordCompare()
// ** for the case where (a) the first field of pPKey2 is an integer, and (b) the
// ** size-of-header varint at the start of (pKey1/nKey1) fits in a single
// ** byte (i.e. is less than 128).
// **
// ** To avoid concerns about buffer overreads, this routine is only used
// ** on schemas where the maximum valid header size is 63 bytes or less.
// */
func _vdbeRecordCompareInt(tls *libc.TLS, nKey1 int32, pKey1 uintptr, pPKey2 uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var aKey uintptr
var lhs, v Ti64
var res, serial_type int32
var _ /* x at bp+8 */ Tu64
var _ /* y at bp+0 */ Tu32
_, _, _, _, _ = aKey, lhs, res, serial_type, v
aKey = pKey1 + uintptr(int32(*(*Tu8)(unsafe.Pointer(pKey1)))&int32(0x3F))
serial_type = int32(*(*Tu8)(unsafe.Pointer(pKey1 + 1)))
switch serial_type {
case int32(1): /* 1-byte signed integer */
lhs = int64(int8(*(*Tu8)(unsafe.Pointer(aKey))))
case int32(2): /* 2-byte signed integer */
lhs = int64(libc.Int32FromInt32(256)*int32(int8(*(*Tu8)(unsafe.Pointer(aKey)))) | int32(*(*Tu8)(unsafe.Pointer(aKey + 1))))
case int32(3): /* 3-byte signed integer */
lhs = int64(libc.Int32FromInt32(65536)*int32(int8(*(*Tu8)(unsafe.Pointer(aKey)))) | int32(*(*Tu8)(unsafe.Pointer(aKey + 1)))<<libc.Int32FromInt32(8) | int32(*(*Tu8)(unsafe.Pointer(aKey + 2))))
case int32(4): /* 4-byte signed integer */
*(*Tu32)(unsafe.Pointer(bp)) = uint32(*(*Tu8)(unsafe.Pointer(aKey)))<<24 | uint32(*(*Tu8)(unsafe.Pointer(aKey + 1)))<<16 | uint32(*(*Tu8)(unsafe.Pointer(aKey + 2)))<<8 | uint32(*(*Tu8)(unsafe.Pointer(aKey + 3)))
lhs = int64(int32(*(*Tu32)(unsafe.Pointer(bp))))
case int32(5): /* 6-byte signed integer */
lhs = int64(uint32(*(*Tu8)(unsafe.Pointer(aKey + 2)))<<24|uint32(*(*Tu8)(unsafe.Pointer(aKey + 3)))<<16|uint32(*(*Tu8)(unsafe.Pointer(aKey + 4)))<<8|uint32(*(*Tu8)(unsafe.Pointer(aKey + 5)))) + int64(libc.Int32FromInt32(256)*int32(int8(*(*Tu8)(unsafe.Pointer(aKey))))|int32(*(*Tu8)(unsafe.Pointer(aKey + 1))))<<32
case int32(6): /* 8-byte signed integer */
*(*Tu64)(unsafe.Pointer(bp + 8)) = uint64(uint32(*(*Tu8)(unsafe.Pointer(aKey)))<<24|uint32(*(*Tu8)(unsafe.Pointer(aKey + 1)))<<16|uint32(*(*Tu8)(unsafe.Pointer(aKey + 2)))<<8|uint32(*(*Tu8)(unsafe.Pointer(aKey + 3))))<<32 | uint64(uint32(*(*Tu8)(unsafe.Pointer(aKey + 4)))<<24|uint32(*(*Tu8)(unsafe.Pointer(aKey + 5)))<<16|uint32(*(*Tu8)(unsafe.Pointer(aKey + 6)))<<8|uint32(*(*Tu8)(unsafe.Pointer(aKey + 7))))
lhs = int64(*(*Tu64)(unsafe.Pointer(bp + 8)))
case int32(8): /* integer constant 0 */
lhs = 0
case int32(9): /* integer constant 1 */
lhs = int64(1)
default:
/* Serial types 0 (NULL), 7 (real) and 10 or larger (text/blob) are
** handled by the general-purpose comparator instead. */
return _sqlite3VdbeRecordCompare(tls, nKey1, pKey1, pPKey2)
}
v = *(*Ti64)(unsafe.Pointer(pPKey2 + 16))
if v > lhs {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr1)
} else {
if v < lhs {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr2)
} else {
if int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FnField) > int32(1) {
/* The first fields of the two keys are equal. Compare the trailing
** fields. */
res = _sqlite3VdbeRecordCompareWithSkip(tls, nKey1, pKey1, pPKey2, int32(1))
} else {
/* The first fields of the two keys are equal and there are no trailing
** fields. Return pPKey2->default_rc in this case. */
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fdefault_rc)
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FeqSeen = uint8(1)
}
}
}
return res
}
// C documentation
//
// /*
// ** This function is an optimized version of sqlite3VdbeRecordCompare()
// ** that (a) the first field of pPKey2 is a string, that (b) the first field
// ** uses the collation sequence BINARY and (c) that the size-of-header varint
// ** at the start of (pKey1/nKey1) fits in a single byte.
// */
func _vdbeRecordCompareString(tls *libc.TLS, nKey1 int32, pKey1 uintptr, pPKey2 uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var aKey1 uintptr
var nCmp, nStr, res, szHdr, v1 int32
var _ /* serial_type at bp+0 */ int32
_, _, _, _, _, _ = aKey1, nCmp, nStr, res, szHdr, v1
aKey1 = pKey1
*(*int32)(unsafe.Pointer(bp)) = int32(int8(*(*Tu8)(unsafe.Pointer(aKey1 + 1))))
goto vrcs_restart
vrcs_restart:
;
if *(*int32)(unsafe.Pointer(bp)) < int32(12) {
if *(*int32)(unsafe.Pointer(bp)) < 0 {
_sqlite3GetVarint32(tls, aKey1+1, bp)
if *(*int32)(unsafe.Pointer(bp)) >= int32(12) {
goto vrcs_restart
}
}
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr1) /* (pKey1/nKey1) is a number or a null */
} else {
if !(*(*int32)(unsafe.Pointer(bp))&libc.Int32FromInt32(0x01) != 0) {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr2) /* (pKey1/nKey1) is a blob */
} else {
szHdr = int32(*(*Tu8)(unsafe.Pointer(aKey1)))
nStr = (*(*int32)(unsafe.Pointer(bp)) - int32(12)) / int32(2)
if szHdr+nStr > nKey1 {
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FerrCode = uint8(_sqlite3CorruptError(tls, int32(89456)))
return 0 /* Corruption */
}
if (*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fn < nStr {
v1 = (*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fn
} else {
v1 = nStr
}
nCmp = v1
res = libc.Xmemcmp(tls, aKey1+uintptr(szHdr), *(*uintptr)(unsafe.Pointer(pPKey2 + 16)), uint64(nCmp))
if res > 0 {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr2)
} else {
if res < 0 {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr1)
} else {
res = nStr - (*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fn
if res == 0 {
if int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FnField) > int32(1) {
res = _sqlite3VdbeRecordCompareWithSkip(tls, nKey1, pKey1, pPKey2, int32(1))
} else {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fdefault_rc)
(*TUnpackedRecord)(unsafe.Pointer(pPKey2)).FeqSeen = uint8(1)
}
} else {
if res > 0 {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr2)
} else {
res = int32((*TUnpackedRecord)(unsafe.Pointer(pPKey2)).Fr1)
}
}
}
}
}
}
return res
}
// C documentation
//
// /*
// ** Return a pointer to an sqlite3VdbeRecordCompare() compatible function
// ** suitable for comparing serialized records to the unpacked record passed
// ** as the only argument.
// */
func _sqlite3VdbeFindCompare(tls *libc.TLS, p uintptr) (r TRecordCompare) {
var flags int32
_ = flags
/* varintRecordCompareInt() and varintRecordCompareString() both assume
** that the size-of-header varint that occurs at the start of each record
** fits in a single byte (i.e. is 127 or less). varintRecordCompareInt()
** also assumes that it is safe to overread a buffer by at least the
** maximum possible legal header size plus 8 bytes. Because there is
** guaranteed to be at least 74 (but not 136) bytes of padding following each
** buffer passed to varintRecordCompareInt() this makes it convenient to
** limit the size of the header to 64 bytes in cases where the first field
** is an integer.
**
** The easiest way to enforce this limit is to consider only records with
** 13 fields or less. If the first field is an integer, the maximum legal
** header size is (12*5 + 1 + 1) bytes. */
if int32((*TKeyInfo)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FpKeyInfo)).FnAllField) <= int32(13) {
flags = int32((*(*TMem)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FaMem))).Fflags)
if *(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FpKeyInfo)).FaSortFlags)) != 0 {
if int32(*(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FpKeyInfo)).FaSortFlags)))&int32(KEYINFO_ORDER_BIGNULL) != 0 {
return __ccgo_fp(_sqlite3VdbeRecordCompare)
}
(*TUnpackedRecord)(unsafe.Pointer(p)).Fr1 = int8(1)
(*TUnpackedRecord)(unsafe.Pointer(p)).Fr2 = int8(-int32(1))
} else {
(*TUnpackedRecord)(unsafe.Pointer(p)).Fr1 = int8(-int32(1))
(*TUnpackedRecord)(unsafe.Pointer(p)).Fr2 = int8(1)
}
if flags&int32(MEM_Int) != 0 {
*(*Ti64)(unsafe.Pointer(p + 16)) = *(*Ti64)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FaMem))
return __ccgo_fp(_vdbeRecordCompareInt)
}
if flags&(libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)|libc.Int32FromInt32(MEM_Null)|libc.Int32FromInt32(MEM_Blob)) == 0 && *(*uintptr)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FpKeyInfo + 32)) == uintptr(0) {
*(*uintptr)(unsafe.Pointer(p + 16)) = (*(*TMem)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FaMem))).Fz
(*TUnpackedRecord)(unsafe.Pointer(p)).Fn = (*(*TMem)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(p)).FaMem))).Fn
return __ccgo_fp(_vdbeRecordCompareString)
}
}
return __ccgo_fp(_sqlite3VdbeRecordCompare)
}
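// A minimal caller-side sketch, not part of the generated amalgamation: it
// selects a comparator with _sqlite3VdbeFindCompare and invokes it through
// the indirect-call pattern used elsewhere in this file for C function
// pointers. The unpacked record p and the serialized record pKey/nKey are
// assumed to be prepared by the caller.
func exampleRecordCompare(tls *libc.TLS, nKey int32, pKey uintptr, p uintptr) int32 {
	// Pick the fastest applicable comparator for this unpacked record.
	xCompare := _sqlite3VdbeFindCompare(tls, p)
	// Call through the function-pointer value: negative, zero, or positive
	// depending on how the serialized record orders against p.
	return (*(*func(*libc.TLS, int32, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{uintptr(xCompare)})))(tls, nKey, pKey, p)
}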
// C documentation
//
// /*
// ** pCur points at an index entry created using the OP_MakeRecord opcode.
// ** Read the rowid (the last field in the record) and store it in *rowid.
// ** Return SQLITE_OK if everything works, or an error code otherwise.
// **
// ** pCur might be pointing to text obtained from a corrupt database file.
// ** So the content cannot be trusted. Do appropriate checks on the content.
// */
func _sqlite3VdbeIdxRowid(tls *libc.TLS, db uintptr, pCur uintptr, rowid uintptr) (r int32) {
bp := tls.Alloc(128)
defer tls.Free(128)
var lenRowid Tu32
var nCellKey Ti64
var rc int32
var _ /* m at bp+8 */ TMem
var _ /* szHdr at bp+0 */ Tu32
var _ /* typeRowid at bp+4 */ Tu32
var _ /* v at bp+64 */ TMem
_, _, _ = lenRowid, nCellKey, rc
nCellKey = 0
/* Get the size of the index entry. Only index entries of less
** than 2GiB are supported - anything larger must be database corruption.
** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so
** this code can safely assume that nCellKey is 32-bits
*/
nCellKey = int64(_sqlite3BtreePayloadSize(tls, pCur))
/* Read in the complete content of the index entry */
_sqlite3VdbeMemInit(tls, bp+8, db, uint16(0))
rc = _sqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, uint32(nCellKey), bp+8)
if rc != 0 {
return rc
}
/* The index entry must begin with a header size */
*(*Tu32)(unsafe.Pointer(bp)) = uint32(*(*Tu8)(unsafe.Pointer((*(*TMem)(unsafe.Pointer(bp + 8))).Fz)))
if *(*Tu32)(unsafe.Pointer(bp)) >= uint32(0x80) {
_sqlite3GetVarint32(tls, (*(*TMem)(unsafe.Pointer(bp + 8))).Fz, bp)
}
if *(*Tu32)(unsafe.Pointer(bp)) < uint32(3) || *(*Tu32)(unsafe.Pointer(bp)) > uint32((*(*TMem)(unsafe.Pointer(bp + 8))).Fn) {
goto idx_rowid_corruption
}
/* The last field of the index should be an integer - the ROWID.
** Verify that the last entry really is an integer. */
*(*Tu32)(unsafe.Pointer(bp + 4)) = uint32(*(*Tu8)(unsafe.Pointer((*(*TMem)(unsafe.Pointer(bp + 8))).Fz + uintptr(*(*Tu32)(unsafe.Pointer(bp))-uint32(1)))))
if *(*Tu32)(unsafe.Pointer(bp + 4)) >= uint32(0x80) {
_sqlite3GetVarint32(tls, (*(*TMem)(unsafe.Pointer(bp + 8))).Fz+uintptr(*(*Tu32)(unsafe.Pointer(bp))-uint32(1)), bp+4)
}
if *(*Tu32)(unsafe.Pointer(bp + 4)) < uint32(1) || *(*Tu32)(unsafe.Pointer(bp + 4)) > uint32(9) || *(*Tu32)(unsafe.Pointer(bp + 4)) == uint32(7) {
goto idx_rowid_corruption
}
lenRowid = uint32(_sqlite3SmallTypeSizes[*(*Tu32)(unsafe.Pointer(bp + 4))])
if uint32((*(*TMem)(unsafe.Pointer(bp + 8))).Fn) < *(*Tu32)(unsafe.Pointer(bp))+lenRowid {
goto idx_rowid_corruption
}
/* Fetch the integer off the end of the index record */
_sqlite3VdbeSerialGet(tls, (*(*TMem)(unsafe.Pointer(bp + 8))).Fz+uintptr(uint32((*(*TMem)(unsafe.Pointer(bp + 8))).Fn)-lenRowid), *(*Tu32)(unsafe.Pointer(bp + 4)), bp+64)
*(*Ti64)(unsafe.Pointer(rowid)) = *(*Ti64)(unsafe.Pointer(bp + 64))
_sqlite3VdbeMemReleaseMalloc(tls, bp+8)
return SQLITE_OK
/* Jump here if database corruption is detected after m has been
** allocated. Free the m object and return SQLITE_CORRUPT. */
goto idx_rowid_corruption
idx_rowid_corruption:
;
_sqlite3VdbeMemReleaseMalloc(tls, bp+8)
return _sqlite3CorruptError(tls, int32(89614))
}
// C documentation
//
// /*
// ** Compare the key of the index entry that cursor pC is pointing to against
// ** the key string in pUnpacked. Write into *pRes a number
// ** that is negative, zero, or positive if pC is less than, equal to,
// ** or greater than pUnpacked. Return SQLITE_OK on success.
// **
// ** pUnpacked is either created without a rowid or is truncated so that it
// ** omits the rowid at the end. The rowid at the end of the index entry
// ** is ignored as well. Hence, this routine only compares the prefixes
// ** of the keys prior to the final rowid, not the entire key.
// */
func _sqlite3VdbeIdxKeyCompare(tls *libc.TLS, db uintptr, pC uintptr, pUnpacked uintptr, res uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var nCellKey Ti64
var pCur uintptr
var rc int32
var _ /* m at bp+0 */ TMem
_, _, _ = nCellKey, pCur, rc
nCellKey = 0
pCur = *(*uintptr)(unsafe.Pointer(pC + 48))
nCellKey = int64(_sqlite3BtreePayloadSize(tls, pCur))
/* nCellKey will always be between 0 and 0xffffffff because of the way
** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */
if nCellKey <= 0 || nCellKey > int64(0x7fffffff) {
*(*int32)(unsafe.Pointer(res)) = 0
return _sqlite3CorruptError(tls, int32(89647))
}
_sqlite3VdbeMemInit(tls, bp, db, uint16(0))
rc = _sqlite3VdbeMemFromBtreeZeroOffset(tls, pCur, uint32(nCellKey), bp)
if rc != 0 {
return rc
}
*(*int32)(unsafe.Pointer(res)) = _sqlite3VdbeRecordCompareWithSkip(tls, (*(*TMem)(unsafe.Pointer(bp))).Fn, (*(*TMem)(unsafe.Pointer(bp))).Fz, pUnpacked, 0)
_sqlite3VdbeMemReleaseMalloc(tls, bp)
return SQLITE_OK
}
// C documentation
//
// /*
// ** This routine sets the value to be returned by subsequent calls to
// ** sqlite3_changes() on the database handle 'db'.
// */
func _sqlite3VdbeSetChanges(tls *libc.TLS, db uintptr, nChange Ti64) {
(*Tsqlite3)(unsafe.Pointer(db)).FnChange = nChange
*(*Ti64)(unsafe.Pointer(db + 128)) += nChange
}
// C documentation
//
// /*
// ** Set a flag in the vdbe to update the change counter when it is finalised
// ** or reset.
// */
func _sqlite3VdbeCountChanges(tls *libc.TLS, v uintptr) {
libc.SetBitFieldPtr16Uint32(v+200, libc.Uint32FromInt32(1), 4, 0x10)
}
// C documentation
//
// /*
// ** Mark every prepared statement associated with a database connection
// ** as expired.
// **
// ** An expired statement means that recompilation of the statement is
// ** recommended. Statements expire when things happen that make their
// ** programs obsolete. Removing user-defined functions or collating
// ** sequences, or changing an authorization function are the types of
// ** things that make prepared statements obsolete.
// **
// ** If iCode is 1, then expiration is advisory. The statement should
// ** be reprepared before being restarted, but if it is already running
// ** it is allowed to run to completion.
// **
// ** Internally, this function just sets the Vdbe.expired flag on all
// ** prepared statements. The flag is set to 1 for an immediate expiration
// ** and set to 2 for an advisory expiration.
// */
func _sqlite3ExpirePreparedStatements(tls *libc.TLS, db uintptr, iCode int32) {
var p uintptr
_ = p
p = (*Tsqlite3)(unsafe.Pointer(db)).FpVdbe
for {
if !(p != 0) {
break
}
libc.SetBitFieldPtr16Uint32(p+200, uint32(iCode+libc.Int32FromInt32(1)), 0, 0x3)
goto _1
_1:
;
p = (*TVdbe)(unsafe.Pointer(p)).FpVNext
}
}
// C documentation
//
// /*
// ** Return the database associated with the Vdbe.
// */
func _sqlite3VdbeDb(tls *libc.TLS, v uintptr) (r uintptr) {
return (*TVdbe)(unsafe.Pointer(v)).Fdb
}
// C documentation
//
// /*
// ** Return the SQLITE_PREPARE flags for a Vdbe.
// */
func _sqlite3VdbePrepareFlags(tls *libc.TLS, v uintptr) (r Tu8) {
return (*TVdbe)(unsafe.Pointer(v)).FprepFlags
}
// C documentation
//
// /*
// ** Return a pointer to an sqlite3_value structure containing the value bound
// ** to parameter iVar of VM v. Except, if the value is an SQL NULL, return
// ** 0 instead. Unless it is NULL, apply affinity aff (one of the SQLITE_AFF_*
// ** constants) to the value before returning it.
// **
// ** The returned value must be freed by the caller using sqlite3ValueFree().
// */
func _sqlite3VdbeGetBoundValue(tls *libc.TLS, v uintptr, iVar int32, aff Tu8) (r uintptr) {
var pMem, pRet uintptr
_, _ = pMem, pRet
if v != 0 {
pMem = (*TVdbe)(unsafe.Pointer(v)).FaVar + uintptr(iVar-int32(1))*56
if 0 == int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Null) {
pRet = _sqlite3ValueNew(tls, (*TVdbe)(unsafe.Pointer(v)).Fdb)
if pRet != 0 {
_sqlite3VdbeMemCopy(tls, pRet, pMem)
_sqlite3ValueApplyAffinity(tls, pRet, aff, uint8(SQLITE_UTF8))
}
return pRet
}
}
return uintptr(0)
}
// C documentation
//
// /*
// ** Configure SQL variable iVar so that binding a new value to it signals
// ** to sqlite3_reoptimize() that re-preparing the statement may result
// ** in a better query plan.
// */
func _sqlite3VdbeSetVarmask(tls *libc.TLS, v uintptr, iVar int32) {
if iVar >= int32(32) {
*(*Tu32)(unsafe.Pointer(v + 284)) |= uint32(0x80000000)
} else {
*(*Tu32)(unsafe.Pointer(v + 284)) |= libc.Uint32FromInt32(1) << (iVar - libc.Int32FromInt32(1))
}
}
// C documentation
//
// /*
// ** Cause a function to throw an error if it was called from OP_PureFunc
// ** rather than OP_Function.
// **
// ** OP_PureFunc means that the function must be deterministic, and should
// ** throw an error if it is given inputs that would make it non-deterministic.
// ** This routine is invoked by date/time functions that use non-deterministic
// ** features such as 'now'.
// */
func _sqlite3NotPureFunc(tls *libc.TLS, pCtx uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var pOp, zContext, zMsg uintptr
_, _, _ = pOp, zContext, zMsg
if (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe == uintptr(0) {
return int32(1)
}
pOp = (*TVdbe)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe)).FaOp + uintptr((*Tsqlite3_context)(unsafe.Pointer(pCtx)).FiOp)*24
if int32((*TVdbeOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_PureFunc) {
if int32((*TVdbeOp)(unsafe.Pointer(pOp)).Fp5)&int32(NC_IsCheck) != 0 {
zContext = __ccgo_ts + 5257
} else {
if int32((*TVdbeOp)(unsafe.Pointer(pOp)).Fp5)&int32(NC_GenCol) != 0 {
zContext = __ccgo_ts + 5276
} else {
zContext = __ccgo_ts + 5295
}
}
zMsg = Xsqlite3_mprintf(tls, __ccgo_ts+5304, libc.VaList(bp+8, (*TFuncDef)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpFunc)).FzName, zContext))
Xsqlite3_result_error(tls, pCtx, zMsg, -int32(1))
Xsqlite3_free(tls, zMsg)
return 0
}
return int32(1)
}
// C documentation
//
// /*
// ** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored
// ** in memory obtained from sqlite3_malloc) into a Vdbe.zErrMsg (text stored
// ** in memory obtained from sqlite3DbMalloc).
// */
func _sqlite3VtabImportErrmsg(tls *libc.TLS, p uintptr, pVtab uintptr) {
var db uintptr
_ = db
if (*Tsqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg != 0 {
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
_sqlite3DbFree(tls, db, (*TVdbe)(unsafe.Pointer(p)).FzErrMsg)
(*TVdbe)(unsafe.Pointer(p)).FzErrMsg = _sqlite3DbStrDup(tls, db, (*Tsqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg)
Xsqlite3_free(tls, (*Tsqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg)
(*Tsqlite3_vtab)(unsafe.Pointer(pVtab)).FzErrMsg = uintptr(0)
}
}
// C documentation
//
// /*
// ** If the second argument is not NULL, release any allocations associated
// ** with the memory cells in the p->aMem[] array. Also free the UnpackedRecord
// ** structure itself, using sqlite3DbFree().
// **
// ** This function is used to free UnpackedRecord structures allocated by
// ** the vdbeUnpackRecord() function found in vdbeapi.c.
// */
func _vdbeFreeUnpacked(tls *libc.TLS, db uintptr, nField int32, p uintptr) {
var i int32
var pMem uintptr
_, _ = i, pMem
if p != 0 {
i = 0
for {
if !(i < nField) {
break
}
pMem = (*TUnpackedRecord)(unsafe.Pointer(p)).FaMem + uintptr(i)*56
if (*TMem)(unsafe.Pointer(pMem)).FzMalloc != 0 {
_sqlite3VdbeMemReleaseMalloc(tls, pMem)
}
goto _1
_1:
;
i++
}
_sqlite3DbNNFreeNN(tls, db, p)
}
}
// C documentation
//
// /*
// ** Invoke the pre-update hook. If this is an UPDATE or DELETE pre-update call,
// ** then the cursor passed as the second argument should point to the row about
// ** to be updated or deleted. If the application calls sqlite3_preupdate_old(),
// ** the required value will be read from the row the cursor points to.
// */
func _sqlite3VdbePreUpdateHook(tls *libc.TLS, v uintptr, pCsr uintptr, op int32, zDb uintptr, pTab uintptr, iKey1 Ti64, iReg int32, iBlobWrite int32) {
bp := tls.Alloc(144)
defer tls.Free(144)
var db, zTbl uintptr
var i int32
var iKey2, v1 Ti64
var _ /* preupdate at bp+0 */ TPreUpdate
_, _, _, _, _ = db, i, iKey2, zTbl, v1
db = (*TVdbe)(unsafe.Pointer(v)).Fdb
zTbl = (*TTable)(unsafe.Pointer(pTab)).FzName
libc.Xmemset(tls, bp, 0, uint64(136))
if libc.BoolInt32((*TTable)(unsafe.Pointer(pTab)).FtabFlags&uint32(TF_WithoutRowid) == uint32(0)) == 0 {
v1 = libc.Int64FromInt32(0)
iKey2 = v1
iKey1 = v1
(*(*TPreUpdate)(unsafe.Pointer(bp))).FpPk = _sqlite3PrimaryKeyIndex(tls, pTab)
} else {
if op == int32(SQLITE_UPDATE) {
iKey2 = *(*Ti64)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).FaMem + uintptr(iReg)*56))
} else {
iKey2 = iKey1
}
}
(*(*TPreUpdate)(unsafe.Pointer(bp))).Fv = v
(*(*TPreUpdate)(unsafe.Pointer(bp))).FpCsr = pCsr
(*(*TPreUpdate)(unsafe.Pointer(bp))).Fop = op
(*(*TPreUpdate)(unsafe.Pointer(bp))).FiNewReg = iReg
(*(*TPreUpdate)(unsafe.Pointer(bp))).Fkeyinfo.Fdb = db
(*(*TPreUpdate)(unsafe.Pointer(bp))).Fkeyinfo.Fenc = (*Tsqlite3)(unsafe.Pointer(db)).Fenc
(*(*TPreUpdate)(unsafe.Pointer(bp))).Fkeyinfo.FnKeyField = uint16((*TTable)(unsafe.Pointer(pTab)).FnCol)
(*(*TPreUpdate)(unsafe.Pointer(bp))).Fkeyinfo.FaSortFlags = uintptr(unsafe.Pointer(&_fakeSortOrder))
(*(*TPreUpdate)(unsafe.Pointer(bp))).FiKey1 = iKey1
(*(*TPreUpdate)(unsafe.Pointer(bp))).FiKey2 = iKey2
(*(*TPreUpdate)(unsafe.Pointer(bp))).FpTab = pTab
(*(*TPreUpdate)(unsafe.Pointer(bp))).FiBlobWrite = iBlobWrite
(*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdate = bp
(*(*func(*libc.TLS, uintptr, uintptr, int32, uintptr, uintptr, Tsqlite3_int64, Tsqlite3_int64))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxPreUpdateCallback})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2)
(*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdate = uintptr(0)
_sqlite3DbFree(tls, db, (*(*TPreUpdate)(unsafe.Pointer(bp))).FaRecord)
_vdbeFreeUnpacked(tls, db, int32((*(*TPreUpdate)(unsafe.Pointer(bp))).Fkeyinfo.FnKeyField)+int32(1), (*(*TPreUpdate)(unsafe.Pointer(bp))).FpUnpacked)
_vdbeFreeUnpacked(tls, db, int32((*(*TPreUpdate)(unsafe.Pointer(bp))).Fkeyinfo.FnKeyField)+int32(1), (*(*TPreUpdate)(unsafe.Pointer(bp))).FpNewUnpacked)
if (*(*TPreUpdate)(unsafe.Pointer(bp))).FaNew != 0 {
i = 0
for {
if !(i < int32((*TVdbeCursor)(unsafe.Pointer(pCsr)).FnField)) {
break
}
_sqlite3VdbeMemRelease(tls, (*(*TPreUpdate)(unsafe.Pointer(bp))).FaNew+uintptr(i)*56)
goto _2
_2:
;
i++
}
_sqlite3DbNNFreeNN(tls, db, (*(*TPreUpdate)(unsafe.Pointer(bp))).FaNew)
}
}
var _fakeSortOrder Tu8
/************** End of vdbeaux.c *********************************************/
/************** Begin file vdbeapi.c *****************************************/
/*
** 2004 May 26
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains code use to implement APIs that are part of the
** VDBE.
*/
/* #include "sqliteInt.h" */
/* #include "vdbeInt.h" */
/* #include "opcodes.h" */
// C documentation
//
// /*
// ** Return TRUE (non-zero) if the statement supplied as an argument needs
// ** to be recompiled. A statement needs to be recompiled whenever the
// ** execution environment changes in a way that would alter the program
// ** that sqlite3_prepare() generates. For example, if new functions or
// ** collating sequences are registered or if an authorizer function is
// ** added or changed.
// */
func Xsqlite3_expired(tls *libc.TLS, pStmt uintptr) (r int32) {
var p uintptr
_ = p
p = pStmt
return libc.BoolInt32(p == uintptr(0) || int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x3>>0)) != 0)
}
// C documentation
//
// /*
// ** Check on a Vdbe to make sure it has not been finalized. Log
// ** an error and return true if it has been finalized (or is otherwise
// ** invalid). Return false if it is ok.
// */
func _vdbeSafety(tls *libc.TLS, p uintptr) (r int32) {
if (*TVdbe)(unsafe.Pointer(p)).Fdb == uintptr(0) {
Xsqlite3_log(tls, int32(SQLITE_MISUSE), __ccgo_ts+5340, 0)
return int32(1)
} else {
return 0
}
return r
}
func _vdbeSafetyNotNull(tls *libc.TLS, p uintptr) (r int32) {
if p == uintptr(0) {
Xsqlite3_log(tls, int32(SQLITE_MISUSE), __ccgo_ts+5385, 0)
return int32(1)
} else {
return _vdbeSafety(tls, p)
}
return r
}
// C documentation
//
// /*
// ** Invoke the profile callback. This routine is only called if we already
// ** know that the profile callback is defined and needs to be invoked.
// */
func _invokeProfileCallback(tls *libc.TLS, db uintptr, p uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* iElapse at bp+8 */ Tsqlite3_int64
var _ /* iNow at bp+0 */ Tsqlite3_int64
_sqlite3OsCurrentTimeInt64(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpVfs, bp)
*(*Tsqlite3_int64)(unsafe.Pointer(bp + 8)) = (*(*Tsqlite3_int64)(unsafe.Pointer(bp)) - (*TVdbe)(unsafe.Pointer(p)).FstartTime) * int64(1000000)
if (*Tsqlite3)(unsafe.Pointer(db)).FxProfile != 0 {
(*(*func(*libc.TLS, uintptr, uintptr, Tu64))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxProfile})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpProfileArg, (*TVdbe)(unsafe.Pointer(p)).FzSql, uint64(*(*Tsqlite3_int64)(unsafe.Pointer(bp + 8))))
}
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmTrace)&int32(SQLITE_TRACE_PROFILE) != 0 {
(*(*func(*libc.TLS, Tu32, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{*(*uintptr)(unsafe.Pointer(&(*Tsqlite3)(unsafe.Pointer(db)).Ftrace))})))(tls, uint32(SQLITE_TRACE_PROFILE), (*Tsqlite3)(unsafe.Pointer(db)).FpTraceArg, p, bp+8)
}
(*TVdbe)(unsafe.Pointer(p)).FstartTime = 0
}
/*
** The checkProfileCallback(DB,P) macro checks to see if a profile callback
** is needed, and it invokes the callback if it is needed.
*/
// C documentation
//
// /*
// ** The following routine destroys a virtual machine that is created by
// ** the sqlite3_compile() routine. The integer returned is an SQLITE_
// ** success/failure code that describes the result of executing the virtual
// ** machine.
// **
// ** This routine sets the error code and string returned by
// ** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
// */
func Xsqlite3_finalize(tls *libc.TLS, pStmt uintptr) (r int32) {
var db, v uintptr
var rc int32
_, _, _ = db, rc, v
if pStmt == uintptr(0) {
/* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL
** pointer is a harmless no-op. */
rc = SQLITE_OK
} else {
v = pStmt
db = (*TVdbe)(unsafe.Pointer(v)).Fdb
if _vdbeSafety(tls, v) != 0 {
return _sqlite3MisuseError(tls, int32(90035))
}
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
if (*TVdbe)(unsafe.Pointer(v)).FstartTime > 0 {
_invokeProfileCallback(tls, db, v)
}
rc = _sqlite3VdbeReset(tls, v)
_sqlite3VdbeDelete(tls, v)
rc = _sqlite3ApiExit(tls, db, rc)
_sqlite3LeaveMutexAndCloseZombie(tls, db)
}
return rc
}
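// A minimal usage sketch, not part of the generated amalgamation: pStmt is
// assumed to be a statement prepared earlier on this connection. It is
// stepped to completion and then finalized; per the comment above, passing
// a NULL statement pointer to Xsqlite3_finalize would be a harmless no-op.
func exampleStepAndFinalize(tls *libc.TLS, pStmt uintptr) int32 {
	// Drain all result rows; column accessors would be called here.
	for Xsqlite3_step(tls, pStmt) == int32(SQLITE_ROW) {
	}
	// Release all resources held by the statement.
	return Xsqlite3_finalize(tls, pStmt)
}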
// C documentation
//
// /*
// ** Terminate the current execution of an SQL statement and reset it
// ** back to its starting state so that it can be reused. A success code from
// ** the prior execution is returned.
// **
// ** This routine sets the error code and string returned by
// ** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
// */
func Xsqlite3_reset(tls *libc.TLS, pStmt uintptr) (r int32) {
var db, v uintptr
var rc int32
_, _, _ = db, rc, v
if pStmt == uintptr(0) {
rc = SQLITE_OK
} else {
v = pStmt
db = (*TVdbe)(unsafe.Pointer(v)).Fdb
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
if (*TVdbe)(unsafe.Pointer(v)).FstartTime > 0 {
_invokeProfileCallback(tls, db, v)
}
rc = _sqlite3VdbeReset(tls, v)
_sqlite3VdbeRewind(tls, v)
rc = _sqlite3ApiExit(tls, db, rc)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
}
return rc
}
// C documentation
//
// /*
// ** Set all the parameters in the compiled SQL statement to NULL.
// */
func Xsqlite3_clear_bindings(tls *libc.TLS, pStmt uintptr) (r int32) {
var i, rc int32
var mutex, p uintptr
_, _, _, _ = i, mutex, p, rc
rc = SQLITE_OK
p = pStmt
mutex = (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex
Xsqlite3_mutex_enter(tls, mutex)
i = 0
for {
if !(i < int32((*TVdbe)(unsafe.Pointer(p)).FnVar)) {
break
}
_sqlite3VdbeMemRelease(tls, (*TVdbe)(unsafe.Pointer(p)).FaVar+uintptr(i)*56)
(*(*TMem)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaVar + uintptr(i)*56))).Fflags = uint16(MEM_Null)
goto _1
_1:
;
i++
}
if (*TVdbe)(unsafe.Pointer(p)).Fexpmask != 0 {
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(1), 0, 0x3)
}
Xsqlite3_mutex_leave(tls, mutex)
return rc
}
// C documentation
//
// /**************************** sqlite3_value_ *******************************
// ** The following routines extract information from a Mem or sqlite3_value
// ** structure.
// */
func Xsqlite3_value_blob(tls *libc.TLS, pVal uintptr) (r uintptr) {
var p, v3, p2 uintptr
var v1 int32
_, _, _, _ = p, v1, v3, p2
p = pVal
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Blob)|libc.Int32FromInt32(MEM_Str)) != 0 {
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Zero) != 0 {
v1 = _sqlite3VdbeMemExpandBlob(tls, p)
} else {
v1 = 0
}
if v1 != SQLITE_OK {
return uintptr(0)
}
p2 = p + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_Blob))
if (*TMem)(unsafe.Pointer(p)).Fn != 0 {
v3 = (*TMem)(unsafe.Pointer(p)).Fz
} else {
v3 = uintptr(0)
}
return v3
} else {
return Xsqlite3_value_text(tls, pVal)
}
return r
}
func Xsqlite3_value_bytes(tls *libc.TLS, pVal uintptr) (r int32) {
return _sqlite3ValueBytes(tls, pVal, uint8(SQLITE_UTF8))
}
func Xsqlite3_value_bytes16(tls *libc.TLS, pVal uintptr) (r int32) {
return _sqlite3ValueBytes(tls, pVal, uint8(SQLITE_UTF16LE))
}
func Xsqlite3_value_double(tls *libc.TLS, pVal uintptr) (r float64) {
return _sqlite3VdbeRealValue(tls, pVal)
}
func Xsqlite3_value_int(tls *libc.TLS, pVal uintptr) (r int32) {
return int32(_sqlite3VdbeIntValue(tls, pVal))
}
func Xsqlite3_value_int64(tls *libc.TLS, pVal uintptr) (r Tsqlite_int64) {
return _sqlite3VdbeIntValue(tls, pVal)
}
func Xsqlite3_value_subtype(tls *libc.TLS, pVal uintptr) (r uint32) {
var pMem uintptr
var v1 int32
_, _ = pMem, v1
pMem = pVal
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Subtype) != 0 {
v1 = int32((*TMem)(unsafe.Pointer(pMem)).FeSubtype)
} else {
v1 = 0
}
return uint32(v1)
}
func Xsqlite3_value_pointer(tls *libc.TLS, pVal uintptr, zPType uintptr) (r uintptr) {
var p uintptr
_ = p
p = pVal
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Term)|libc.Int32FromInt32(MEM_Subtype)) == libc.Int32FromInt32(MEM_Null)|libc.Int32FromInt32(MEM_Term)|libc.Int32FromInt32(MEM_Subtype) && zPType != uintptr(0) && int32((*TMem)(unsafe.Pointer(p)).FeSubtype) == int32('p') && libc.Xstrcmp(tls, *(*uintptr)(unsafe.Pointer(p)), zPType) == 0 {
return (*TMem)(unsafe.Pointer(p)).Fz
} else {
return uintptr(0)
}
return r
}
func Xsqlite3_value_text(tls *libc.TLS, pVal uintptr) (r uintptr) {
return _sqlite3ValueText(tls, pVal, uint8(SQLITE_UTF8))
}
func Xsqlite3_value_text16(tls *libc.TLS, pVal uintptr) (r uintptr) {
return _sqlite3ValueText(tls, pVal, uint8(SQLITE_UTF16LE))
}
func Xsqlite3_value_text16be(tls *libc.TLS, pVal uintptr) (r uintptr) {
return _sqlite3ValueText(tls, pVal, uint8(SQLITE_UTF16BE))
}
func Xsqlite3_value_text16le(tls *libc.TLS, pVal uintptr) (r uintptr) {
return _sqlite3ValueText(tls, pVal, uint8(SQLITE_UTF16LE))
}
// C documentation
//
// /* EVIDENCE-OF: R-12793-43283 Every value in SQLite has one of five
// ** fundamental datatypes: 64-bit signed integer 64-bit IEEE floating
// ** point number string BLOB NULL
// */
func Xsqlite3_value_type(tls *libc.TLS, pVal uintptr) (r int32) {
return int32(_aType[int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&int32(MEM_AffMask)])
}
var _aType = [64]Tu8{
0: uint8(SQLITE_BLOB),
1: uint8(SQLITE_NULL),
2: uint8(SQLITE_TEXT),
3: uint8(SQLITE_NULL),
4: uint8(SQLITE_INTEGER),
5: uint8(SQLITE_NULL),
6: uint8(SQLITE_INTEGER),
7: uint8(SQLITE_NULL),
8: uint8(SQLITE_FLOAT),
9: uint8(SQLITE_NULL),
10: uint8(SQLITE_FLOAT),
11: uint8(SQLITE_NULL),
12: uint8(SQLITE_INTEGER),
13: uint8(SQLITE_NULL),
14: uint8(SQLITE_INTEGER),
15: uint8(SQLITE_NULL),
16: uint8(SQLITE_BLOB),
17: uint8(SQLITE_NULL),
18: uint8(SQLITE_TEXT),
19: uint8(SQLITE_NULL),
20: uint8(SQLITE_INTEGER),
21: uint8(SQLITE_NULL),
22: uint8(SQLITE_INTEGER),
23: uint8(SQLITE_NULL),
24: uint8(SQLITE_FLOAT),
25: uint8(SQLITE_NULL),
26: uint8(SQLITE_FLOAT),
27: uint8(SQLITE_NULL),
28: uint8(SQLITE_INTEGER),
29: uint8(SQLITE_NULL),
30: uint8(SQLITE_INTEGER),
31: uint8(SQLITE_NULL),
32: uint8(SQLITE_FLOAT),
33: uint8(SQLITE_NULL),
34: uint8(SQLITE_FLOAT),
35: uint8(SQLITE_NULL),
36: uint8(SQLITE_FLOAT),
37: uint8(SQLITE_NULL),
38: uint8(SQLITE_FLOAT),
39: uint8(SQLITE_NULL),
40: uint8(SQLITE_FLOAT),
41: uint8(SQLITE_NULL),
42: uint8(SQLITE_FLOAT),
43: uint8(SQLITE_NULL),
44: uint8(SQLITE_FLOAT),
45: uint8(SQLITE_NULL),
46: uint8(SQLITE_FLOAT),
47: uint8(SQLITE_NULL),
48: uint8(SQLITE_BLOB),
49: uint8(SQLITE_NULL),
50: uint8(SQLITE_TEXT),
51: uint8(SQLITE_NULL),
52: uint8(SQLITE_FLOAT),
53: uint8(SQLITE_NULL),
54: uint8(SQLITE_FLOAT),
55: uint8(SQLITE_NULL),
56: uint8(SQLITE_FLOAT),
57: uint8(SQLITE_NULL),
58: uint8(SQLITE_FLOAT),
59: uint8(SQLITE_NULL),
60: uint8(SQLITE_FLOAT),
61: uint8(SQLITE_NULL),
62: uint8(SQLITE_FLOAT),
63: uint8(SQLITE_NULL),
}
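// An illustrative sketch, not part of the generated amalgamation: mapping
// the datatype code reported by Xsqlite3_value_type (via the _aType table
// above) to a label. pVal is assumed to be a protected sqlite3_value, for
// example an argument passed to an application-defined function.
func exampleValueKind(tls *libc.TLS, pVal uintptr) string {
	switch Xsqlite3_value_type(tls, pVal) {
	case int32(SQLITE_INTEGER):
		return "integer"
	case int32(SQLITE_FLOAT):
		return "float"
	case int32(SQLITE_TEXT):
		return "text"
	case int32(SQLITE_BLOB):
		return "blob"
	default: /* SQLITE_NULL */
		return "null"
	}
}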
func Xsqlite3_value_encoding(tls *libc.TLS, pVal uintptr) (r int32) {
return int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fenc)
}
// C documentation
//
// /* Return true if a parameter to xUpdate represents an unchanged column */
func Xsqlite3_value_nochange(tls *libc.TLS, pVal uintptr) (r int32) {
return libc.BoolInt32(int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&(libc.Int32FromInt32(MEM_Null)|libc.Int32FromInt32(MEM_Zero)) == libc.Int32FromInt32(MEM_Null)|libc.Int32FromInt32(MEM_Zero))
}
// C documentation
//
// /* Return true if a parameter value originated from an sqlite3_bind() */
func Xsqlite3_value_frombind(tls *libc.TLS, pVal uintptr) (r int32) {
return libc.BoolInt32(int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&int32(MEM_FromBind) != 0)
}
// C documentation
//
// /* Make a copy of an sqlite3_value object
// */
func Xsqlite3_value_dup(tls *libc.TLS, pOrig uintptr) (r uintptr) {
var pNew, p1, p2, p3, p4 uintptr
_, _, _, _, _ = pNew, p1, p2, p3, p4
if pOrig == uintptr(0) {
return uintptr(0)
}
pNew = Xsqlite3_malloc(tls, int32(56))
if pNew == uintptr(0) {
return uintptr(0)
}
libc.Xmemset(tls, pNew, 0, uint64(56))
libc.Xmemcpy(tls, pNew, pOrig, uint64(libc.UintptrFromInt32(0)+24))
p1 = pNew + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(MEM_Dyn))
(*Tsqlite3_value)(unsafe.Pointer(pNew)).Fdb = uintptr(0)
if int32((*Tsqlite3_value)(unsafe.Pointer(pNew)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 {
p2 = pNew + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) & ^(libc.Int32FromInt32(MEM_Static) | libc.Int32FromInt32(MEM_Dyn)))
p3 = pNew + 20
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) | libc.Int32FromInt32(MEM_Ephem))
if _sqlite3VdbeMemMakeWriteable(tls, pNew) != SQLITE_OK {
_sqlite3ValueFree(tls, pNew)
pNew = uintptr(0)
}
} else {
if int32((*Tsqlite3_value)(unsafe.Pointer(pNew)).Fflags)&int32(MEM_Null) != 0 {
/* Do not duplicate pointer values */
p4 = pNew + 20
*(*Tu16)(unsafe.Pointer(p4)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p4))) & ^(libc.Int32FromInt32(MEM_Term) | libc.Int32FromInt32(MEM_Subtype)))
}
}
return pNew
}
// C documentation
//
// /* Destroy an sqlite3_value object previously obtained from
// ** sqlite3_value_dup().
// */
func Xsqlite3_value_free(tls *libc.TLS, pOld uintptr) {
_sqlite3ValueFree(tls, pOld)
}
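// A minimal sketch, not part of the generated amalgamation: take a private
// copy of a value with Xsqlite3_value_dup, read it, then release the copy
// with Xsqlite3_value_free. pVal is assumed to be an sqlite3_value such as
// a function argument; a NULL return from the dup signals an OOM (or a NULL
// input) and both routines above accept it safely.
func exampleDupInt64(tls *libc.TLS, pVal uintptr) (int64, bool) {
	pCopy := Xsqlite3_value_dup(tls, pVal)
	if pCopy == uintptr(0) {
		return 0, false // OOM, or pVal was NULL
	}
	v := int64(Xsqlite3_value_int64(tls, pCopy))
	Xsqlite3_value_free(tls, pCopy)
	return v, true
}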
// C documentation
//
// /**************************** sqlite3_result_ *******************************
// ** The following routines are used by user-defined functions to specify
// ** the function result.
// **
// ** The setStrOrError() function calls sqlite3VdbeMemSetStr() to store the
// ** result as a string or blob. Appropriate errors are set if the string/blob
// ** is too big or if an OOM occurs.
// **
// ** The invokeValueDestructor(P,X) routine invokes destructor function X()
// ** on value P if P is not going to be used and needs to be destroyed.
// */
func _setResultStrOrError(tls *libc.TLS, pCtx uintptr, z uintptr, n int32, enc Tu8, xDel uintptr) {
var pOut uintptr
var rc int32
_, _ = pOut, rc
pOut = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut
rc = _sqlite3VdbeMemSetStr(tls, pOut, z, int64(n), enc, xDel)
if rc != 0 {
if rc == int32(SQLITE_TOOBIG) {
Xsqlite3_result_error_toobig(tls, pCtx)
} else {
/* The only errors possible from sqlite3VdbeMemSetStr are
** SQLITE_TOOBIG and SQLITE_NOMEM */
Xsqlite3_result_error_nomem(tls, pCtx)
}
return
}
_sqlite3VdbeChangeEncoding(tls, pOut, int32((*Tsqlite3_context)(unsafe.Pointer(pCtx)).Fenc))
if _sqlite3VdbeMemTooBig(tls, pOut) != 0 {
Xsqlite3_result_error_toobig(tls, pCtx)
}
}
func _invokeValueDestructor(tls *libc.TLS, p uintptr, xDel uintptr, pCtx uintptr) (r int32) {
if xDel == uintptr(0) {
/* noop */
} else {
if xDel == uintptr(-libc.Int32FromInt32(1)) {
/* noop */
} else {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{xDel})))(tls, p)
}
}
Xsqlite3_result_error_toobig(tls, pCtx)
return int32(SQLITE_TOOBIG)
}
func Xsqlite3_result_blob(tls *libc.TLS, pCtx uintptr, z uintptr, n int32, xDel uintptr) {
_setResultStrOrError(tls, pCtx, z, n, uint8(0), xDel)
}
func Xsqlite3_result_blob64(tls *libc.TLS, pCtx uintptr, z uintptr, n Tsqlite3_uint64, xDel uintptr) {
if n > uint64(0x7fffffff) {
_invokeValueDestructor(tls, z, xDel, pCtx)
} else {
_setResultStrOrError(tls, pCtx, z, int32(n), uint8(0), xDel)
}
}
func Xsqlite3_result_double(tls *libc.TLS, pCtx uintptr, rVal float64) {
_sqlite3VdbeMemSetDouble(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut, rVal)
}
func Xsqlite3_result_error(tls *libc.TLS, pCtx uintptr, z uintptr, n int32) {
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError = int32(SQLITE_ERROR)
_sqlite3VdbeMemSetStr(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut, z, int64(n), uint8(SQLITE_UTF8), uintptr(-libc.Int32FromInt32(1)))
}
func Xsqlite3_result_error16(tls *libc.TLS, pCtx uintptr, z uintptr, n int32) {
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError = int32(SQLITE_ERROR)
_sqlite3VdbeMemSetStr(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut, z, int64(n), uint8(SQLITE_UTF16LE), uintptr(-libc.Int32FromInt32(1)))
}
func Xsqlite3_result_int(tls *libc.TLS, pCtx uintptr, iVal int32) {
_sqlite3VdbeMemSetInt64(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut, int64(iVal))
}
func Xsqlite3_result_int64(tls *libc.TLS, pCtx uintptr, iVal Ti64) {
_sqlite3VdbeMemSetInt64(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut, iVal)
}
func Xsqlite3_result_null(tls *libc.TLS, pCtx uintptr) {
_sqlite3VdbeMemSetNull(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut)
}
func Xsqlite3_result_pointer(tls *libc.TLS, pCtx uintptr, pPtr uintptr, zPType uintptr, xDestructor uintptr) {
var pOut uintptr
_ = pOut
pOut = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut
_sqlite3VdbeMemRelease(tls, pOut)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Null)
_sqlite3VdbeMemSetPointer(tls, pOut, pPtr, zPType, xDestructor)
}
func Xsqlite3_result_subtype(tls *libc.TLS, pCtx uintptr, eSubtype uint32) {
var pOut, p1 uintptr
_, _ = pOut, p1
pOut = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut
(*TMem)(unsafe.Pointer(pOut)).FeSubtype = uint8(eSubtype & uint32(0xff))
p1 = pOut + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(MEM_Subtype))
}
func Xsqlite3_result_text(tls *libc.TLS, pCtx uintptr, z uintptr, n int32, xDel uintptr) {
_setResultStrOrError(tls, pCtx, z, n, uint8(SQLITE_UTF8), xDel)
}
func Xsqlite3_result_text64(tls *libc.TLS, pCtx uintptr, z uintptr, n Tsqlite3_uint64, xDel uintptr, enc uint8) {
if int32(enc) != int32(SQLITE_UTF8) {
if int32(enc) == int32(SQLITE_UTF16) {
enc = uint8(SQLITE_UTF16LE)
}
n &= ^libc.Uint64FromInt32(1)
}
if n > uint64(0x7fffffff) {
_invokeValueDestructor(tls, z, xDel, pCtx)
} else {
_setResultStrOrError(tls, pCtx, z, int32(n), enc, xDel)
_sqlite3VdbeMemZeroTerminateIfAble(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut)
}
}
func Xsqlite3_result_text16(tls *libc.TLS, pCtx uintptr, z uintptr, n int32, xDel uintptr) {
_setResultStrOrError(tls, pCtx, z, int32(uint64(n) & ^libc.Uint64FromInt32(1)), uint8(SQLITE_UTF16LE), xDel)
}
func Xsqlite3_result_text16be(tls *libc.TLS, pCtx uintptr, z uintptr, n int32, xDel uintptr) {
_setResultStrOrError(tls, pCtx, z, int32(uint64(n) & ^libc.Uint64FromInt32(1)), uint8(SQLITE_UTF16BE), xDel)
}
func Xsqlite3_result_text16le(tls *libc.TLS, pCtx uintptr, z uintptr, n int32, xDel uintptr) {
_setResultStrOrError(tls, pCtx, z, int32(uint64(n) & ^libc.Uint64FromInt32(1)), uint8(SQLITE_UTF16LE), xDel)
}
func Xsqlite3_result_value(tls *libc.TLS, pCtx uintptr, pValue uintptr) {
var pOut uintptr
_ = pOut
pOut = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut
_sqlite3VdbeMemCopy(tls, pOut, pValue)
_sqlite3VdbeChangeEncoding(tls, pOut, int32((*Tsqlite3_context)(unsafe.Pointer(pCtx)).Fenc))
if _sqlite3VdbeMemTooBig(tls, pOut) != 0 {
Xsqlite3_result_error_toobig(tls, pCtx)
}
}
func Xsqlite3_result_zeroblob(tls *libc.TLS, pCtx uintptr, n int32) {
var v1 int32
_ = v1
if n > 0 {
v1 = n
} else {
v1 = 0
}
Xsqlite3_result_zeroblob64(tls, pCtx, uint64(v1))
}
func Xsqlite3_result_zeroblob64(tls *libc.TLS, pCtx uintptr, n Tu64) (r int32) {
var pOut uintptr
_ = pOut
pOut = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut
if n > uint64(*(*int32)(unsafe.Pointer((*TMem)(unsafe.Pointer(pOut)).Fdb + 136))) {
Xsqlite3_result_error_toobig(tls, pCtx)
return int32(SQLITE_TOOBIG)
}
_sqlite3VdbeMemSetZeroBlob(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut, int32(n))
return SQLITE_OK
}
func Xsqlite3_result_error_code(tls *libc.TLS, pCtx uintptr, errCode int32) {
var v1 int32
_ = v1
if errCode != 0 {
v1 = errCode
} else {
v1 = -int32(1)
}
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError = v1
if int32((*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut)).Fflags)&int32(MEM_Null) != 0 {
_setResultStrOrError(tls, pCtx, _sqlite3ErrStr(tls, errCode), -int32(1), uint8(SQLITE_UTF8), libc.UintptrFromInt32(0))
}
}
// C documentation
//
// /* Force an SQLITE_TOOBIG error. */
func Xsqlite3_result_error_toobig(tls *libc.TLS, pCtx uintptr) {
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError = int32(SQLITE_TOOBIG)
_sqlite3VdbeMemSetStr(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut, __ccgo_ts+5425, int64(-int32(1)), uint8(SQLITE_UTF8), libc.UintptrFromInt32(0))
}
// C documentation
//
// /* An SQLITE_NOMEM error. */
func Xsqlite3_result_error_nomem(tls *libc.TLS, pCtx uintptr) {
_sqlite3VdbeMemSetNull(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut)
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError = int32(SQLITE_NOMEM)
_sqlite3OomFault(tls, (*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut)).Fdb)
}
// C documentation
//
// /* Force the INT64 value currently stored as the result to be
// ** a MEM_IntReal value. See the SQLITE_TESTCTRL_RESULT_INTREAL
// ** test-control.
// */
func _sqlite3ResultIntReal(tls *libc.TLS, pCtx uintptr) {
var p1, p2 uintptr
_, _ = p1, p2
if int32((*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut)).Fflags)&int32(MEM_Int) != 0 {
p1 = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(MEM_Int))
p2 = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpOut + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_IntReal))
}
}
// C documentation
//
// /*
// ** This function is called after a transaction has been committed. It
// ** invokes callbacks registered with sqlite3_wal_hook() as required.
// */
func _doWalCallbacks(tls *libc.TLS, db uintptr) (r int32) {
var i, nEntry, rc int32
var pBt uintptr
_, _, _, _ = i, nEntry, pBt, rc
rc = SQLITE_OK
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
pBt = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpBt
if pBt != 0 {
_sqlite3BtreeEnter(tls, pBt)
nEntry = _sqlite3PagerWalCallback(tls, _sqlite3BtreePager(tls, pBt))
_sqlite3BtreeLeave(tls, pBt)
if nEntry > 0 && (*Tsqlite3)(unsafe.Pointer(db)).FxWalCallback != 0 && rc == SQLITE_OK {
rc = (*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxWalCallback})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpWalArg, db, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FzDbSName, nEntry)
}
}
goto _1
_1:
;
i++
}
return rc
}
// C documentation
//
// /*
// ** Execute the statement pStmt, either until a row of data is ready, the
// ** statement is completely executed or an error occurs.
// **
// ** This routine implements the bulk of the logic behind the sqlite3_step()
// ** API. The only thing omitted is the automatic recompile if a
// ** schema change has occurred. That detail is handled by the
// ** outer sqlite3_step() wrapper procedure.
// */
func _sqlite3Step(tls *libc.TLS, p uintptr) (r int32) {
var db uintptr
var rc int32
_, _ = db, rc
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
if int32((*TVdbe)(unsafe.Pointer(p)).FeVdbeState) != int32(VDBE_RUN_STATE) {
goto restart_step
restart_step:
;
if int32((*TVdbe)(unsafe.Pointer(p)).FeVdbeState) == int32(VDBE_READY_STATE) {
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x3>>0)) != 0 {
(*TVdbe)(unsafe.Pointer(p)).Frc = int32(SQLITE_SCHEMA)
rc = int32(SQLITE_ERROR)
if int32((*TVdbe)(unsafe.Pointer(p)).FprepFlags)&int32(SQLITE_PREPARE_SAVESQL) != 0 {
/* If this statement was prepared using saved SQL and an
** error has occurred, then return the error code in p->rc to the
** caller. Set the error code in the database handle to the same
** value.
*/
rc = _sqlite3VdbeTransferError(tls, p)
}
goto end_of_step
}
/* If there are no other statements currently running, then
** reset the interrupt flag. This prevents a call to sqlite3_interrupt
** from interrupting a statement that has not yet started.
*/
if (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeActive == 0 {
libc.AtomicStorePInt32(db+432, libc.Int32FromInt32(0))
}
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmTrace)&(libc.Int32FromInt32(SQLITE_TRACE_PROFILE)|libc.Int32FromInt32(SQLITE_TRACE_XPROFILE)) != 0 && !((*Tsqlite3)(unsafe.Pointer(db)).Finit1.Fbusy != 0) && (*TVdbe)(unsafe.Pointer(p)).FzSql != 0 {
_sqlite3OsCurrentTimeInt64(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpVfs, p+184)
} else {
}
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeActive++
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x40>>6)) == 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeWrite++
}
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x80>>7)) != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeRead++
}
(*TVdbe)(unsafe.Pointer(p)).Fpc = 0
(*TVdbe)(unsafe.Pointer(p)).FeVdbeState = uint8(VDBE_RUN_STATE)
} else {
if int32((*TVdbe)(unsafe.Pointer(p)).FeVdbeState) == int32(VDBE_HALT_STATE) {
/* We used to require that sqlite3_reset() be called before retrying
** sqlite3_step() after any error or after SQLITE_DONE. But beginning
** with version 3.7.0, we changed this so that sqlite3_reset() would
** be called automatically instead of throwing the SQLITE_MISUSE error.
** This "automatic-reset" change is not technically an incompatibility,
** since any application that receives an SQLITE_MISUSE is broken by
** definition.
**
** Nevertheless, some published applications that were originally written
** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE
** returns, and those were broken by the automatic-reset change. As a
** work-around, the SQLITE_OMIT_AUTORESET compile-time option restores the
** legacy behavior of returning SQLITE_MISUSE for cases where the
** previous sqlite3_step() returned something other than a SQLITE_LOCKED
** or SQLITE_BUSY error.
*/
Xsqlite3_reset(tls, p)
goto restart_step
}
}
}
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2)) != 0 {
rc = _sqlite3VdbeList(tls, p)
} else {
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeExec++
rc = _sqlite3VdbeExec(tls, p)
(*Tsqlite3)(unsafe.Pointer(db)).FnVdbeExec--
}
if rc == int32(SQLITE_ROW) {
(*Tsqlite3)(unsafe.Pointer(db)).FerrCode = int32(SQLITE_ROW)
return int32(SQLITE_ROW)
} else {
/* If the statement completed successfully, invoke the profile callback */
if (*TVdbe)(unsafe.Pointer(p)).FstartTime > 0 {
_invokeProfileCallback(tls, db, p)
}
(*TVdbe)(unsafe.Pointer(p)).FpResultRow = uintptr(0)
if rc == int32(SQLITE_DONE) && (*Tsqlite3)(unsafe.Pointer(db)).FautoCommit != 0 {
(*TVdbe)(unsafe.Pointer(p)).Frc = _doWalCallbacks(tls, db)
if (*TVdbe)(unsafe.Pointer(p)).Frc != SQLITE_OK {
rc = int32(SQLITE_ERROR)
}
} else {
if rc != int32(SQLITE_DONE) && int32((*TVdbe)(unsafe.Pointer(p)).FprepFlags)&int32(SQLITE_PREPARE_SAVESQL) != 0 {
/* If this statement was prepared using saved SQL and an
** error has occurred, then return the error code in p->rc to the
** caller. Set the error code in the database handle to the same value.
*/
rc = _sqlite3VdbeTransferError(tls, p)
}
}
}
(*Tsqlite3)(unsafe.Pointer(db)).FerrCode = rc
if int32(SQLITE_NOMEM) == _sqlite3ApiExit(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, (*TVdbe)(unsafe.Pointer(p)).Frc) {
(*TVdbe)(unsafe.Pointer(p)).Frc = int32(SQLITE_NOMEM)
if int32((*TVdbe)(unsafe.Pointer(p)).FprepFlags)&int32(SQLITE_PREPARE_SAVESQL) != 0 {
rc = (*TVdbe)(unsafe.Pointer(p)).Frc
}
}
goto end_of_step
end_of_step:
;
/* There are only a limited number of result codes allowed from the
** statements prepared using the legacy sqlite3_prepare() interface */
return rc & (*Tsqlite3)(unsafe.Pointer(db)).FerrMask
}
// C documentation
//
// /*
// ** This is the top-level implementation of sqlite3_step(). Call
// ** sqlite3Step() to do most of the work. If a schema error occurs,
// ** call sqlite3Reprepare() and try again.
// */
func Xsqlite3_step(tls *libc.TLS, pStmt uintptr) (r int32) {
var cnt, rc, savedPc, v1, v2, v4, v5 int32
var db, v, zErr uintptr
var v3 bool
_, _, _, _, _, _, _, _, _, _, _ = cnt, db, rc, savedPc, v, zErr, v1, v2, v3, v4, v5
rc = SQLITE_OK /* Result from sqlite3Step() */
v = pStmt /* the prepared statement */
cnt = 0 /* Counter to prevent an infinite loop of reprepares */
if _vdbeSafetyNotNull(tls, v) != 0 {
return _sqlite3MisuseError(tls, int32(90829))
}
db = (*TVdbe)(unsafe.Pointer(v)).Fdb
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
for {
v1 = _sqlite3Step(tls, v)
rc = v1
if v3 = v1 == int32(SQLITE_SCHEMA); v3 {
v2 = cnt
cnt++
}
if !(v3 && v2 < int32(SQLITE_MAX_SCHEMA_RETRY)) {
break
}
savedPc = (*TVdbe)(unsafe.Pointer(v)).Fpc
rc = _sqlite3Reprepare(tls, v)
if rc != SQLITE_OK {
/* This case occurs after failing to recompile an sql statement.
** The error message from the SQL compiler has already been loaded
** into the database handle. This block copies the error message
** from the database handle into the statement and sets the statement
** program counter to 0 to ensure that when the statement is
** finalized or reset the parser error message is available via
** sqlite3_errmsg() and sqlite3_errcode().
*/
zErr = Xsqlite3_value_text(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpErr)
_sqlite3DbFree(tls, db, (*TVdbe)(unsafe.Pointer(v)).FzErrMsg)
if !((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) {
(*TVdbe)(unsafe.Pointer(v)).FzErrMsg = _sqlite3DbStrDup(tls, db, zErr)
v4 = _sqlite3ApiExit(tls, db, rc)
rc = v4
(*TVdbe)(unsafe.Pointer(v)).Frc = v4
} else {
(*TVdbe)(unsafe.Pointer(v)).FzErrMsg = uintptr(0)
v5 = libc.Int32FromInt32(SQLITE_NOMEM)
rc = v5
(*TVdbe)(unsafe.Pointer(v)).Frc = v5
}
break
}
Xsqlite3_reset(tls, pStmt)
if savedPc >= 0 {
/* Setting minWriteFileFormat to 254 is a signal to the OP_Init and
** OP_Trace opcodes to *not* perform SQLITE_TRACE_STMT because it has
** already been done once on a prior invocation that failed due to
** SQLITE_SCHEMA. tag-20220401a */
(*TVdbe)(unsafe.Pointer(v)).FminWriteFileFormat = uint8(254)
}
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
return rc
}
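// A caller-side sketch of the step loop described above, not part of the
// generated amalgamation. The SQLITE_SCHEMA retry is already handled inside
// Xsqlite3_step, so the caller only distinguishes rows, completion, and
// errors; pStmt is assumed to be a prepared statement on this connection.
func exampleExecPrepared(tls *libc.TLS, pStmt uintptr) int32 {
	for {
		switch rc := Xsqlite3_step(tls, pStmt); rc {
		case int32(SQLITE_ROW):
			// A result row is available via the column accessors.
			continue
		case int32(SQLITE_DONE):
			// Finished; reset so the statement can be re-run later.
			return Xsqlite3_reset(tls, pStmt)
		default:
			// Error (including SQLITE_BUSY); reset the statement and report it.
			Xsqlite3_reset(tls, pStmt)
			return rc
		}
	}
}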
// C documentation
//
// /*
// ** Extract the user data from a sqlite3_context structure and return a
// ** pointer to it.
// */
func Xsqlite3_user_data(tls *libc.TLS, p uintptr) (r uintptr) {
return (*TFuncDef)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(p)).FpFunc)).FpUserData
}
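// An illustrative scalar function body in the calling convention used for
// application-defined functions throughout this file; it is not part of the
// generated amalgamation. The user data is assumed to point to a Ti64 that
// was registered together with the function (the registration itself, via
// Xsqlite3_create_function, is not shown); that value is added to the single
// argument and the sum is returned as the function result.
func exampleAddUserDataFunc(tls *libc.TLS, ctx uintptr, argc int32, argv uintptr) {
	if argc != int32(1) {
		Xsqlite3_result_null(tls, ctx)
		return
	}
	// Per-function user data (assumed here to be a *Ti64).
	delta := *(*Ti64)(unsafe.Pointer(Xsqlite3_user_data(tls, ctx)))
	// First (and only) argument of this invocation.
	arg := *(*uintptr)(unsafe.Pointer(argv))
	Xsqlite3_result_int64(tls, ctx, Ti64(int64(Xsqlite3_value_int64(tls, arg))+int64(delta)))
}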
// C documentation
//
// /*
// ** Extract the user data from a sqlite3_context structure and return a
// ** pointer to it.
// **
// ** IMPLEMENTATION-OF: R-46798-50301 The sqlite3_context_db_handle() interface
// ** returns a copy of the pointer to the database connection (the 1st
// ** parameter) of the sqlite3_create_function() and
// ** sqlite3_create_function16() routines that originally registered the
// ** application defined function.
// */
func Xsqlite3_context_db_handle(tls *libc.TLS, p uintptr) (r uintptr) {
return (*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(p)).FpOut)).Fdb
}
// C documentation
//
// /*
// ** If this routine is invoked from within an xColumn method of a virtual
// ** table, then it returns true if and only if the call is during an
// ** UPDATE operation and the value of the column will not be modified
// ** by the UPDATE.
// **
// ** If this routine is called from any context other than within the
// ** xColumn method of a virtual table, then the return value is meaningless
// ** and arbitrary.
// **
// ** Virtual table implementations might use this routine to optimize their
// ** performance by substituting a NULL result, or some other light-weight
// ** value, as a signal to the xUpdate routine that the column is unchanged.
// */
func Xsqlite3_vtab_nochange(tls *libc.TLS, p uintptr) (r int32) {
return Xsqlite3_value_nochange(tls, (*Tsqlite3_context)(unsafe.Pointer(p)).FpOut)
}
// C documentation
//
// /*
// ** The destructor function for a ValueList object. This needs to be
// ** a separate function, unknowable to the application, to ensure that
// ** calls to sqlite3_vtab_in_first()/sqlite3_vtab_in_next() that are not
// ** preceded by activation of IN processing via sqlite3_vtab_in() do not
// ** try to access a fake ValueList object inserted by a hostile extension.
// */
func _sqlite3VdbeValueListFree(tls *libc.TLS, pToDelete uintptr) {
Xsqlite3_free(tls, pToDelete)
}
// C documentation
//
// /*
// ** Implementation of sqlite3_vtab_in_first() (if bNext==0) and
// ** sqlite3_vtab_in_next() (if bNext!=0).
// */
func _valueFromValueList(tls *libc.TLS, pVal uintptr, ppOut uintptr, bNext int32) (r int32) {
bp := tls.Alloc(80)
defer tls.Free(80)
var iOff, rc, v1 int32
var pOut, pRhs, zBuf uintptr
var sz Tu32
var _ /* dummy at bp+0 */ int32
var _ /* iSerial at bp+64 */ Tu32
var _ /* sMem at bp+8 */ TMem
_, _, _, _, _, _, _ = iOff, pOut, pRhs, rc, sz, zBuf, v1
*(*uintptr)(unsafe.Pointer(ppOut)) = uintptr(0)
if pVal == uintptr(0) {
return _sqlite3MisuseError(tls, int32(90950))
}
if int32((*Tsqlite3_value)(unsafe.Pointer(pVal)).Fflags)&int32(MEM_Dyn) == 0 || (*Tsqlite3_value)(unsafe.Pointer(pVal)).FxDel != __ccgo_fp(_sqlite3VdbeValueListFree) {
return int32(SQLITE_ERROR)
} else {
pRhs = (*Tsqlite3_value)(unsafe.Pointer(pVal)).Fz
}
if bNext != 0 {
rc = _sqlite3BtreeNext(tls, (*TValueList)(unsafe.Pointer(pRhs)).FpCsr, 0)
} else {
*(*int32)(unsafe.Pointer(bp)) = 0
rc = _sqlite3BtreeFirst(tls, (*TValueList)(unsafe.Pointer(pRhs)).FpCsr, bp)
if _sqlite3BtreeEof(tls, (*TValueList)(unsafe.Pointer(pRhs)).FpCsr) != 0 {
rc = int32(SQLITE_DONE)
}
}
if rc == SQLITE_OK { /* Raw content of current row */
libc.Xmemset(tls, bp+8, 0, uint64(56))
sz = _sqlite3BtreePayloadSize(tls, (*TValueList)(unsafe.Pointer(pRhs)).FpCsr)
rc = _sqlite3VdbeMemFromBtreeZeroOffset(tls, (*TValueList)(unsafe.Pointer(pRhs)).FpCsr, uint32(int32(sz)), bp+8)
if rc == SQLITE_OK {
zBuf = (*(*TMem)(unsafe.Pointer(bp + 8))).Fz
pOut = (*TValueList)(unsafe.Pointer(pRhs)).FpOut
if int32(*(*Tu8)(unsafe.Pointer(zBuf + 1))) < int32(libc.Uint8FromInt32(0x80)) {
*(*Tu32)(unsafe.Pointer(bp + 64)) = uint32(*(*Tu8)(unsafe.Pointer(zBuf + 1)))
v1 = libc.Int32FromInt32(1)
} else {
v1 = int32(_sqlite3GetVarint32(tls, zBuf+1, bp+64))
}
iOff = int32(1) + int32(uint8(v1))
_sqlite3VdbeSerialGet(tls, zBuf+uintptr(iOff), *(*Tu32)(unsafe.Pointer(bp + 64)), pOut)
(*Tsqlite3_value)(unsafe.Pointer(pOut)).Fenc = (*Tsqlite3)(unsafe.Pointer((*Tsqlite3_value)(unsafe.Pointer(pOut)).Fdb)).Fenc
if int32((*Tsqlite3_value)(unsafe.Pointer(pOut)).Fflags)&int32(MEM_Ephem) != 0 && _sqlite3VdbeMemMakeWriteable(tls, pOut) != 0 {
rc = int32(SQLITE_NOMEM)
} else {
*(*uintptr)(unsafe.Pointer(ppOut)) = pOut
}
}
_sqlite3VdbeMemRelease(tls, bp+8)
}
return rc
}
// C documentation
//
// /*
// ** Set the iterator value pVal to point to the first value in the set.
// ** Set (*ppOut) to point to this value before returning.
// */
func Xsqlite3_vtab_in_first(tls *libc.TLS, pVal uintptr, ppOut uintptr) (r int32) {
return _valueFromValueList(tls, pVal, ppOut, 0)
}
// C documentation
//
// /*
// ** Set the iterator value pVal to point to the next value in the set.
// ** Set (*ppOut) to point to this value before returning.
// */
func Xsqlite3_vtab_in_next(tls *libc.TLS, pVal uintptr, ppOut uintptr) (r int32) {
return _valueFromValueList(tls, pVal, ppOut, int32(1))
}
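// A minimal sketch of the iteration protocol implemented by the two
// routines above, not part of the generated amalgamation: it would be used
// from a virtual table's xFilter method once IN processing has been
// activated for a constraint. pVal is assumed to be the right-hand side
// value handed to xFilter; the elements are only counted here.
func exampleCountInValues(tls *libc.TLS, pVal uintptr) (int32, int32) {
	bp := tls.Alloc(8)
	defer tls.Free(8)
	var n int32
	// *bp receives the sqlite3_value for each element of the IN list.
	rc := Xsqlite3_vtab_in_first(tls, pVal, bp)
	for rc == SQLITE_OK && *(*uintptr)(unsafe.Pointer(bp)) != uintptr(0) {
		n++ // each element could be inspected here with Xsqlite3_value_*
		rc = Xsqlite3_vtab_in_next(tls, pVal, bp)
	}
	if rc == int32(SQLITE_DONE) {
		rc = SQLITE_OK
	}
	return n, rc
}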
// C documentation
//
// /*
// ** Return the current time for a statement. If the current time
// ** is requested more than once within the same run of a single prepared
// ** statement, the exact same time is returned for each invocation regardless
// ** of the amount of time that elapses between invocations. In other words,
// ** the time returned is always the time of the first call.
// */
func _sqlite3StmtCurrentTime(tls *libc.TLS, p uintptr) (r Tsqlite3_int64) {
bp := tls.Alloc(16)
defer tls.Free(16)
var piTime, v1 uintptr
var rc int32
var _ /* iTime at bp+0 */ Tsqlite3_int64
_, _, _ = piTime, rc, v1
*(*Tsqlite3_int64)(unsafe.Pointer(bp)) = 0
if (*Tsqlite3_context)(unsafe.Pointer(p)).FpVdbe != uintptr(0) {
v1 = (*Tsqlite3_context)(unsafe.Pointer(p)).FpVdbe + 72
} else {
v1 = bp
}
piTime = v1
if *(*Tsqlite3_int64)(unsafe.Pointer(piTime)) == 0 {
rc = _sqlite3OsCurrentTimeInt64(tls, (*Tsqlite3)(unsafe.Pointer((*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(p)).FpOut)).Fdb)).FpVfs, piTime)
if rc != 0 {
*(*Tsqlite3_int64)(unsafe.Pointer(piTime)) = 0
}
}
return *(*Tsqlite3_int64)(unsafe.Pointer(piTime))
}
// C documentation
//
// /*
// ** Create a new aggregate context for p and return a pointer to
// ** its pMem->z element.
// */
func _createAggContext(tls *libc.TLS, p uintptr, nByte int32) (r uintptr) {
var pMem uintptr
_ = pMem
pMem = (*Tsqlite3_context)(unsafe.Pointer(p)).FpMem
if nByte <= 0 {
_sqlite3VdbeMemSetNull(tls, pMem)
(*TMem)(unsafe.Pointer(pMem)).Fz = uintptr(0)
} else {
_sqlite3VdbeMemClearAndResize(tls, pMem, nByte)
(*TMem)(unsafe.Pointer(pMem)).Fflags = uint16(MEM_Agg)
*(*uintptr)(unsafe.Pointer(pMem)) = (*Tsqlite3_context)(unsafe.Pointer(p)).FpFunc
if (*TMem)(unsafe.Pointer(pMem)).Fz != 0 {
libc.Xmemset(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, 0, uint64(nByte))
}
}
return (*TMem)(unsafe.Pointer(pMem)).Fz
}
// C documentation
//
// /*
// ** Allocate or return the aggregate context for a user function. A new
// ** context is allocated on the first call. Subsequent calls return the
// ** same context that was returned on prior calls.
// */
func Xsqlite3_aggregate_context(tls *libc.TLS, p uintptr, nByte int32) (r uintptr) {
if int32((*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(p)).FpMem)).Fflags)&int32(MEM_Agg) == 0 {
return _createAggContext(tls, p, nByte)
} else {
return (*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(p)).FpMem)).Fz
}
return r
}
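// Illustrative sketch (not part of the SQLite amalgamation): a user-defined
// aggregate's step callback normally obtains (and lazily zero-initializes)
// its per-group accumulator through Xsqlite3_aggregate_context. The
// accumulator layout below is hypothetical.
type exampleSumState struct {
	cnt int64
	sum float64
}
func exampleSumStep(tls *libc.TLS, ctx uintptr, argc int32, argv uintptr) {
	p := Xsqlite3_aggregate_context(tls, ctx, int32(unsafe.Sizeof(exampleSumState{})))
	if p == 0 {
		return // allocation failed; the finalizer will see an empty context
	}
	st := (*exampleSumState)(unsafe.Pointer(p))
	st.cnt++
	st.sum += Xsqlite3_value_double(tls, *(*uintptr)(unsafe.Pointer(argv))) // argv[0]
}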
// C documentation
//
// /*
// ** Return the auxiliary data pointer, if any, for the iArg'th argument to
// ** the user-function defined by pCtx.
// **
// ** The left-most argument is 0.
// **
// ** Undocumented behavior: If iArg is negative then access a cache of
// ** auxiliary data pointers that is available to all functions within a
// ** single prepared statement. The iArg values must match.
// */
func Xsqlite3_get_auxdata(tls *libc.TLS, pCtx uintptr, iArg int32) (r uintptr) {
var pAuxData uintptr
_ = pAuxData
if (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe == uintptr(0) {
return uintptr(0)
}
pAuxData = (*TVdbe)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe)).FpAuxData
for {
if !(pAuxData != 0) {
break
}
if (*TAuxData)(unsafe.Pointer(pAuxData)).FiAuxArg == iArg && ((*TAuxData)(unsafe.Pointer(pAuxData)).FiAuxOp == (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FiOp || iArg < 0) {
return (*TAuxData)(unsafe.Pointer(pAuxData)).FpAux
}
goto _1
_1:
;
pAuxData = (*TAuxData)(unsafe.Pointer(pAuxData)).FpNextAux
}
return uintptr(0)
}
// C documentation
//
// /*
// ** Set the auxiliary data pointer and delete function, for the iArg'th
// ** argument to the user-function defined by pCtx. Any previous value is
// ** deleted by calling the delete function specified when it was set.
// **
// ** The left-most argument is 0.
// **
// ** Undocumented behavior: If iArg is negative then make the data available
// ** to all functions within the current prepared statement using iArg as an
// ** access code.
// */
func Xsqlite3_set_auxdata(tls *libc.TLS, pCtx uintptr, iArg int32, pAux uintptr, xDelete uintptr) {
var pAuxData, pVdbe uintptr
_, _ = pAuxData, pVdbe
pVdbe = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FpVdbe
if pVdbe == uintptr(0) {
goto failed
}
pAuxData = (*TVdbe)(unsafe.Pointer(pVdbe)).FpAuxData
for {
if !(pAuxData != 0) {
break
}
if (*TAuxData)(unsafe.Pointer(pAuxData)).FiAuxArg == iArg && ((*TAuxData)(unsafe.Pointer(pAuxData)).FiAuxOp == (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FiOp || iArg < 0) {
break
}
goto _1
_1:
;
pAuxData = (*TAuxData)(unsafe.Pointer(pAuxData)).FpNextAux
}
if pAuxData == uintptr(0) {
pAuxData = _sqlite3DbMallocZero(tls, (*TVdbe)(unsafe.Pointer(pVdbe)).Fdb, uint64(32))
if !(pAuxData != 0) {
goto failed
}
(*TAuxData)(unsafe.Pointer(pAuxData)).FiAuxOp = (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FiOp
(*TAuxData)(unsafe.Pointer(pAuxData)).FiAuxArg = iArg
(*TAuxData)(unsafe.Pointer(pAuxData)).FpNextAux = (*TVdbe)(unsafe.Pointer(pVdbe)).FpAuxData
(*TVdbe)(unsafe.Pointer(pVdbe)).FpAuxData = pAuxData
if (*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError == 0 {
(*Tsqlite3_context)(unsafe.Pointer(pCtx)).FisError = -int32(1)
}
} else {
if (*TAuxData)(unsafe.Pointer(pAuxData)).FxDeleteAux != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TAuxData)(unsafe.Pointer(pAuxData)).FxDeleteAux})))(tls, (*TAuxData)(unsafe.Pointer(pAuxData)).FpAux)
}
}
(*TAuxData)(unsafe.Pointer(pAuxData)).FpAux = pAux
(*TAuxData)(unsafe.Pointer(pAuxData)).FxDeleteAux = xDelete
return
goto failed
failed:
;
if xDelete != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{xDelete})))(tls, pAux)
}
}
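// Illustrative sketch (not part of the SQLite amalgamation): the classic use
// of the auxiliary-data API is to cache a value derived from a constant
// argument (for instance a compiled pattern) across invocations of a user
// function within one statement. compile and xFree are hypothetical; xFree
// must be a C-ABI destructor pointer, since SQLite invokes it when the
// cached value is eventually discarded.
func exampleCachedAuxData(tls *libc.TLS, ctx uintptr, compile func(*libc.TLS) uintptr, xFree uintptr) uintptr {
	p := Xsqlite3_get_auxdata(tls, ctx, 0) // auxiliary data attached to argument 0
	if p == 0 {
		p = compile(tls)
		Xsqlite3_set_auxdata(tls, ctx, 0, p, xFree) // SQLite now owns p
	}
	return p
}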
// C documentation
//
// /*
// ** Return the number of times the Step function of an aggregate has been
// ** called.
// **
// ** This function is deprecated. Do not use it for new code. It is
// ** provided only to avoid breaking legacy code. New aggregate function
// ** implementations should keep their own counts within their aggregate
// ** context.
// */
func Xsqlite3_aggregate_count(tls *libc.TLS, p uintptr) (r int32) {
return (*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(p)).FpMem)).Fn
}
// C documentation
//
// /*
// ** Return the number of columns in the result set for the statement pStmt.
// */
func Xsqlite3_column_count(tls *libc.TLS, pStmt uintptr) (r int32) {
var pVm uintptr
_ = pVm
pVm = pStmt
if pVm == uintptr(0) {
return 0
}
return int32((*TVdbe)(unsafe.Pointer(pVm)).FnResColumn)
}
// C documentation
//
// /*
// ** Return the number of values available from the current row of the
// ** currently executing statement pStmt.
// */
func Xsqlite3_data_count(tls *libc.TLS, pStmt uintptr) (r int32) {
var pVm uintptr
_ = pVm
pVm = pStmt
if pVm == uintptr(0) || (*TVdbe)(unsafe.Pointer(pVm)).FpResultRow == uintptr(0) {
return 0
}
return int32((*TVdbe)(unsafe.Pointer(pVm)).FnResColumn)
}
// C documentation
//
// /*
// ** Return a pointer to static memory containing an SQL NULL value.
// */
func _columnNullValue(tls *libc.TLS) (r uintptr) {
return uintptr(unsafe.Pointer(&_nullMem))
}
/* Even though the Mem structure contains an element
** of type i64, on certain architectures (x86) with certain compiler
** switches (-Os), gcc may align this Mem object on a 4-byte boundary
** instead of an 8-byte one. This all works fine, except that when
** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s
** that a Mem structure is located on an 8-byte boundary. To prevent
** these assert()s from failing, when building with SQLITE_DEBUG defined
** using gcc, we force nullMem to be 8-byte aligned using the magical
** __attribute__((aligned(8))) macro. */
var _nullMem = TMem{
Fflags: libc.Uint16FromInt32(MEM_Null),
}
// C documentation
//
// /*
// ** Check to see if column iCol of the given statement is valid. If
// ** it is, return a pointer to the Mem for the value of that column.
// ** If iCol is not valid, return a pointer to a Mem which has a value
// ** of NULL.
// */
func _columnMem(tls *libc.TLS, pStmt uintptr, i int32) (r uintptr) {
var pOut, pVm uintptr
_, _ = pOut, pVm
pVm = pStmt
if pVm == uintptr(0) {
return _columnNullValue(tls)
}
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(pVm)).Fdb)).Fmutex)
if (*TVdbe)(unsafe.Pointer(pVm)).FpResultRow != uintptr(0) && i < int32((*TVdbe)(unsafe.Pointer(pVm)).FnResColumn) && i >= 0 {
pOut = (*TVdbe)(unsafe.Pointer(pVm)).FpResultRow + uintptr(i)*56
} else {
_sqlite3Error(tls, (*TVdbe)(unsafe.Pointer(pVm)).Fdb, int32(SQLITE_RANGE))
pOut = _columnNullValue(tls)
}
return pOut
}
// C documentation
//
// /*
// ** This function is called after invoking an sqlite3_value_XXX function on a
// ** column value (i.e. a value returned by evaluating an SQL expression in the
// ** select list of a SELECT statement) that may cause a malloc() failure. If
// ** malloc() has failed, the thread's mallocFailed flag is cleared and the result
// ** code of statement pStmt is set to SQLITE_NOMEM.
// **
// ** Specifically, this is called from within:
// **
// ** sqlite3_column_int()
// ** sqlite3_column_int64()
// ** sqlite3_column_text()
// ** sqlite3_column_text16()
// ** sqlite3_column_real()
// ** sqlite3_column_bytes()
// ** sqlite3_column_bytes16()
// ** sqlite3_column_blob()
// */
func _columnMallocFailure(tls *libc.TLS, pStmt uintptr) {
var p uintptr
_ = p
/* If malloc() failed during an encoding conversion within an
** sqlite3_column_XXX API, then set the return code of the statement to
** SQLITE_NOMEM. The next call to _step() (if any) will return SQLITE_ERROR
** and _finalize() will return NOMEM.
*/
p = pStmt
if p != 0 {
(*TVdbe)(unsafe.Pointer(p)).Frc = _sqlite3ApiExit(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, (*TVdbe)(unsafe.Pointer(p)).Frc)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
}
}
// C documentation
//
// /**************************** sqlite3_column_ *******************************
// ** The following routines are used to access elements of the current row
// ** in the result set.
// */
func Xsqlite3_column_blob(tls *libc.TLS, pStmt uintptr, i int32) (r uintptr) {
var val uintptr
_ = val
val = Xsqlite3_value_blob(tls, _columnMem(tls, pStmt, i))
/* Even though there is no encoding conversion, value_blob() might
** need to call malloc() to expand the result of a zeroblob()
** expression.
*/
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_bytes(tls *libc.TLS, pStmt uintptr, i int32) (r int32) {
var val int32
_ = val
val = Xsqlite3_value_bytes(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_bytes16(tls *libc.TLS, pStmt uintptr, i int32) (r int32) {
var val int32
_ = val
val = Xsqlite3_value_bytes16(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_double(tls *libc.TLS, pStmt uintptr, i int32) (r float64) {
var val float64
_ = val
val = Xsqlite3_value_double(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_int(tls *libc.TLS, pStmt uintptr, i int32) (r int32) {
var val int32
_ = val
val = Xsqlite3_value_int(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_int64(tls *libc.TLS, pStmt uintptr, i int32) (r Tsqlite_int64) {
var val Tsqlite_int64
_ = val
val = Xsqlite3_value_int64(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_text(tls *libc.TLS, pStmt uintptr, i int32) (r uintptr) {
var val uintptr
_ = val
val = Xsqlite3_value_text(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_value(tls *libc.TLS, pStmt uintptr, i int32) (r uintptr) {
var pOut, p1, p2 uintptr
_, _, _ = pOut, p1, p2
pOut = _columnMem(tls, pStmt, i)
if int32((*TMem)(unsafe.Pointer(pOut)).Fflags)&int32(MEM_Static) != 0 {
p1 = pOut + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^libc.Int32FromInt32(MEM_Static))
p2 = pOut + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_Ephem))
}
_columnMallocFailure(tls, pStmt)
return pOut
}
func Xsqlite3_column_text16(tls *libc.TLS, pStmt uintptr, i int32) (r uintptr) {
var val uintptr
_ = val
val = Xsqlite3_value_text16(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return val
}
func Xsqlite3_column_type(tls *libc.TLS, pStmt uintptr, i int32) (r int32) {
var iType int32
_ = iType
iType = Xsqlite3_value_type(tls, _columnMem(tls, pStmt, i))
_columnMallocFailure(tls, pStmt)
return iType
}
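// Illustrative sketch (not part of the SQLite amalgamation): once a step has
// produced a row, the row is read column by column with the accessors above.
// The pointer returned by Xsqlite3_column_text is only valid until the next
// step or type conversion, so it is copied into a Go string here; a NULL
// column yields a zero pointer and is left as the empty string.
func exampleReadRowText(tls *libc.TLS, pStmt uintptr) []string {
	n := Xsqlite3_data_count(tls, pStmt)
	cols := make([]string, n)
	for i := int32(0); i < n; i++ {
		if z := Xsqlite3_column_text(tls, pStmt, i); z != 0 {
			cols[i] = libc.GoString(z)
		}
	}
	return cols
}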
// C documentation
//
// /*
// ** Column names appropriate for EXPLAIN or EXPLAIN QUERY PLAN.
// */
var _azExplainColNames8 = [12]uintptr{
0: __ccgo_ts + 5448,
1: __ccgo_ts + 5453,
2: __ccgo_ts + 5460,
3: __ccgo_ts + 5463,
4: __ccgo_ts + 5466,
5: __ccgo_ts + 5469,
6: __ccgo_ts + 5472,
7: __ccgo_ts + 5475,
8: __ccgo_ts + 5483,
9: __ccgo_ts + 5486,
10: __ccgo_ts + 5493,
11: __ccgo_ts + 5501,
}
var _azExplainColNames16data = [60]Tu16{
0: uint16('a'),
1: uint16('d'),
2: uint16('d'),
3: uint16('r'),
5: uint16('o'),
6: uint16('p'),
7: uint16('c'),
8: uint16('o'),
9: uint16('d'),
10: uint16('e'),
12: uint16('p'),
13: uint16('1'),
15: uint16('p'),
16: uint16('2'),
18: uint16('p'),
19: uint16('3'),
21: uint16('p'),
22: uint16('4'),
24: uint16('p'),
25: uint16('5'),
27: uint16('c'),
28: uint16('o'),
29: uint16('m'),
30: uint16('m'),
31: uint16('e'),
32: uint16('n'),
33: uint16('t'),
35: uint16('i'),
36: uint16('d'),
38: uint16('p'),
39: uint16('a'),
40: uint16('r'),
41: uint16('e'),
42: uint16('n'),
43: uint16('t'),
45: uint16('n'),
46: uint16('o'),
47: uint16('t'),
48: uint16('u'),
49: uint16('s'),
50: uint16('e'),
51: uint16('d'),
53: uint16('d'),
54: uint16('e'),
55: uint16('t'),
56: uint16('a'),
57: uint16('i'),
58: uint16('l'),
}
var _iExplainColNames16 = [12]Tu8{
1: uint8(5),
2: uint8(12),
3: uint8(15),
4: uint8(18),
5: uint8(21),
6: uint8(24),
7: uint8(27),
8: uint8(35),
9: uint8(38),
10: uint8(45),
11: uint8(53),
}
// C documentation
//
// /*
// ** Convert the N-th element of pStmt->pColName[] into a string using
// ** xFunc() then return that string. If N is out of range, return 0.
// **
// ** There are up to 5 names for each column. useType determines which
// ** name is returned. Here are the names:
// **
// ** 0 The column name as it should be displayed for output
// ** 1 The datatype name for the column
// ** 2 The name of the database that the column derives from
// ** 3 The name of the table that the column derives from
// ** 4 The name of the table column that the result column derives from
// **
// ** If the result is not a simple column reference (if it is an expression
// ** or a constant) then useTypes 2, 3, and 4 return NULL.
// */
func _columnName(tls *libc.TLS, pStmt uintptr, N int32, useUtf16 int32, useType int32) (r uintptr) {
var db, p, ret uintptr
var i, n, v1 int32
var prior_mallocFailed Tu8
_, _, _, _, _, _, _ = db, i, n, p, prior_mallocFailed, ret, v1
if N < 0 {
return uintptr(0)
}
ret = uintptr(0)
p = pStmt
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2)) != 0 {
if useType > 0 {
goto columnName_end
}
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2)) == int32(1) {
v1 = int32(8)
} else {
v1 = int32(4)
}
n = v1
if N >= n {
goto columnName_end
}
if useUtf16 != 0 {
i = int32(_iExplainColNames16[N+int32(8)*int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2))-int32(8)])
ret = uintptr(unsafe.Pointer(&_azExplainColNames16data)) + uintptr(i)*2
} else {
ret = _azExplainColNames8[N+int32(8)*int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0xc>>2))-int32(8)]
}
goto columnName_end
}
n = int32((*TVdbe)(unsafe.Pointer(p)).FnResColumn)
if N < n {
prior_mallocFailed = (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed
N += useType * n
if useUtf16 != 0 {
ret = Xsqlite3_value_text16(tls, (*TVdbe)(unsafe.Pointer(p)).FaColName+uintptr(N)*56)
} else {
ret = Xsqlite3_value_text(tls, (*TVdbe)(unsafe.Pointer(p)).FaColName+uintptr(N)*56)
}
/* A malloc may have failed inside of the _text() call. If this
** is the case, clear the mallocFailed flag and return NULL.
*/
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed) > int32(prior_mallocFailed) {
_sqlite3OomClear(tls, db)
ret = uintptr(0)
}
}
goto columnName_end
columnName_end:
;
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
return ret
}
// C documentation
//
// /*
// ** Return the name of the Nth column of the result set returned by SQL
// ** statement pStmt.
// */
func Xsqlite3_column_name(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, 0, COLNAME_NAME)
}
func Xsqlite3_column_name16(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, int32(1), COLNAME_NAME)
}
/*
** Constraint: If you have ENABLE_COLUMN_METADATA then you must
** not define OMIT_DECLTYPE.
*/
// C documentation
//
// /*
// ** Return the column declaration type (if applicable) of the 'i'th column
// ** of the result set of SQL statement pStmt.
// */
func Xsqlite3_column_decltype(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, 0, int32(COLNAME_DECLTYPE))
}
func Xsqlite3_column_decltype16(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, int32(1), int32(COLNAME_DECLTYPE))
}
// C documentation
//
// /*
// ** Return the name of the database from which a result column derives.
// ** NULL is returned if the result column is an expression or constant or
// ** anything else which is not an unambiguous reference to a database column.
// */
func Xsqlite3_column_database_name(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, 0, int32(COLNAME_DATABASE))
}
func Xsqlite3_column_database_name16(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, int32(1), int32(COLNAME_DATABASE))
}
// C documentation
//
// /*
// ** Return the name of the table from which a result column derives.
// ** NULL is returned if the result column is an expression or constant or
// ** anything else which is not an unambiguous reference to a database column.
// */
func Xsqlite3_column_table_name(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, 0, int32(COLNAME_TABLE))
}
func Xsqlite3_column_table_name16(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, int32(1), int32(COLNAME_TABLE))
}
// C documentation
//
// /*
// ** Return the name of the table column from which a result column derives.
// ** NULL is returned if the result column is an expression or constant or
// ** anything else which is not an unambiguous reference to a database column.
// */
func Xsqlite3_column_origin_name(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, 0, int32(COLNAME_COLUMN))
}
func Xsqlite3_column_origin_name16(tls *libc.TLS, pStmt uintptr, N int32) (r uintptr) {
return _columnName(tls, pStmt, N, int32(1), int32(COLNAME_COLUMN))
}
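// Illustrative sketch (not part of the SQLite amalgamation): the metadata
// accessors above can be combined to describe one result column. Each
// returns a pointer owned by the prepared statement, or 0 when the column is
// not an unambiguous table reference, so the strings are copied out
// immediately.
func exampleDescribeColumn(tls *libc.TLS, pStmt uintptr, i int32) (name, decltype, origin string) {
	if z := Xsqlite3_column_name(tls, pStmt, i); z != 0 {
		name = libc.GoString(z)
	}
	if z := Xsqlite3_column_decltype(tls, pStmt, i); z != 0 {
		decltype = libc.GoString(z)
	}
	if z := Xsqlite3_column_origin_name(tls, pStmt, i); z != 0 {
		origin = libc.GoString(z)
	}
	return name, decltype, origin
}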
// C documentation
//
// /******************************* sqlite3_bind_ ***************************
// **
// ** Routines used to attach values to wildcards in a compiled SQL statement.
// */
// /*
// ** Unbind the value bound to variable i in virtual machine p. This is
// ** the same as binding a NULL value to the column. If the "i" parameter is
// ** out of range, then SQLITE_RANGE is returned. Otherwise SQLITE_OK.
// **
// ** A successful evaluation of this routine acquires the mutex on p.
// ** The mutex is released if any kind of error occurs.
// **
// ** The error code stored in database p->db is overwritten with the return
// ** value in any case.
// */
func _vdbeUnbind(tls *libc.TLS, p uintptr, i uint32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pVar uintptr
var v1 uint32
var v2 bool
_, _, _ = pVar, v1, v2
if _vdbeSafetyNotNull(tls, p) != 0 {
return _sqlite3MisuseError(tls, int32(91554))
}
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
if int32((*TVdbe)(unsafe.Pointer(p)).FeVdbeState) != int32(VDBE_READY_STATE) {
_sqlite3Error(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, _sqlite3MisuseError(tls, int32(91558)))
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
Xsqlite3_log(tls, int32(SQLITE_MISUSE), __ccgo_ts+5508, libc.VaList(bp+8, (*TVdbe)(unsafe.Pointer(p)).FzSql))
return _sqlite3MisuseError(tls, int32(91562))
}
if i >= uint32((*TVdbe)(unsafe.Pointer(p)).FnVar) {
_sqlite3Error(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, int32(SQLITE_RANGE))
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
return int32(SQLITE_RANGE)
}
pVar = (*TVdbe)(unsafe.Pointer(p)).FaVar + uintptr(i)*56
_sqlite3VdbeMemRelease(tls, pVar)
(*TMem)(unsafe.Pointer(pVar)).Fflags = uint16(MEM_Null)
(*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FerrCode = SQLITE_OK
/* If the bit corresponding to this variable in Vdbe.expmask is set, then
** binding a new value to this variable invalidates the current query plan.
**
** IMPLEMENTATION-OF: R-57496-20354 If the specific value bound to a host
** parameter in the WHERE clause might influence the choice of query plan
** for a statement, then the statement will be automatically recompiled,
** as if there had been a schema change, on the first sqlite3_step() call
** following any change to the bindings of that parameter.
*/
if v2 = (*TVdbe)(unsafe.Pointer(p)).Fexpmask != uint32(0); v2 {
if i >= uint32(31) {
v1 = uint32(0x80000000)
} else {
v1 = libc.Uint32FromInt32(1) << i
}
}
if v2 && (*TVdbe)(unsafe.Pointer(p)).Fexpmask&v1 != uint32(0) {
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(1), 0, 0x3)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Bind a text or BLOB value.
// */
func _bindText(tls *libc.TLS, pStmt uintptr, i int32, zData uintptr, nData Ti64, xDel uintptr, encoding Tu8) (r int32) {
var p, pVar uintptr
var rc int32
_, _, _ = p, pVar, rc
p = pStmt
rc = _vdbeUnbind(tls, p, uint32(i-libc.Int32FromInt32(1)))
if rc == SQLITE_OK {
if zData != uintptr(0) {
pVar = (*TVdbe)(unsafe.Pointer(p)).FaVar + uintptr(i-int32(1))*56
rc = _sqlite3VdbeMemSetStr(tls, pVar, zData, nData, encoding, xDel)
if rc == SQLITE_OK && int32(encoding) != 0 {
rc = _sqlite3VdbeChangeEncoding(tls, pVar, int32((*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fenc))
}
if rc != 0 {
_sqlite3Error(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, rc)
rc = _sqlite3ApiExit(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, rc)
}
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
} else {
if xDel != libc.UintptrFromInt32(0) && xDel != uintptr(-libc.Int32FromInt32(1)) {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{xDel})))(tls, zData)
}
}
return rc
}
// C documentation
//
// /*
// ** Bind a blob value to an SQL statement variable.
// */
func Xsqlite3_bind_blob(tls *libc.TLS, pStmt uintptr, i int32, zData uintptr, nData int32, xDel uintptr) (r int32) {
return _bindText(tls, pStmt, i, zData, int64(nData), xDel, uint8(0))
}
func Xsqlite3_bind_blob64(tls *libc.TLS, pStmt uintptr, i int32, zData uintptr, nData Tsqlite3_uint64, xDel uintptr) (r int32) {
return _bindText(tls, pStmt, i, zData, int64(nData), xDel, uint8(0))
}
func Xsqlite3_bind_double(tls *libc.TLS, pStmt uintptr, i int32, rValue float64) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
p = pStmt
rc = _vdbeUnbind(tls, p, uint32(i-libc.Int32FromInt32(1)))
if rc == SQLITE_OK {
_sqlite3VdbeMemSetDouble(tls, (*TVdbe)(unsafe.Pointer(p)).FaVar+uintptr(i-int32(1))*56, rValue)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
}
return rc
}
func Xsqlite3_bind_int(tls *libc.TLS, p uintptr, i int32, iValue int32) (r int32) {
return Xsqlite3_bind_int64(tls, p, i, int64(iValue))
}
func Xsqlite3_bind_int64(tls *libc.TLS, pStmt uintptr, i int32, iValue Tsqlite_int64) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
p = pStmt
rc = _vdbeUnbind(tls, p, uint32(i-libc.Int32FromInt32(1)))
if rc == SQLITE_OK {
_sqlite3VdbeMemSetInt64(tls, (*TVdbe)(unsafe.Pointer(p)).FaVar+uintptr(i-int32(1))*56, iValue)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
}
return rc
}
func Xsqlite3_bind_null(tls *libc.TLS, pStmt uintptr, i int32) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
p = pStmt
rc = _vdbeUnbind(tls, p, uint32(i-libc.Int32FromInt32(1)))
if rc == SQLITE_OK {
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
}
return rc
}
func Xsqlite3_bind_pointer(tls *libc.TLS, pStmt uintptr, i int32, pPtr uintptr, zPTtype uintptr, xDestructor uintptr) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
p = pStmt
rc = _vdbeUnbind(tls, p, uint32(i-libc.Int32FromInt32(1)))
if rc == SQLITE_OK {
_sqlite3VdbeMemSetPointer(tls, (*TVdbe)(unsafe.Pointer(p)).FaVar+uintptr(i-int32(1))*56, pPtr, zPTtype, xDestructor)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
} else {
if xDestructor != 0 {
(*(*func(*libc.TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{xDestructor})))(tls, pPtr)
}
}
return rc
}
func Xsqlite3_bind_text(tls *libc.TLS, pStmt uintptr, i int32, zData uintptr, nData int32, xDel uintptr) (r int32) {
return _bindText(tls, pStmt, i, zData, int64(nData), xDel, uint8(SQLITE_UTF8))
}
func Xsqlite3_bind_text64(tls *libc.TLS, pStmt uintptr, i int32, zData uintptr, nData Tsqlite3_uint64, xDel uintptr, enc uint8) (r int32) {
if int32(enc) != int32(SQLITE_UTF8) {
if int32(enc) == int32(SQLITE_UTF16) {
enc = uint8(SQLITE_UTF16LE)
}
nData &= uint64(^int32(libc.Uint16FromInt32(1)))
}
return _bindText(tls, pStmt, i, zData, int64(nData), xDel, enc)
}
func Xsqlite3_bind_text16(tls *libc.TLS, pStmt uintptr, i int32, zData uintptr, n int32, xDel uintptr) (r int32) {
return _bindText(tls, pStmt, i, zData, int64(uint64(n) & ^libc.Uint64FromInt32(1)), xDel, uint8(SQLITE_UTF16LE))
}
func Xsqlite3_bind_value(tls *libc.TLS, pStmt uintptr, i int32, pValue uintptr) (r int32) {
var rc int32
var v1 float64
_, _ = rc, v1
switch Xsqlite3_value_type(tls, pValue) {
case int32(SQLITE_INTEGER):
rc = Xsqlite3_bind_int64(tls, pStmt, i, *(*Ti64)(unsafe.Pointer(pValue)))
case int32(SQLITE_FLOAT):
if int32((*Tsqlite3_value)(unsafe.Pointer(pValue)).Fflags)&int32(MEM_Real) != 0 {
v1 = *(*float64)(unsafe.Pointer(pValue))
} else {
v1 = float64(*(*Ti64)(unsafe.Pointer(pValue)))
}
rc = Xsqlite3_bind_double(tls, pStmt, i, v1)
case int32(SQLITE_BLOB):
if int32((*Tsqlite3_value)(unsafe.Pointer(pValue)).Fflags)&int32(MEM_Zero) != 0 {
rc = Xsqlite3_bind_zeroblob(tls, pStmt, i, *(*int32)(unsafe.Pointer(&(*Tsqlite3_value)(unsafe.Pointer(pValue)).Fu)))
} else {
rc = Xsqlite3_bind_blob(tls, pStmt, i, (*Tsqlite3_value)(unsafe.Pointer(pValue)).Fz, (*Tsqlite3_value)(unsafe.Pointer(pValue)).Fn, uintptr(-libc.Int32FromInt32(1)))
}
case int32(SQLITE_TEXT):
rc = _bindText(tls, pStmt, i, (*Tsqlite3_value)(unsafe.Pointer(pValue)).Fz, int64((*Tsqlite3_value)(unsafe.Pointer(pValue)).Fn), uintptr(-libc.Int32FromInt32(1)), (*Tsqlite3_value)(unsafe.Pointer(pValue)).Fenc)
default:
rc = Xsqlite3_bind_null(tls, pStmt, i)
break
}
return rc
}
func Xsqlite3_bind_zeroblob(tls *libc.TLS, pStmt uintptr, i int32, n int32) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
p = pStmt
rc = _vdbeUnbind(tls, p, uint32(i-libc.Int32FromInt32(1)))
if rc == SQLITE_OK {
_sqlite3VdbeMemSetZeroBlob(tls, (*TVdbe)(unsafe.Pointer(p)).FaVar+uintptr(i-int32(1))*56, n)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
}
return rc
}
func Xsqlite3_bind_zeroblob64(tls *libc.TLS, pStmt uintptr, i int32, n Tsqlite3_uint64) (r int32) {
var p uintptr
var rc int32
_, _ = p, rc
p = pStmt
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
if n > uint64(*(*int32)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb + 136))) {
rc = int32(SQLITE_TOOBIG)
} else {
rc = Xsqlite3_bind_zeroblob(tls, pStmt, i, int32(n))
}
rc = _sqlite3ApiExit(tls, (*TVdbe)(unsafe.Pointer(p)).Fdb, rc)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
return rc
}
// C documentation
//
// /*
// ** Return the number of wildcards that can be potentially bound to.
// ** This routine is added to support DBD::SQLite.
// */
func Xsqlite3_bind_parameter_count(tls *libc.TLS, pStmt uintptr) (r int32) {
var p uintptr
var v1 int32
_, _ = p, v1
p = pStmt
if p != 0 {
v1 = int32((*TVdbe)(unsafe.Pointer(p)).FnVar)
} else {
v1 = 0
}
return v1
}
// C documentation
//
// /*
// ** Return the name of a wildcard parameter. Return NULL if the index
// ** is out of range or if the wildcard is unnamed.
// **
// ** The result is always UTF-8.
// */
func Xsqlite3_bind_parameter_name(tls *libc.TLS, pStmt uintptr, i int32) (r uintptr) {
var p uintptr
_ = p
p = pStmt
if p == uintptr(0) {
return uintptr(0)
}
return _sqlite3VListNumToName(tls, (*TVdbe)(unsafe.Pointer(p)).FpVList, i)
}
// C documentation
//
// /*
// ** Given a wildcard parameter name, return the index of the variable
// ** with that name. If there is no variable with the given name,
// ** return 0.
// */
func _sqlite3VdbeParameterIndex(tls *libc.TLS, p uintptr, zName uintptr, nName int32) (r int32) {
if p == uintptr(0) || zName == uintptr(0) {
return 0
}
return _sqlite3VListNameToNum(tls, (*TVdbe)(unsafe.Pointer(p)).FpVList, zName, nName)
}
func Xsqlite3_bind_parameter_index(tls *libc.TLS, pStmt uintptr, zName uintptr) (r int32) {
return _sqlite3VdbeParameterIndex(tls, pStmt, zName, _sqlite3Strlen30(tls, zName))
}
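// Illustrative sketch (not part of the SQLite amalgamation): parameters are
// normally bound by position, but a named parameter such as ":id" can be
// resolved first with Xsqlite3_bind_parameter_index. zName must point to a
// NUL-terminated UTF-8 string; an index of 0 means the name does not occur
// in the SQL text.
func exampleBindNamedInt64(tls *libc.TLS, pStmt uintptr, zName uintptr, v int64) int32 {
	i := Xsqlite3_bind_parameter_index(tls, pStmt, zName)
	if i == 0 {
		return int32(SQLITE_RANGE)
	}
	return Xsqlite3_bind_int64(tls, pStmt, i, Tsqlite_int64(v))
}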
// C documentation
//
// /*
// ** Transfer all bindings from the first statement over to the second.
// */
func _sqlite3TransferBindings(tls *libc.TLS, pFromStmt uintptr, pToStmt uintptr) (r int32) {
var i int32
var pFrom, pTo uintptr
_, _, _ = i, pFrom, pTo
pFrom = pFromStmt
pTo = pToStmt
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(pTo)).Fdb)).Fmutex)
i = 0
for {
if !(i < int32((*TVdbe)(unsafe.Pointer(pFrom)).FnVar)) {
break
}
_sqlite3VdbeMemMove(tls, (*TVdbe)(unsafe.Pointer(pTo)).FaVar+uintptr(i)*56, (*TVdbe)(unsafe.Pointer(pFrom)).FaVar+uintptr(i)*56)
goto _1
_1:
;
i++
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(pTo)).Fdb)).Fmutex)
return SQLITE_OK
}
// C documentation
//
// /*
// ** Deprecated external interface. Internal/core SQLite code
// ** should call sqlite3TransferBindings.
// **
// ** It is misuse to call this routine with statements from different
// ** database connections. But as this is a deprecated interface, we
// ** will not bother to check for that condition.
// **
// ** If the two statements contain a different number of bindings, then
// ** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise
// ** SQLITE_OK is returned.
// */
func Xsqlite3_transfer_bindings(tls *libc.TLS, pFromStmt uintptr, pToStmt uintptr) (r int32) {
var pFrom, pTo uintptr
_, _ = pFrom, pTo
pFrom = pFromStmt
pTo = pToStmt
if int32((*TVdbe)(unsafe.Pointer(pFrom)).FnVar) != int32((*TVdbe)(unsafe.Pointer(pTo)).FnVar) {
return int32(SQLITE_ERROR)
}
if (*TVdbe)(unsafe.Pointer(pTo)).Fexpmask != 0 {
libc.SetBitFieldPtr16Uint32(pTo+200, libc.Uint32FromInt32(1), 0, 0x3)
}
if (*TVdbe)(unsafe.Pointer(pFrom)).Fexpmask != 0 {
libc.SetBitFieldPtr16Uint32(pFrom+200, libc.Uint32FromInt32(1), 0, 0x3)
}
return _sqlite3TransferBindings(tls, pFromStmt, pToStmt)
}
// C documentation
//
// /*
// ** Return the sqlite3* database handle to which the prepared statement given
// ** in the argument belongs. This is the same database handle that was
// ** the first argument to the sqlite3_prepare() that was used to create
// ** the statement in the first place.
// */
func Xsqlite3_db_handle(tls *libc.TLS, pStmt uintptr) (r uintptr) {
var v1 uintptr
_ = v1
if pStmt != 0 {
v1 = (*TVdbe)(unsafe.Pointer(pStmt)).Fdb
} else {
v1 = uintptr(0)
}
return v1
}
// C documentation
//
// /*
// ** Return true if the prepared statement is guaranteed to not modify the
// ** database.
// */
func Xsqlite3_stmt_readonly(tls *libc.TLS, pStmt uintptr) (r int32) {
var v1 int32
_ = v1
if pStmt != 0 {
v1 = int32(Tbft(*(*uint16)(unsafe.Pointer(pStmt + 200)) & 0x40 >> 6))
} else {
v1 = int32(1)
}
return v1
}
// C documentation
//
// /*
// ** Return 1 if the statement is an EXPLAIN and return 2 if the
// ** statement is an EXPLAIN QUERY PLAN.
// */
func Xsqlite3_stmt_isexplain(tls *libc.TLS, pStmt uintptr) (r int32) {
var v1 int32
_ = v1
if pStmt != 0 {
v1 = int32(Tbft(*(*uint16)(unsafe.Pointer(pStmt + 200)) & 0xc >> 2))
} else {
v1 = 0
}
return v1
}
// C documentation
//
// /*
// ** Set the explain mode for a statement.
// */
func Xsqlite3_stmt_explain(tls *libc.TLS, pStmt uintptr, eMode int32) (r int32) {
var rc int32
var v uintptr
_, _ = rc, v
v = pStmt
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).Fdb)).Fmutex)
if int32(Tbft(*(*uint16)(unsafe.Pointer(v + 200))&0xc>>2)) == eMode {
rc = SQLITE_OK
} else {
if eMode < 0 || eMode > int32(2) {
rc = int32(SQLITE_ERROR)
} else {
if int32((*TVdbe)(unsafe.Pointer(v)).FprepFlags)&int32(SQLITE_PREPARE_SAVESQL) == 0 {
rc = int32(SQLITE_ERROR)
} else {
if int32((*TVdbe)(unsafe.Pointer(v)).FeVdbeState) != int32(VDBE_READY_STATE) {
rc = int32(SQLITE_BUSY)
} else {
if (*TVdbe)(unsafe.Pointer(v)).FnMem >= int32(10) && (eMode != int32(2) || int32(Tbft(*(*uint16)(unsafe.Pointer(v + 200))&0x100>>8)) != 0) {
/* No reprepare necessary */
libc.SetBitFieldPtr16Uint32(v+200, uint32(eMode), 2, 0xc)
rc = SQLITE_OK
} else {
libc.SetBitFieldPtr16Uint32(v+200, uint32(eMode), 2, 0xc)
rc = _sqlite3Reprepare(tls, v)
libc.SetBitFieldPtr16Uint32(v+200, libc.BoolUint32(eMode == libc.Int32FromInt32(2)), 8, 0x100)
}
}
}
}
}
if int32(Tbft(*(*uint16)(unsafe.Pointer(v + 200))&0xc>>2)) != 0 {
(*TVdbe)(unsafe.Pointer(v)).FnResColumn = uint16(int32(12) - int32(4)*int32(Tbft(*(*uint16)(unsafe.Pointer(v + 200))&0xc>>2)))
} else {
(*TVdbe)(unsafe.Pointer(v)).FnResColumn = (*TVdbe)(unsafe.Pointer(v)).FnResAlloc
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).Fdb)).Fmutex)
return rc
}
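// Illustrative sketch (not part of the SQLite amalgamation): switching a
// reset statement into EXPLAIN QUERY PLAN mode (eMode==2) and back to the
// normal mode (eMode==0). Per the checks above this fails with SQLITE_ERROR
// unless the statement was prepared with SQLITE_PREPARE_SAVESQL, and with
// SQLITE_BUSY while the statement is still running.
func exampleToggleQueryPlanMode(tls *libc.TLS, pStmt uintptr, on bool) int32 {
	mode := int32(0)
	if on {
		mode = int32(2)
	}
	return Xsqlite3_stmt_explain(tls, pStmt, mode)
}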
// C documentation
//
// /*
// ** Return true if the prepared statement is in need of being reset.
// */
func Xsqlite3_stmt_busy(tls *libc.TLS, pStmt uintptr) (r int32) {
var v uintptr
_ = v
v = pStmt
return libc.BoolInt32(v != uintptr(0) && int32((*TVdbe)(unsafe.Pointer(v)).FeVdbeState) == int32(VDBE_RUN_STATE))
}
// C documentation
//
// /*
// ** Return a pointer to the next prepared statement after pStmt associated
// ** with database connection pDb. If pStmt is NULL, return the first
// ** prepared statement for the database connection. Return NULL if there
// ** are no more.
// */
func Xsqlite3_next_stmt(tls *libc.TLS, pDb uintptr, pStmt uintptr) (r uintptr) {
var pNext uintptr
_ = pNext
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(pDb)).Fmutex)
if pStmt == uintptr(0) {
pNext = (*Tsqlite3)(unsafe.Pointer(pDb)).FpVdbe
} else {
pNext = (*TVdbe)(unsafe.Pointer(pStmt)).FpVNext
}
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(pDb)).Fmutex)
return pNext
}
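// Illustrative sketch (not part of the SQLite amalgamation): walking every
// prepared statement currently associated with a connection, for example to
// count how many still need a reset before the connection can be closed.
func exampleCountBusyStatements(tls *libc.TLS, db uintptr) int32 {
	var n int32
	for pStmt := Xsqlite3_next_stmt(tls, db, 0); pStmt != 0; pStmt = Xsqlite3_next_stmt(tls, db, pStmt) {
		if Xsqlite3_stmt_busy(tls, pStmt) != 0 {
			n++
		}
	}
	return n
}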
// C documentation
//
// /*
// ** Return the value of a status counter for a prepared statement
// */
func Xsqlite3_stmt_status(tls *libc.TLS, pStmt uintptr, op int32, resetFlag int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var db, pVdbe uintptr
var _ /* v at bp+0 */ Tu32
_, _ = db, pVdbe
pVdbe = pStmt
if op == int32(SQLITE_STMTSTATUS_MEMUSED) {
db = (*TVdbe)(unsafe.Pointer(pVdbe)).Fdb
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
*(*Tu32)(unsafe.Pointer(bp)) = uint32(0)
(*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed = bp
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpStart
_sqlite3VdbeDelete(tls, pVdbe)
(*Tsqlite3)(unsafe.Pointer(db)).FpnBytesFreed = uintptr(0)
(*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpEnd = (*Tsqlite3)(unsafe.Pointer(db)).Flookaside.FpTrueEnd
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
} else {
*(*Tu32)(unsafe.Pointer(bp)) = *(*Tu32)(unsafe.Pointer(pVdbe + 212 + uintptr(op)*4))
if resetFlag != 0 {
*(*Tu32)(unsafe.Pointer(pVdbe + 212 + uintptr(op)*4)) = uint32(0)
}
}
return int32(*(*Tu32)(unsafe.Pointer(bp)))
}
// C documentation
//
// /*
// ** Return the SQL associated with a prepared statement
// */
func Xsqlite3_sql(tls *libc.TLS, pStmt uintptr) (r uintptr) {
var p, v1 uintptr
_, _ = p, v1
p = pStmt
if p != 0 {
v1 = (*TVdbe)(unsafe.Pointer(p)).FzSql
} else {
v1 = uintptr(0)
}
return v1
}
// C documentation
//
// /*
// ** Return the SQL associated with a prepared statement with
// ** bound parameters expanded. Space to hold the returned string is
// ** obtained from sqlite3_malloc(). The caller is responsible for
// ** freeing the returned string by passing it to sqlite3_free().
// **
// ** The SQLITE_TRACE_SIZE_LIMIT puts an upper bound on the size of
// ** expanded bound parameters.
// */
func Xsqlite3_expanded_sql(tls *libc.TLS, pStmt uintptr) (r uintptr) {
var p, z, zSql uintptr
_, _, _ = p, z, zSql
z = uintptr(0)
zSql = Xsqlite3_sql(tls, pStmt)
if zSql != 0 {
p = pStmt
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
z = _sqlite3VdbeExpandSql(tls, p, zSql)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).Fmutex)
}
return z
}
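// Illustrative sketch (not part of the SQLite amalgamation): the expanded
// SQL text is allocated with sqlite3_malloc, so the caller must release it
// after copying it out. This assumes the Xsqlite3_free wrapper generated
// elsewhere in this file.
func exampleExpandedSQLString(tls *libc.TLS, pStmt uintptr) string {
	z := Xsqlite3_expanded_sql(tls, pStmt)
	if z == 0 {
		return ""
	}
	defer Xsqlite3_free(tls, z)
	return libc.GoString(z)
}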
// C documentation
//
// /*
// ** Allocate and populate an UnpackedRecord structure based on the serialized
// ** record in nKey/pKey. Return a pointer to the new UnpackedRecord structure
// ** if successful, or a NULL pointer if an OOM error is encountered.
// */
func _vdbeUnpackRecord(tls *libc.TLS, pKeyInfo uintptr, nKey int32, pKey uintptr) (r uintptr) {
var pRet uintptr
_ = pRet /* Return value */
pRet = _sqlite3VdbeAllocUnpackedRecord(tls, pKeyInfo)
if pRet != 0 {
libc.Xmemset(tls, (*TUnpackedRecord)(unsafe.Pointer(pRet)).FaMem, 0, uint64(56)*uint64(int32((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FnKeyField)+libc.Int32FromInt32(1)))
_sqlite3VdbeRecordUnpack(tls, pKeyInfo, nKey, pKey, pRet)
}
return pRet
}
// C documentation
//
// /*
// ** This function is called from within a pre-update callback to retrieve
// ** a field of the row currently being updated or deleted.
// */
func Xsqlite3_preupdate_old(tls *libc.TLS, db uintptr, iIdx int32, ppValue uintptr) (r int32) {
var aRec, p, pMem, v1 uintptr
var nRec Tu32
var rc int32
_, _, _, _, _, _ = aRec, nRec, p, pMem, rc, v1
rc = SQLITE_OK
p = (*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdate
/* Test that this call is being made from within an SQLITE_DELETE or
** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */
if !(p != 0) || (*TPreUpdate)(unsafe.Pointer(p)).Fop == int32(SQLITE_INSERT) {
rc = _sqlite3MisuseError(tls, int32(92098))
goto preupdate_old_out
}
if (*TPreUpdate)(unsafe.Pointer(p)).FpPk != 0 {
iIdx = int32(_sqlite3TableColumnToIndex(tls, (*TPreUpdate)(unsafe.Pointer(p)).FpPk, int16(iIdx)))
}
if iIdx >= int32((*TVdbeCursor)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpCsr)).FnField) || iIdx < 0 {
rc = int32(SQLITE_RANGE)
goto preupdate_old_out
}
/* If the old.* record has not yet been loaded into memory, do so now. */
if (*TPreUpdate)(unsafe.Pointer(p)).FpUnpacked == uintptr(0) {
nRec = _sqlite3BtreePayloadSize(tls, *(*uintptr)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpCsr + 48)))
aRec = _sqlite3DbMallocRaw(tls, db, uint64(nRec))
if !(aRec != 0) {
goto preupdate_old_out
}
rc = _sqlite3BtreePayload(tls, *(*uintptr)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpCsr + 48)), uint32(0), nRec, aRec)
if rc == SQLITE_OK {
(*TPreUpdate)(unsafe.Pointer(p)).FpUnpacked = _vdbeUnpackRecord(tls, p+32, int32(nRec), aRec)
if !((*TPreUpdate)(unsafe.Pointer(p)).FpUnpacked != 0) {
rc = int32(SQLITE_NOMEM)
}
}
if rc != SQLITE_OK {
_sqlite3DbFree(tls, db, aRec)
goto preupdate_old_out
}
(*TPreUpdate)(unsafe.Pointer(p)).FaRecord = aRec
}
v1 = (*TUnpackedRecord)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpUnpacked)).FaMem + uintptr(iIdx)*56
*(*uintptr)(unsafe.Pointer(ppValue)) = v1
pMem = v1
if iIdx == int32((*TTable)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpTab)).FiPKey) {
_sqlite3VdbeMemSetInt64(tls, pMem, (*TPreUpdate)(unsafe.Pointer(p)).FiKey1)
} else {
if iIdx >= int32((*TUnpackedRecord)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpUnpacked)).FnField) {
*(*uintptr)(unsafe.Pointer(ppValue)) = _columnNullValue(tls)
} else {
if int32((*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpTab)).FaCol + uintptr(iIdx)*16))).Faffinity) == int32(SQLITE_AFF_REAL) {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
_sqlite3VdbeMemRealify(tls, pMem)
}
}
}
}
goto preupdate_old_out
preupdate_old_out:
;
_sqlite3Error(tls, db, rc)
return _sqlite3ApiExit(tls, db, rc)
}
// C documentation
//
// /*
// ** This function is called from within a pre-update callback to retrieve
// ** the number of columns in the row being updated, deleted or inserted.
// */
func Xsqlite3_preupdate_count(tls *libc.TLS, db uintptr) (r int32) {
var p uintptr
var v1 int32
_, _ = p, v1
p = (*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdate
if p != 0 {
v1 = int32((*TPreUpdate)(unsafe.Pointer(p)).Fkeyinfo.FnKeyField)
} else {
v1 = 0
}
return v1
}
// C documentation
//
// /*
// ** This function is designed to be called from within a pre-update callback
// ** only. It returns zero if the change that caused the callback was made
// ** immediately by a user SQL statement. Or, if the change was made by a
// ** trigger program, it returns the number of trigger programs currently
// ** on the stack (1 for a top-level trigger, 2 for a trigger fired by a
// ** top-level trigger etc.).
// **
// ** For the purposes of the previous paragraph, a foreign key CASCADE, SET NULL
// ** or SET DEFAULT action is considered a trigger.
// */
func Xsqlite3_preupdate_depth(tls *libc.TLS, db uintptr) (r int32) {
var p uintptr
var v1 int32
_, _ = p, v1
p = (*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdate
if p != 0 {
v1 = (*TVdbe)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).Fv)).FnFrame
} else {
v1 = 0
}
return v1
}
// C documentation
//
// /*
// ** This function is designed to be called from within a pre-update callback
// ** only.
// */
func Xsqlite3_preupdate_blobwrite(tls *libc.TLS, db uintptr) (r int32) {
var p uintptr
var v1 int32
_, _ = p, v1
p = (*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdate
if p != 0 {
v1 = (*TPreUpdate)(unsafe.Pointer(p)).FiBlobWrite
} else {
v1 = -int32(1)
}
return v1
}
// C documentation
//
// /*
// ** This function is called from within a pre-update callback to retrieve
// ** a field of the row currently being updated or inserted.
// */
func Xsqlite3_preupdate_new(tls *libc.TLS, db uintptr, iIdx int32, ppValue uintptr) (r int32) {
var p, pData, pMem, pUnpack uintptr
var rc, v1 int32
_, _, _, _, _, _ = p, pData, pMem, pUnpack, rc, v1
rc = SQLITE_OK
p = (*Tsqlite3)(unsafe.Pointer(db)).FpPreUpdate
if !(p != 0) || (*TPreUpdate)(unsafe.Pointer(p)).Fop == int32(SQLITE_DELETE) {
rc = _sqlite3MisuseError(tls, int32(92221))
goto preupdate_new_out
}
if (*TPreUpdate)(unsafe.Pointer(p)).FpPk != 0 && (*TPreUpdate)(unsafe.Pointer(p)).Fop != int32(SQLITE_UPDATE) {
iIdx = int32(_sqlite3TableColumnToIndex(tls, (*TPreUpdate)(unsafe.Pointer(p)).FpPk, int16(iIdx)))
}
if iIdx >= int32((*TVdbeCursor)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpCsr)).FnField) || iIdx < 0 {
rc = int32(SQLITE_RANGE)
goto preupdate_new_out
}
if (*TPreUpdate)(unsafe.Pointer(p)).Fop == int32(SQLITE_INSERT) {
/* For an INSERT, memory cell p->iNewReg contains the serialized record
** that is being inserted. Deserialize it. */
pUnpack = (*TPreUpdate)(unsafe.Pointer(p)).FpNewUnpacked
if !(pUnpack != 0) {
pData = (*TVdbe)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).Fv)).FaMem + uintptr((*TPreUpdate)(unsafe.Pointer(p)).FiNewReg)*56
if int32((*TMem)(unsafe.Pointer(pData)).Fflags)&int32(MEM_Zero) != 0 {
v1 = _sqlite3VdbeMemExpandBlob(tls, pData)
} else {
v1 = 0
}
rc = v1
if rc != SQLITE_OK {
goto preupdate_new_out
}
pUnpack = _vdbeUnpackRecord(tls, p+32, (*TMem)(unsafe.Pointer(pData)).Fn, (*TMem)(unsafe.Pointer(pData)).Fz)
if !(pUnpack != 0) {
rc = int32(SQLITE_NOMEM)
goto preupdate_new_out
}
(*TPreUpdate)(unsafe.Pointer(p)).FpNewUnpacked = pUnpack
}
pMem = (*TUnpackedRecord)(unsafe.Pointer(pUnpack)).FaMem + uintptr(iIdx)*56
if iIdx == int32((*TTable)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpTab)).FiPKey) {
_sqlite3VdbeMemSetInt64(tls, pMem, (*TPreUpdate)(unsafe.Pointer(p)).FiKey2)
} else {
if iIdx >= int32((*TUnpackedRecord)(unsafe.Pointer(pUnpack)).FnField) {
pMem = _columnNullValue(tls)
}
}
} else {
/* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required
** value. Make a copy of the cell contents and return a pointer to it.
** It is not safe to return a pointer to the memory cell itself as the
** caller may modify the value text encoding.
*/
if !((*TPreUpdate)(unsafe.Pointer(p)).FaNew != 0) {
(*TPreUpdate)(unsafe.Pointer(p)).FaNew = _sqlite3DbMallocZero(tls, db, uint64(56)*uint64((*TVdbeCursor)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpCsr)).FnField))
if !((*TPreUpdate)(unsafe.Pointer(p)).FaNew != 0) {
rc = int32(SQLITE_NOMEM)
goto preupdate_new_out
}
}
pMem = (*TPreUpdate)(unsafe.Pointer(p)).FaNew + uintptr(iIdx)*56
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags) == 0 {
if iIdx == int32((*TTable)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).FpTab)).FiPKey) {
_sqlite3VdbeMemSetInt64(tls, pMem, (*TPreUpdate)(unsafe.Pointer(p)).FiKey2)
} else {
rc = _sqlite3VdbeMemCopy(tls, pMem, (*TVdbe)(unsafe.Pointer((*TPreUpdate)(unsafe.Pointer(p)).Fv)).FaMem+uintptr((*TPreUpdate)(unsafe.Pointer(p)).FiNewReg+int32(1)+iIdx)*56)
if rc != SQLITE_OK {
goto preupdate_new_out
}
}
}
}
*(*uintptr)(unsafe.Pointer(ppValue)) = pMem
goto preupdate_new_out
preupdate_new_out:
;
_sqlite3Error(tls, db, rc)
return _sqlite3ApiExit(tls, db, rc)
}
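// Illustrative sketch (not part of the SQLite amalgamation): inside a
// pre-update callback the old and new row images can be inspected column by
// column with the routines above. op is the SQLITE_INSERT/SQLITE_UPDATE/
// SQLITE_DELETE code passed to the callback; visit is a hypothetical
// consumer, and the sqlite3_value pointers it receives are only valid for
// the duration of the callback.
func examplePreupdateVisit(tls *libc.TLS, db uintptr, op int32, visit func(iCol int32, pOld, pNew uintptr)) {
	bp := tls.Alloc(16) // two out-pointers: old value, new value
	defer tls.Free(16)
	n := Xsqlite3_preupdate_count(tls, db)
	for i := int32(0); i < n; i++ {
		var pOld, pNew uintptr
		if op != int32(SQLITE_INSERT) && Xsqlite3_preupdate_old(tls, db, i, bp) == SQLITE_OK {
			pOld = *(*uintptr)(unsafe.Pointer(bp))
		}
		if op != int32(SQLITE_DELETE) && Xsqlite3_preupdate_new(tls, db, i, bp+8) == SQLITE_OK {
			pNew = *(*uintptr)(unsafe.Pointer(bp + 8))
		}
		visit(i, pOld, pNew)
	}
}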
/************** End of vdbeapi.c *********************************************/
/************** Begin file vdbetrace.c ***************************************/
/*
** 2009 November 25
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains code used to insert the values of host parameters
** (aka "wildcards") into the SQL text output by sqlite3_trace().
**
** The Vdbe parse-tree explainer is also found here.
*/
/* #include "sqliteInt.h" */
/* #include "vdbeInt.h" */
// C documentation
//
// /*
// ** zSql is a zero-terminated string of UTF-8 SQL text. Return the number of
// ** bytes in this text up to but excluding the first character in
// ** a host parameter. If the text contains no host parameters, return
// ** the total number of bytes in the text.
// */
func _findNextHostParameter(tls *libc.TLS, zSql uintptr, pnToken uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var n, nTotal int32
var _ /* tokenType at bp+0 */ int32
_, _ = n, nTotal
nTotal = 0
*(*int32)(unsafe.Pointer(pnToken)) = 0
for *(*int8)(unsafe.Pointer(zSql)) != 0 {
n = _sqlite3GetToken(tls, zSql, bp)
if *(*int32)(unsafe.Pointer(bp)) == int32(TK_VARIABLE) {
*(*int32)(unsafe.Pointer(pnToken)) = n
break
}
nTotal += n
zSql += uintptr(n)
}
return nTotal
}
// C documentation
//
// /*
// ** This function returns a pointer to a nul-terminated string in memory
// ** obtained from sqlite3DbMalloc(). If sqlite3.nVdbeExec is 1, then the
// ** string contains a copy of zRawSql but with host parameters expanded to
// ** their current bindings. Or, if sqlite3.nVdbeExec is greater than 1,
// ** then the returned string holds a copy of zRawSql with "-- " prepended
// ** to each line of text.
// **
// ** If the SQLITE_TRACE_SIZE_LIMIT macro is defined to an integer, then
// ** long strings and blobs are truncated to that many bytes. This
// ** can be used to prevent unreasonably large trace strings when dealing
// ** with large (multi-megabyte) strings and blobs.
// **
// ** The calling function is responsible for making sure the memory returned
// ** is eventually freed.
// **
// ** ALGORITHM: Scan the input string looking for host parameters in any of
// ** these forms: ?, ?N, $A, @A, :A. Take care to avoid text within
// ** string literals, quoted identifier names, and comments. For text forms,
// ** the host parameter index is found by scanning the prepared
// ** statement for the corresponding OP_Variable opcode. Once the host
// ** parameter index is known, locate the value in p->aVar[]. Then render
// ** the value as a literal in place of the host parameter name.
// */
func _sqlite3VdbeExpandSql(tls *libc.TLS, p uintptr, zRawSql uintptr) (r uintptr) {
bp := tls.Alloc(128)
defer tls.Free(128)
var db, pVar, zStart, v1 uintptr
var enc Tu8
var i, n, nOut, nOut1, nextIndex, v2 int32
var _ /* idx at bp+0 */ int32
var _ /* nToken at bp+4 */ int32
var _ /* out at bp+8 */ TStrAccum
var _ /* utf8 at bp+40 */ TMem
_, _, _, _, _, _, _, _, _, _, _ = db, enc, i, n, nOut, nOut1, nextIndex, pVar, zStart, v1, v2 /* The database connection */
*(*int32)(unsafe.Pointer(bp)) = 0 /* Index of a host parameter */
nextIndex = int32(1) /* Used to convert UTF16 into UTF8 for display */
db = (*TVdbe)(unsafe.Pointer(p)).Fdb
_sqlite3StrAccumInit(tls, bp+8, uintptr(0), uintptr(0), 0, *(*int32)(unsafe.Pointer(db + 136)))
if (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeExec > int32(1) {
for *(*int8)(unsafe.Pointer(zRawSql)) != 0 {
zStart = zRawSql
for {
v1 = zRawSql
zRawSql++
if !(int32(*(*int8)(unsafe.Pointer(v1))) != int32('\n') && *(*int8)(unsafe.Pointer(zRawSql)) != 0) {
break
}
}
Xsqlite3_str_append(tls, bp+8, __ccgo_ts+5548, int32(3))
Xsqlite3_str_append(tls, bp+8, zStart, int32(int64(zRawSql)-int64(zStart)))
}
} else {
if int32((*TVdbe)(unsafe.Pointer(p)).FnVar) == 0 {
Xsqlite3_str_append(tls, bp+8, zRawSql, _sqlite3Strlen30(tls, zRawSql))
} else {
for *(*int8)(unsafe.Pointer(zRawSql)) != 0 {
n = _findNextHostParameter(tls, zRawSql, bp+4)
Xsqlite3_str_append(tls, bp+8, zRawSql, n)
zRawSql += uintptr(n)
if *(*int32)(unsafe.Pointer(bp + 4)) == 0 {
break
}
if int32(*(*int8)(unsafe.Pointer(zRawSql))) == int32('?') {
if *(*int32)(unsafe.Pointer(bp + 4)) > int32(1) {
_sqlite3GetInt32(tls, zRawSql+1, bp)
} else {
*(*int32)(unsafe.Pointer(bp)) = nextIndex
}
} else {
*(*int32)(unsafe.Pointer(bp)) = _sqlite3VdbeParameterIndex(tls, p, zRawSql, *(*int32)(unsafe.Pointer(bp + 4)))
}
zRawSql += uintptr(*(*int32)(unsafe.Pointer(bp + 4)))
if *(*int32)(unsafe.Pointer(bp))+int32(1) > nextIndex {
v2 = *(*int32)(unsafe.Pointer(bp)) + int32(1)
} else {
v2 = nextIndex
}
nextIndex = v2
pVar = (*TVdbe)(unsafe.Pointer(p)).FaVar + uintptr(*(*int32)(unsafe.Pointer(bp))-int32(1))*56
if int32((*TMem)(unsafe.Pointer(pVar)).Fflags)&int32(MEM_Null) != 0 {
Xsqlite3_str_append(tls, bp+8, __ccgo_ts+1651, int32(4))
} else {
if int32((*TMem)(unsafe.Pointer(pVar)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
Xsqlite3_str_appendf(tls, bp+8, __ccgo_ts+1406, libc.VaList(bp+104, *(*Ti64)(unsafe.Pointer(pVar))))
} else {
if int32((*TMem)(unsafe.Pointer(pVar)).Fflags)&int32(MEM_Real) != 0 {
Xsqlite3_str_appendf(tls, bp+8, __ccgo_ts+5069, libc.VaList(bp+104, *(*float64)(unsafe.Pointer(pVar))))
} else {
if int32((*TMem)(unsafe.Pointer(pVar)).Fflags)&int32(MEM_Str) != 0 { /* Number of bytes of the string text to include in output */
enc = (*Tsqlite3)(unsafe.Pointer(db)).Fenc
if int32(enc) != int32(SQLITE_UTF8) {
libc.Xmemset(tls, bp+40, 0, uint64(56))
(*(*TMem)(unsafe.Pointer(bp + 40))).Fdb = db
_sqlite3VdbeMemSetStr(tls, bp+40, (*TMem)(unsafe.Pointer(pVar)).Fz, int64((*TMem)(unsafe.Pointer(pVar)).Fn), enc, libc.UintptrFromInt32(0))
if int32(SQLITE_NOMEM) == _sqlite3VdbeChangeEncoding(tls, bp+40, int32(SQLITE_UTF8)) {
(*(*TStrAccum)(unsafe.Pointer(bp + 8))).FaccError = uint8(SQLITE_NOMEM)
(*(*TStrAccum)(unsafe.Pointer(bp + 8))).FnAlloc = uint32(0)
}
pVar = bp + 40
}
nOut = (*TMem)(unsafe.Pointer(pVar)).Fn
Xsqlite3_str_appendf(tls, bp+8, __ccgo_ts+5552, libc.VaList(bp+104, nOut, (*TMem)(unsafe.Pointer(pVar)).Fz))
if int32(enc) != int32(SQLITE_UTF8) {
_sqlite3VdbeMemRelease(tls, bp+40)
}
} else {
if int32((*TMem)(unsafe.Pointer(pVar)).Fflags)&int32(MEM_Zero) != 0 {
Xsqlite3_str_appendf(tls, bp+8, __ccgo_ts+5559, libc.VaList(bp+104, *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pVar)).Fu))))
} else { /* Number of bytes of the blob to include in output */
Xsqlite3_str_append(tls, bp+8, __ccgo_ts+5572, int32(2))
nOut1 = (*TMem)(unsafe.Pointer(pVar)).Fn
i = 0
for {
if !(i < nOut1) {
break
}
Xsqlite3_str_appendf(tls, bp+8, __ccgo_ts+5575, libc.VaList(bp+104, int32(*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pVar)).Fz + uintptr(i))))&int32(0xff)))
goto _3
_3:
;
i++
}
Xsqlite3_str_append(tls, bp+8, __ccgo_ts+5580, int32(1))
}
}
}
}
}
}
}
}
if (*(*TStrAccum)(unsafe.Pointer(bp + 8))).FaccError != 0 {
Xsqlite3_str_reset(tls, bp+8)
}
return _sqlite3StrAccumFinish(tls, bp+8)
}
/************** End of vdbetrace.c *******************************************/
/************** Begin file vdbe.c ********************************************/
/*
** 2001 September 15
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** The code in this file implements the function that runs the
** bytecode of a prepared statement.
**
** Various scripts scan this source file in order to generate HTML
** documentation, header files, or other derived files. The formatting
** of the code in this file is, therefore, important. See other comments
** in this file for details. If in doubt, do not deviate from existing
** commenting and indentation practices when changing or adding code.
*/
/* #include "sqliteInt.h" */
/* #include "vdbeInt.h" */
/*
** Invoke this macro on memory cells just prior to changing the
** value of the cell. This macro verifies that shallow copies are
** not misused. A shallow copy of a string or blob just copies a
** pointer to the string or blob, not the content. If the original
** is changed while the copy is still in use, the string or blob might
** be changed out from under the copy. This macro verifies that nothing
** like that ever happens.
*/
/*
** The following global variable is incremented every time a cursor
** moves, either by the OP_SeekXX, OP_Next, or OP_Prev opcodes. The test
** procedures use this information to make sure that indices are
** working correctly. This variable has no function other than to
** help verify the correct operation of the library.
*/
/*
** When this global variable is positive, it gets decremented once before
** each instruction in the VDBE. When it reaches zero, the u1.isInterrupted
** field of the sqlite3 structure is set in order to simulate an interrupt.
**
** This facility is used for testing purposes only. It does not function
** in an ordinary build.
*/
/*
** The next global variable is incremented each time the OP_Sort opcode
** is executed. The test procedures use this information to make sure that
** sorting is occurring or not occurring at appropriate times. This variable
** has no function other than to help verify the correct operation of the
** library.
*/
/*
** The next global variable records the size of the largest MEM_Blob
** or MEM_Str that has been used by a VDBE opcode. The test procedures
** use this information to make sure that the zero-blob functionality
** is working correctly. This variable has no function other than to
** help verify the correct operation of the library.
*/
/*
** This macro evaluates to true if either the update hook or the preupdate
** hook is enabled for database connection DB.
*/
/*
** The next global variable is incremented each time the OP_Found opcode
** is executed. This is used to test whether or not the foreign key
** operation implemented using OP_FkIsZero is working. This variable
** has no function other than to help verify the correct operation of the
** library.
*/
/*
** Test a register to see if it exceeds the current maximum blob size.
** If it does, record the new maximum blob size.
*/
/*
** Invoke the VDBE coverage callback, if that callback is defined. This
** feature is used for test suite validation only and does not appear in
** production builds.
**
** M is the type of branch. I is the direction taken for this instance of
** the branch.
**
** M: 2 - two-way branch (I=0: fall-thru 1: jump )
** 3 - two-way + NULL (I=0: fall-thru 1: jump 2: NULL )
** 4 - OP_Jump (I=0: jump p1 1: jump p2 2: jump p3)
**
** In other words, if M is 2, then I is either 0 (for fall-through) or
** 1 (for when the branch is taken). If M is 3, the I is 0 for an
** ordinary fall-through, I is 1 if the branch was taken, and I is 2
** if the result of comparison is NULL. For M=3, I=2 the jump may or
** may not be taken, depending on the SQLITE_JUMPIFNULL flags in p5.
** When M is 4, that means that an OP_Jump is being run. I is 0, 1, or 2
** depending on if the operands are less than, equal, or greater than.
**
** iSrcLine is the source code line (from the __LINE__ macro) that
** generated the VDBE instruction combined with flag bits. The source
** code line number is in the lower 24 bits of iSrcLine and the upper
** 8 bits are flags. The lower three bits of the flags indicate
** values for I that should never occur. For example, if the branch is
** always taken, the flags should be 0x05 since the fall-through and
** alternate branch are never taken. If a branch is never taken then
** flags should be 0x06 since only the fall-through approach is allowed.
**
** Bit 0x08 of the flags indicates an OP_Jump opcode that is only
** interested in equal or not-equal. In other words, I==0 and I==2
** should be treated as equivalent
**
** Since only a line number is retained, not the filename, this macro
** only works for amalgamation builds. But that is ok, since these macros
** should be no-ops except for special builds used to measure test coverage.
*/
/*
** An ephemeral string value (signified by the MEM_Ephem flag) contains
** a pointer to a dynamically allocated string where some other entity
** is responsible for deallocating that string. Because the register
** does not control the string, it might be deleted without the register
** knowing it.
**
** This routine converts an ephemeral string into a dynamically allocated
** string that the register itself controls. In other words, it
** converts an MEM_Ephem string into a string with P.z==P.zMalloc.
*/
/* Return true if the cursor was opened using the OP_OpenSorter opcode. */
// C documentation
//
// /*
// ** Allocate VdbeCursor number iCur. Return a pointer to it. Return NULL
// ** if we run out of memory.
// */
func _allocateCursor(tls *libc.TLS, p uintptr, iCur int32, nField int32, eCurType Tu8) (r uintptr) {
var nByte, v2 int32
var pCx, pMem, v1, v3, v4 uintptr
_, _, _, _, _, _, _ = nByte, pCx, pMem, v1, v2, v3, v4
if iCur > 0 {
v1 = (*TVdbe)(unsafe.Pointer(p)).FaMem + uintptr((*TVdbe)(unsafe.Pointer(p)).FnMem-iCur)*56
} else {
v1 = (*TVdbe)(unsafe.Pointer(p)).FaMem
}
/* Find the memory cell that will be used to store the blob of memory
** required for this VdbeCursor structure. It is convenient to use a
** vdbe memory cell to manage the memory allocation required for a
** VdbeCursor structure for the following reasons:
**
** * Sometimes cursor numbers are used for a couple of different
** purposes in a vdbe program. The different uses might require
** different sized allocations. Memory cells provide growable
** allocations.
**
** * When using ENABLE_MEMORY_MANAGEMENT, memory cell buffers can
** be freed lazily via the sqlite3_release_memory() API. This
** minimizes the number of malloc calls made by the system.
**
** The memory cell for cursor 0 is aMem[0]. The rest are allocated from
** the top of the register space. Cursor 1 is at Mem[p->nMem-1].
** Cursor 2 is at Mem[p->nMem-2]. And so forth.
*/
pMem = v1
pCx = uintptr(0)
if int32(eCurType) == CURTYPE_BTREE {
v2 = _sqlite3BtreeCursorSize(tls)
} else {
v2 = 0
}
nByte = int32(libc.Uint64FromInt64(128) + libc.Uint64FromInt32(2)*libc.Uint64FromInt64(4)*uint64(nField) + uint64(v2))
if *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr(iCur)*8)) != 0 { /*OPTIMIZATION-IF-FALSE*/
_sqlite3VdbeFreeCursorNN(tls, p, *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr(iCur)*8)))
*(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr(iCur)*8)) = uintptr(0)
}
/* There used to be a call to sqlite3VdbeMemClearAndResize() to make sure
** the pMem used to hold space for the cursor has enough storage available
** in pMem->zMalloc. But for the special case of the aMem[] entries used
** to hold cursors, it is faster to in-line the logic. */
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc < nByte {
if (*TMem)(unsafe.Pointer(pMem)).FszMalloc > 0 {
_sqlite3DbFreeNN(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, (*TMem)(unsafe.Pointer(pMem)).FzMalloc)
}
v3 = _sqlite3DbMallocRaw(tls, (*TMem)(unsafe.Pointer(pMem)).Fdb, uint64(nByte))
(*TMem)(unsafe.Pointer(pMem)).FzMalloc = v3
(*TMem)(unsafe.Pointer(pMem)).Fz = v3
if (*TMem)(unsafe.Pointer(pMem)).FzMalloc == uintptr(0) {
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = 0
return uintptr(0)
}
(*TMem)(unsafe.Pointer(pMem)).FszMalloc = nByte
}
v4 = (*TMem)(unsafe.Pointer(pMem)).FzMalloc
pCx = v4
*(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr(iCur)*8)) = v4
libc.Xmemset(tls, pCx, 0, uint64(libc.UintptrFromInt32(0)+40))
(*TVdbeCursor)(unsafe.Pointer(pCx)).FeCurType = eCurType
(*TVdbeCursor)(unsafe.Pointer(pCx)).FnField = int16(nField)
(*TVdbeCursor)(unsafe.Pointer(pCx)).FaOffset = pCx + 120 + uintptr(nField)*4
if int32(eCurType) == CURTYPE_BTREE {
*(*uintptr)(unsafe.Pointer(pCx + 48)) = (*TMem)(unsafe.Pointer(pMem)).Fz + uintptr(libc.Uint64FromInt64(128)+libc.Uint64FromInt32(2)*libc.Uint64FromInt64(4)*uint64(nField))
_sqlite3BtreeCursorZero(tls, *(*uintptr)(unsafe.Pointer(pCx + 48)))
}
return pCx
}
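// Illustrative note (not part of the generated translation): the nByte
// computation above appears to correspond to ROUND8(sizeof(VdbeCursor)) +
// 2*sizeof(u32)*nField + sqlite3BtreeCursorSize() in the C source, i.e.
// 128 + 2*4*nField bytes for the cursor header and field-offset arrays, plus
// space for a BtCursor when eCurType is CURTYPE_BTREE. The whole blob lives
// inside the owning aMem[] cell, which is why releasing registers also
// releases cursor memory.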
// C documentation
//
// /*
// ** The string in pRec is known to look like an integer and to have a
// ** floating point value of rValue. Return true and set *piValue to the
// ** integer value if the string is in range to be an integer. Otherwise,
// ** return false.
// */
func _alsoAnInt(tls *libc.TLS, pRec uintptr, rValue float64, piValue uintptr) (r int32) {
var iValue Ti64
_ = iValue
iValue = _sqlite3RealToI64(tls, rValue)
if _sqlite3RealSameAsInt(tls, rValue, iValue) != 0 {
*(*Ti64)(unsafe.Pointer(piValue)) = iValue
return int32(1)
}
return libc.BoolInt32(0 == _sqlite3Atoi64(tls, (*TMem)(unsafe.Pointer(pRec)).Fz, piValue, (*TMem)(unsafe.Pointer(pRec)).Fn, (*TMem)(unsafe.Pointer(pRec)).Fenc))
}
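// Illustrative example (not part of the generated translation): text such as
// '9223372036854775807' converts to a double that no longer equals the exact
// integer, so sqlite3RealSameAsInt() reports a mismatch and the
// sqlite3Atoi64() fallback is what recovers the precise 64-bit value.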
// C documentation
//
// /*
// ** Try to convert a value into a numeric representation if we can
// ** do so without loss of information. In other words, if the string
// ** looks like a number, convert it into a number. If it does not
// ** look like a number, leave it alone.
// **
// ** If the bTryForInt flag is true, then extra effort is made to give
// ** an integer representation. Strings that look like floating point
// ** values but which have no fractional component (example: '48.00')
// ** will have a MEM_Int representation when bTryForInt is true.
// **
// ** If bTryForInt is false, then if the input string contains a decimal
// ** point or exponential notation, the result is only MEM_Real, even
// ** if there is an exact integer representation of the quantity.
// */
func _applyNumericAffinity(tls *libc.TLS, pRec uintptr, bTryForInt int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var enc Tu8
var rc int32
var p1, p2, p3 uintptr
var _ /* rValue at bp+0 */ float64
_, _, _, _, _ = enc, rc, p1, p2, p3
enc = (*TMem)(unsafe.Pointer(pRec)).Fenc
rc = _sqlite3AtoF(tls, (*TMem)(unsafe.Pointer(pRec)).Fz, bp, (*TMem)(unsafe.Pointer(pRec)).Fn, enc)
if rc <= 0 {
return
}
if rc == int32(1) && _alsoAnInt(tls, pRec, *(*float64)(unsafe.Pointer(bp)), pRec) != 0 {
p1 = pRec + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) | libc.Int32FromInt32(MEM_Int))
} else {
*(*float64)(unsafe.Pointer(pRec)) = *(*float64)(unsafe.Pointer(bp))
p2 = pRec + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_Real))
if bTryForInt != 0 {
_sqlite3VdbeIntegerAffinity(tls, pRec)
}
}
/* TEXT->NUMERIC is many->one. Hence, it is important to invalidate the
** string representation after computing a numeric equivalent, because the
** string representation might not be the canonical representation for the
** numeric value. Ticket [343634942dd54ab57b7024] 2018-01-31. */
p3 = pRec + 20
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) & ^libc.Int32FromInt32(MEM_Str))
}
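// Illustrative examples (not part of the generated translation), assuming a
// text register:
//   '48'    -> MEM_Int 48
//   '48.00' -> MEM_Int 48 when bTryForInt!=0, otherwise MEM_Real 48.0
//   '48.5'  -> MEM_Real 48.5
//   'abc'   -> unchanged (sqlite3AtoF reports no number, so we return early)
// In every converted case the MEM_Str flag is cleared afterwards, per the
// ticket cited above.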
// C documentation
//
// /*
// ** Processing is determined by the affinity parameter:
// **
// ** SQLITE_AFF_INTEGER:
// ** SQLITE_AFF_REAL:
// ** SQLITE_AFF_NUMERIC:
// ** Try to convert pRec to an integer representation or a
// ** floating-point representation if an integer representation
// ** is not possible. Note that the integer representation is
// ** always preferred, even if the affinity is REAL, because
// ** an integer representation is more space efficient on disk.
// **
// ** SQLITE_AFF_FLEXNUM:
// ** If the value is text, then try to convert it into a number of
// ** some kind (integer or real) but do not make any other changes.
// **
// ** SQLITE_AFF_TEXT:
// ** Convert pRec to a text representation.
// **
// ** SQLITE_AFF_BLOB:
// ** SQLITE_AFF_NONE:
// ** No-op. pRec is unchanged.
// */
func _applyAffinity(tls *libc.TLS, pRec uintptr, affinity int8, enc Tu8) {
var p1 uintptr
_ = p1
if int32(affinity) >= int32(SQLITE_AFF_NUMERIC) {
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Int) == 0 { /*OPTIMIZATION-IF-FALSE*/
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&(libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)) == 0 {
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Str) != 0 {
_applyNumericAffinity(tls, pRec, int32(1))
}
} else {
if int32(affinity) <= int32(SQLITE_AFF_REAL) {
_sqlite3VdbeIntegerAffinity(tls, pRec)
}
}
}
} else {
if int32(affinity) == int32(SQLITE_AFF_TEXT) {
/* Only attempt the conversion to TEXT if there is an integer or real
** representation (blob and NULL do not get converted) but no string
** representation. It would be harmless to repeat the conversion if
** there is already a string rep, but it is pointless to waste those
** CPU cycles. */
if 0 == int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Str) { /*OPTIMIZATION-IF-FALSE*/
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&(libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
_sqlite3VdbeMemStringify(tls, pRec, enc, uint8(1))
}
}
p1 = pRec + 20
*(*Tu16)(unsafe.Pointer(p1)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p1))) & ^(libc.Int32FromInt32(MEM_Real) | libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_IntReal)))
}
}
}
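// Illustrative summary (not part of the generated translation): given a text
// register holding '123', SQLITE_AFF_INTEGER, SQLITE_AFF_REAL and
// SQLITE_AFF_NUMERIC all yield an integer representation; SQLITE_AFF_TEXT
// leaves it as text; SQLITE_AFF_BLOB and SQLITE_AFF_NONE leave the register
// untouched. Conversely, an integer register given SQLITE_AFF_TEXT is
// stringified and its numeric flags are cleared.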
// C documentation
//
// /*
// ** Try to convert the type of a function argument or a result column
// ** into a numeric representation. Use either INTEGER or REAL whichever
// ** is appropriate. But only do the conversion if it is possible without
// ** loss of information and return the revised type of the argument.
// */
func Xsqlite3_value_numeric_type(tls *libc.TLS, pVal uintptr) (r int32) {
var eType int32
var pMem uintptr
_, _ = eType, pMem
eType = Xsqlite3_value_type(tls, pVal)
if eType == int32(SQLITE_TEXT) {
pMem = pVal
_applyNumericAffinity(tls, pMem, 0)
eType = Xsqlite3_value_type(tls, pVal)
}
return eType
}
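// Hypothetical helper (not part of the generated translation): a minimal
// sketch showing how code inside this package could use
// Xsqlite3_value_numeric_type to ask whether a value argument can be treated
// as numeric without loss of information. The name _valueLooksNumeric is
// illustrative only.
func _valueLooksNumeric(tls *libc.TLS, pVal uintptr) bool {
// A TEXT value that looks like a number is converted in place; the returned
// fundamental type then reflects the numeric representation.
t := Xsqlite3_value_numeric_type(tls, pVal)
return t == int32(SQLITE_INTEGER) || t == int32(SQLITE_FLOAT)
}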
// C documentation
//
// /*
// ** Exported version of applyAffinity(). This one works on sqlite3_value*,
// ** not the internal Mem* type.
// */
func _sqlite3ValueApplyAffinity(tls *libc.TLS, pVal uintptr, affinity Tu8, enc Tu8) {
_applyAffinity(tls, pVal, int8(affinity), enc)
}
// C documentation
//
// /*
// ** pMem currently only holds a string type (or maybe a BLOB that we can
// ** interpret as a string if we want to). Compute its corresponding
// ** numeric type, if it has one. Set the pMem->u.r and pMem->u.i fields
// ** accordingly.
// */
func _computeNumericType(tls *libc.TLS, pMem uintptr) (r Tu16) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc, v1 int32
var _ /* ix at bp+0 */ Tsqlite3_int64
_, _ = rc, v1
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&int32(MEM_Zero) != 0 {
v1 = _sqlite3VdbeMemExpandBlob(tls, pMem)
} else {
v1 = 0
}
if v1 != 0 {
*(*Ti64)(unsafe.Pointer(pMem)) = 0
return uint16(MEM_Int)
}
rc = _sqlite3AtoF(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, pMem, (*TMem)(unsafe.Pointer(pMem)).Fn, (*TMem)(unsafe.Pointer(pMem)).Fenc)
if rc <= 0 {
if rc == 0 && _sqlite3Atoi64(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, bp, (*TMem)(unsafe.Pointer(pMem)).Fn, (*TMem)(unsafe.Pointer(pMem)).Fenc) <= int32(1) {
*(*Ti64)(unsafe.Pointer(pMem)) = *(*Tsqlite3_int64)(unsafe.Pointer(bp))
return uint16(MEM_Int)
} else {
return uint16(MEM_Real)
}
} else {
if rc == int32(1) && _sqlite3Atoi64(tls, (*TMem)(unsafe.Pointer(pMem)).Fz, bp, (*TMem)(unsafe.Pointer(pMem)).Fn, (*TMem)(unsafe.Pointer(pMem)).Fenc) == 0 {
*(*Ti64)(unsafe.Pointer(pMem)) = *(*Tsqlite3_int64)(unsafe.Pointer(bp))
return uint16(MEM_Int)
}
}
return uint16(MEM_Real)
}
// C documentation
//
// /*
// ** Return the numeric type for pMem, either MEM_Int or MEM_Real or both or
// ** none.
// **
// ** Unlike applyNumericAffinity(), this routine does not modify pMem->flags.
// ** But it does set pMem->u.r and pMem->u.i appropriately.
// */
func _numericType(tls *libc.TLS, pMem uintptr) (r Tu16) {
if int32((*TMem)(unsafe.Pointer(pMem)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)|libc.Int32FromInt32(MEM_Null)) != 0 {
return uint16(int32((*TMem)(unsafe.Pointer(pMem)).Fflags) & (libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_Real) | libc.Int32FromInt32(MEM_IntReal) | libc.Int32FromInt32(MEM_Null)))
}
return _computeNumericType(tls, pMem)
return uint16(0)
}
// C documentation
//
// /*
// ** Return the register of pOp->p2 after first preparing it to be
// ** overwritten with an integer value.
// */
func _out2PrereleaseWithClear(tls *libc.TLS, pOut uintptr) (r uintptr) {
_sqlite3VdbeMemSetNull(tls, pOut)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Int)
return pOut
}
func _out2Prerelease(tls *libc.TLS, p uintptr, pOp uintptr) (r uintptr) {
var pOut uintptr
_ = pOut
pOut = (*TVdbe)(unsafe.Pointer(p)).FaMem + uintptr((*TVdbeOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TMem)(unsafe.Pointer(pOut)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 { /*OPTIMIZATION-IF-FALSE*/
return _out2PrereleaseWithClear(tls, pOut)
} else {
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Int)
return pOut
}
return r
}
// C documentation
//
// /*
// ** Compute a bloom filter hash using pOp->p4.i registers from aMem[] beginning
// ** with pOp->p3. Return the hash.
// */
func _filterHash(tls *libc.TLS, aMem uintptr, pOp uintptr) (r Tu64) {
var h Tu64
var i, mx int32
var p uintptr
_, _, _, _ = h, i, mx, p
h = uint64(0)
i = (*TOp)(unsafe.Pointer(pOp)).Fp3
mx = i + (*TOp)(unsafe.Pointer(pOp)).Fp4.Fi
for {
if !(i < mx) {
break
}
p = aMem + uintptr(i)*56
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
h += uint64(*(*Ti64)(unsafe.Pointer(p)))
} else {
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&int32(MEM_Real) != 0 {
h += uint64(_sqlite3VdbeIntValue(tls, p))
} else {
if int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) != 0 {
/* All strings have the same hash and all blobs have the same hash,
** though, at least, those hashes are different from each other and
** from NULL. */
h += uint64(int32(4093) + int32((*TMem)(unsafe.Pointer(p)).Fflags)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)))
}
}
}
goto _1
_1:
;
i++
}
return h
}
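// Illustrative note (not part of the generated translation): this hash is
// used by both the OP_FilterAdd and OP_Filter opcodes, so a probe only has to
// land in the same bloom-filter bucket as the inserted key. Because every
// string shares one hash value and every blob shares another, text and blob
// keys can only produce extra "maybe present" answers, never a false
// "definitely absent".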
// C documentation
//
// /*
// ** For OP_Column, factor out the case where content is loaded from
// ** overflow pages, so that the code to implement this case is separate
// ** from the common case where all content fits on the page. Factoring out
// ** the code reduces register pressure and helps the common case
// ** to run faster.
// */
func _vdbeColumnFromOverflow(tls *libc.TLS, pC uintptr, iCol int32, t int32, iOffset Ti64, cacheStatus Tu32, colCacheCtr Tu32, pDest uintptr) (r int32) {
var db, pBuf, pCache, v1, p2, p3, p4 uintptr
var encoding, len1, rc int32
_, _, _, _, _, _, _, _, _, _ = db, encoding, len1, pBuf, pCache, rc, v1, p2, p3, p4
db = (*TMem)(unsafe.Pointer(pDest)).Fdb
encoding = int32((*TMem)(unsafe.Pointer(pDest)).Fenc)
len1 = int32(_sqlite3VdbeSerialTypeLen(tls, uint32(t)))
if len1 > *(*int32)(unsafe.Pointer(db + 136)) {
return int32(SQLITE_TOOBIG)
}
if len1 > int32(4000) && (*TVdbeCursor)(unsafe.Pointer(pC)).FpKeyInfo == uintptr(0) {
if int32(TBool(*(*uint8)(unsafe.Pointer(pC + 8))&0x10>>4)) == 0 {
(*TVdbeCursor)(unsafe.Pointer(pC)).FpCache = _sqlite3DbMallocZero(tls, db, uint64(32))
if (*TVdbeCursor)(unsafe.Pointer(pC)).FpCache == uintptr(0) {
return int32(SQLITE_NOMEM)
}
libc.SetBitFieldPtr8Uint32(pC+8, libc.Uint32FromInt32(1), 4, 0x10)
}
pCache = (*TVdbeCursor)(unsafe.Pointer(pC)).FpCache
if (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue == uintptr(0) || (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FiCol != iCol || (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FcacheStatus != cacheStatus || (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FcolCacheCtr != colCacheCtr || (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FiOffset != _sqlite3BtreeOffset(tls, *(*uintptr)(unsafe.Pointer(pC + 48))) {
if (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue != 0 {
_sqlite3RCStrUnref(tls, (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue)
}
v1 = _sqlite3RCStrNew(tls, uint64(len1+int32(3)))
(*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue = v1
pBuf = v1
if pBuf == uintptr(0) {
return int32(SQLITE_NOMEM)
}
rc = _sqlite3BtreePayload(tls, *(*uintptr)(unsafe.Pointer(pC + 48)), uint32(iOffset), uint32(len1), pBuf)
if rc != 0 {
return rc
}
*(*int8)(unsafe.Pointer(pBuf + uintptr(len1))) = 0
*(*int8)(unsafe.Pointer(pBuf + uintptr(len1+int32(1)))) = 0
*(*int8)(unsafe.Pointer(pBuf + uintptr(len1+int32(2)))) = 0
(*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FiCol = iCol
(*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FcacheStatus = cacheStatus
(*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FcolCacheCtr = colCacheCtr
(*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FiOffset = _sqlite3BtreeOffset(tls, *(*uintptr)(unsafe.Pointer(pC + 48)))
} else {
pBuf = (*TVdbeTxtBlbCache)(unsafe.Pointer(pCache)).FpCValue
}
_sqlite3RCStrRef(tls, pBuf)
if t&int32(1) != 0 {
rc = _sqlite3VdbeMemSetStr(tls, pDest, pBuf, int64(len1), uint8(encoding), __ccgo_fp(_sqlite3RCStrUnref))
p2 = pDest + 20
*(*Tu16)(unsafe.Pointer(p2)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p2))) | libc.Int32FromInt32(MEM_Term))
} else {
rc = _sqlite3VdbeMemSetStr(tls, pDest, pBuf, int64(len1), uint8(0), __ccgo_fp(_sqlite3RCStrUnref))
}
} else {
rc = _sqlite3VdbeMemFromBtree(tls, *(*uintptr)(unsafe.Pointer(pC + 48)), uint32(iOffset), uint32(len1), pDest)
if rc != 0 {
return rc
}
_sqlite3VdbeSerialGet(tls, (*TMem)(unsafe.Pointer(pDest)).Fz, uint32(t), pDest)
if t&int32(1) != 0 && encoding == int32(SQLITE_UTF8) {
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pDest)).Fz + uintptr(len1))) = 0
p3 = pDest + 20
*(*Tu16)(unsafe.Pointer(p3)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p3))) | libc.Int32FromInt32(MEM_Term))
}
}
p4 = pDest + 20
*(*Tu16)(unsafe.Pointer(p4)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p4))) & ^libc.Int32FromInt32(MEM_Ephem))
return rc
}
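// Illustrative note (not part of the generated translation): the len1 > 4000
// branch above caches one large TEXT/BLOB column per cursor in a
// reference-counted buffer (VdbeTxtBlbCache), so re-reading the same
// overflow-page column from the same row reuses the buffer instead of walking
// the overflow chain again; smaller values go through sqlite3VdbeMemFromBtree
// as usual.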
// C documentation
//
// /*
// ** Return the symbolic name for the data type of a pMem
// */
func _vdbeMemTypeName(tls *libc.TLS, pMem uintptr) (r uintptr) {
return _azTypes[Xsqlite3_value_type(tls, pMem)-int32(1)]
}
var _azTypes = [5]uintptr{
0: __ccgo_ts + 1134,
1: __ccgo_ts + 1146,
2: __ccgo_ts + 1151,
3: __ccgo_ts + 1129,
4: __ccgo_ts + 1651,
}
// C documentation
//
// /*
// ** Execute as much of a VDBE program as we can.
// ** This is the core of sqlite3_step().
// */
func _sqlite3VdbeExec(tls *libc.TLS, p uintptr) (r int32) {
bp := tls.Alloc(976)
defer tls.Free(976)
var aCol, aMem, aOffset, aOp, aPermute, aRoot, apArg, apArg1, db, pArgc, pBt, pBt1, pBt2, pBt3, pC, pC1, pC10, pC11, pC12, pC13, pC14, pC15, pC16, pC17, pC18, pC19, pC2, pC20, pC21, pC22, pC23, pC24, pC25, pC26, pC27, pC28, pC29, pC3, pC30, pC31, pC32, pC4, pC5, pC6, pC7, pC8, pC9, pCaller, pColl, pCrsr, pCrsr1, pCrsr2, pCrsr3, pCrsr4, pCrsr5, pCrsr6, pCrsr7, pCtx, pCtx1, pCtx2, pCur, pCur1, pCur2, pCur3, pCur4, pCur5, pCur6, pCx, pCx1, pCx2, pCx3, pData, pData0, pDb, pDb1, pDb2, pDb3, pDest, pDest1, pDest2, pEnd, pFrame, pFrame1, pFrame2, pFrame3, pFrame4, pIdxKey, pIn, pIn1, pIn2, pIn3, pKey, pKeyInfo, pKeyInfo1, pKeyInfo2, pLast, pMem, pMem1, pMem2, pMem3, pMem4, pModule, pModule1, pModule2, pModule3, pModule4, pModule5, pModule6, pName, pNew, pOp, pOrig, pOut, pPager, pProgram, pQuery, pRec, pReg, pRhs, pRt, pSavepoint, pSrc, pTab, pTab1, pTab2, pTab3, pTabCur, pTmp, pVCur1, pVTab, pVar, pVtab, pVtab1, pVtab2, pVtab3, pVtab4, pVtab5, pVtab6, pVtab7, pX, pX1, pnErr, t1, z1, z2, z3, zAffinity, zAffinity1, zData, zDb, zDb1, zEndHdr, zFilename, zHdr, zHdr1, zName, zPayload, zSchema, zSql, zTab, zTrace, v240, v241, v242, v250, v251, v252, v253, v255, v263, v278, v279, v286, v288, v297, v298, p189, p192, p193, p196, p199, p208, p209, p212, p227, p228, p229, p230, p231, p232, p233, p234, p235, p236, p237, p238, p239, p293, p294, p295, p296 uintptr
var affinity int8
var alreadyExists, bRev, c, c1, c2, cnt, cnt1, desiredAutoCommit, eNew, eOld, eqOnly, exists, i, i1, i2, i4, i5, i6, i7, i8, i9, iCompare, iCookie, iDb, iDb1, iDb2, iDb3, iQuery, iRollback, iSavepoint, iSet, ii, ii1, isLegacy, isSchemaChange, isTransaction, len1, n, n1, n2, n4, nArg, nArg1, nByte2, nField, nField1, nField2, nHdr, nKeyCol, nMem, nName, nRoot, nStep, nVarint, oc, opflags, p1, p11, p12, p13, p2, p21, pcDest, pcx, rc, res, res10, res11, res12, res21, seekResult, v11, v21, wrFlag, v188, v191, v194, v197, v198, v205, v206, v207, v223, v224, v244, v245, v248, v249, v256, v260, v265, v266, v269, v270, v272, v273, v274, v275, v284, v287, v291 int32
var colCacheCtr, iAddr, iMap, iPrior, idx, len11, n3, p22, p23, serialType, serial_type, v213, v214, v216, v221, v222 Tu32
var encoding, isWriteLock, mTrace, op, p5, resetSchemaOnFault, vtabOnConflict, v225 Tu8
var flags1, flags11, flags2, flags3, flags31, newType, nullFlag, type1, type2, typeMask, v190 Tu16
var h, h1, iKey1, nData, nProgressLimit, nVmStep, offset64, uu Tu64
var i3, iA, iB1, iKey, iKey2, nByte, nByte1, nCellKey, nZero, sz, v254, v262 Ti64
var newMax, v211 uint32
var rA, rB float64
var xAuth Tsqlite3_xauth
var v204, v271, v281 int64
var v215, v267, v282, v299 bool
var _ /* aRes at bp+712 */ [3]int32
var _ /* iA at bp+8 */ Ti64
var _ /* iB at bp+0 */ Ti64
var _ /* iMeta at bp+104 */ int32
var _ /* iMeta at bp+108 */ int32
var _ /* iMoved at bp+608 */ int32
var _ /* initData at bp+640 */ TInitData
var _ /* m at bp+552 */ TMem
var _ /* nChange at bp+616 */ Ti64
var _ /* nEntry at bp+96 */ Ti64
var _ /* nErr at bp+680 */ int32
var _ /* nullFunc at bp+856 */ TFuncDef
var _ /* pVCur at bp+784 */ uintptr
var _ /* pgno at bp+624 */ TPgno
var _ /* r at bp+120 */ TUnpackedRecord
var _ /* r at bp+168 */ TUnpackedRecord
var _ /* r at bp+208 */ TUnpackedRecord
var _ /* r at bp+464 */ TUnpackedRecord
var _ /* r at bp+512 */ TUnpackedRecord
var _ /* res at bp+112 */ int32
var _ /* res at bp+160 */ int32
var _ /* res at bp+248 */ int32
var _ /* res at bp+320 */ int32
var _ /* res at bp+376 */ int32
var _ /* res at bp+392 */ int32
var _ /* res at bp+396 */ int32
var _ /* res at bp+400 */ int32
var _ /* res at bp+456 */ int32
var _ /* rowid at bp+504 */ Ti64
var _ /* rowid at bp+928 */ Tsqlite_int64
var _ /* sContext at bp+800 */ Tsqlite3_context
var _ /* sMem at bp+24 */ TMem
var _ /* sMem at bp+728 */ TMem
var _ /* t at bp+80 */ Tu32
var _ /* uA at bp+16 */ Tu64
var _ /* v at bp+312 */ Ti64
var _ /* v at bp+384 */ Ti64
var _ /* v at bp+88 */ Tu64
var _ /* val at bp+696 */ Ti64
var _ /* x at bp+256 */ TMem
var _ /* x at bp+328 */ TBtreePayload
var _ /* x at bp+408 */ TBtreePayload
var _ /* x at bp+704 */ Ti64
var _ /* z at bp+688 */ uintptr
var _ /* zErr at bp+632 */ uintptr
var _ /* zErr at bp+792 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = aCol, aMem, aOffset, aOp, aPermute, aRoot, affinity, alreadyExists, apArg, apArg1, bRev, c, c1, c2, cnt, cnt1, colCacheCtr, db, desiredAutoCommit, eNew, eOld, encoding, eqOnly, exists, flags1, flags11, flags2, flags3, flags31, h, h1, i, i1, i2, i3, i4, i5, i6, i7, i8, i9, iA, iAddr, iB1, iCompare, iCookie, iDb, iDb1, iDb2, iDb3, iKey, iKey1, iKey2, iMap, iPrior, iQuery, iRollback, iSavepoint, iSet, idx, ii, ii1, isLegacy, isSchemaChange, isTransaction, isWriteLock, len1, len11, mTrace, n, n1, n2, n3, n4, nArg, nArg1, nByte, nByte1, nByte2, nCellKey, nData, nField, nField1, nField2, nHdr, nKeyCol, nMem, nName, nProgressLimit, nRoot, nStep, nVarint, nVmStep, nZero, newMax, newType, nullFlag, oc, offset64, op, opflags, p1, p11, p12, p13, p2, p21, p22, p23, p5, pArgc, pBt, pBt1, pBt2, pBt3, pC, pC1, pC10, pC11, pC12, pC13, pC14, pC15, pC16, pC17, pC18, pC19, pC2, pC20, pC21, pC22, pC23, pC24, pC25, pC26, pC27, pC28, pC29, pC3, pC30, pC31, pC32, pC4, pC5, pC6, pC7, pC8, pC9, pCaller, pColl, pCrsr, pCrsr1, pCrsr2, pCrsr3, pCrsr4, pCrsr5, pCrsr6, pCrsr7, pCtx, pCtx1, pCtx2, pCur, pCur1, pCur2, pCur3, pCur4, pCur5, pCur6, pCx, pCx1, pCx2, pCx3, pData, pData0, pDb, pDb1, pDb2, pDb3, pDest, pDest1, pDest2, pEnd, pFrame, pFrame1, pFrame2, pFrame3, pFrame4, pIdxKey, pIn, pIn1, pIn2, pIn3, pKey, pKeyInfo, pKeyInfo1, pKeyInfo2, pLast, pMem, pMem1, pMem2, pMem3, pMem4, pModule, pModule1, pModule2, pModule3, pModule4, pModule5, pModule6, pName, pNew, pOp, pOrig, pOut, pPager, pProgram, pQuery, pRec, pReg, pRhs, pRt, pSavepoint, pSrc, pTab, pTab1, pTab2, pTab3, pTabCur, pTmp, pVCur1, pVTab, pVar, pVtab, pVtab1, pVtab2, pVtab3, pVtab4, pVtab5, pVtab6, pVtab7, pX, pX1, pcDest, pcx, pnErr, rA, rB, rc, res, res10, res11, res12, res21, resetSchemaOnFault, seekResult, serialType, serial_type, sz, t1, type1, type2, typeMask, uu, v11, v21, vtabOnConflict, wrFlag, xAuth, z1, z2, z3, zAffinity, zAffinity1, zData, zDb, zDb1, zEndHdr, zFilename, zHdr, zHdr1, zName, zPayload, zSchema, zSql, zTab, zTrace, v188, v190, v191, v194, v197, v198, v204, v205, v206, v207, v211, v213, v214, v215, v216, v221, v222, v223, v224, v225, v240, v241, v242, v244, v245, v248, v249, v250, v251, v252, v253, v254, v255, v256, v260, v262, v263, v265, v266, v267, v269, v270, v271, v272, v273, v274, v275, v278, v279, v281, v282, v284, v286, v287, v288, v291, v297, v298, v299, p189, p192, p193, p196, p199, p208, p209, p212, p227, p228, p229, p230, p231, 
p232, p233, p234, p235, p236, p237, p238, p239, p293, p294, p295, p296
aOp = (*TVdbe)(unsafe.Pointer(p)).FaOp /* Copy of p->aOp */
pOp = aOp /* Current operation */
rc = SQLITE_OK /* Value to return */
db = (*TVdbe)(unsafe.Pointer(p)).Fdb /* The database */
resetSchemaOnFault = uint8(0) /* Reset schema after an error if positive */
encoding = (*Tsqlite3)(unsafe.Pointer(db)).Fenc /* The database encoding */
iCompare = 0 /* Result of last comparison */
nVmStep = uint64(0) /* Number of virtual machine steps */
aMem = (*TVdbe)(unsafe.Pointer(p)).FaMem /* Copy of p->aMem */
pIn1 = uintptr(0) /* 1st input operand */
pIn2 = uintptr(0) /* 2nd input operand */
pIn3 = uintptr(0) /* 3rd input operand */
pOut = uintptr(0) /* Output operand */
colCacheCtr = uint32(0) /* Column cache counter */
/*** INSERT STACK UNION HERE ***/
/* sqlite3_step() verifies this */
if (*TVdbe)(unsafe.Pointer(p)).FlockMask != uint32(0) {
_sqlite3VdbeEnter(tls, p)
}
if (*Tsqlite3)(unsafe.Pointer(db)).FxProgress != 0 {
iPrior = *(*Tu32)(unsafe.Pointer(p + 212 + 4*4))
nProgressLimit = uint64((*Tsqlite3)(unsafe.Pointer(db)).FnProgressOps - iPrior%(*Tsqlite3)(unsafe.Pointer(db)).FnProgressOps)
} else {
nProgressLimit = libc.Uint64FromUint32(0xffffffff) | libc.Uint64FromUint32(0xffffffff)<<libc.Int32FromInt32(32)
}
for nVmStep >= nProgressLimit && (*Tsqlite3)(unsafe.Pointer(db)).FxProgress != uintptr(0) {
nProgressLimit += uint64((*Tsqlite3)(unsafe.Pointer(db)).FnProgressOps)
if (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxProgress})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpProgressArg) != 0 {
nProgressLimit = libc.Uint64FromUint32(0xffffffff) | libc.Uint64FromUint32(0xffffffff)<<libc.Int32FromInt32(32)
if (*TOp)(unsafe.Pointer(pOp)).Fp1 > *(*int32)(unsafe.Pointer(db + 136)) {
goto too_big
}
(*TOp)(unsafe.Pointer(pOp)).Fopcode = uint8(OP_String)
/* Fall through to the next case, OP_String */
/* Opcode: String P1 P2 P3 P4 P5
** Synopsis: r[P2]='P4' (len=P1)
**
** The string value P4 of length P1 (bytes) is stored in register P2.
**
** If P3 is not zero and the content of register P3 is equal to P5, then
** the datatype of the register P2 is converted to BLOB. The content is
** the same sequence of bytes; it is merely interpreted as a BLOB instead
** of a string, as if it had been CAST. In other words:
**
** if( P3!=0 and reg[P3]==P5 ) reg[P2] := CAST(reg[P2] as BLOB)
*/
_14:
; /* out2 */
pOut = _out2Prerelease(tls, p, pOp)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(libc.Int32FromInt32(MEM_Str) | libc.Int32FromInt32(MEM_Static) | libc.Int32FromInt32(MEM_Term))
(*TMem)(unsafe.Pointer(pOut)).Fz = *(*uintptr)(unsafe.Pointer(pOp + 16))
(*TMem)(unsafe.Pointer(pOut)).Fn = (*TOp)(unsafe.Pointer(pOp)).Fp1
(*TMem)(unsafe.Pointer(pOut)).Fenc = encoding
goto _187
/* Opcode: BeginSubrtn * P2 * * *
** Synopsis: r[P2]=NULL
**
** Mark the beginning of a subroutine that can be entered in-line
** or that can be called using OP_Gosub. The subroutine should
** be terminated by an OP_Return instruction that has a P1 operand that
** is the same as the P2 operand to this opcode and that has P3 set to 1.
** If the subroutine is entered in-line, then the OP_Return will simply
** fall through. But if the subroutine is entered using OP_Gosub, then
** the OP_Return will jump back to the first instruction after the OP_Gosub.
**
** This routine works by loading a NULL into the P2 register. When the
** return address register contains a NULL, the OP_Return instruction is
** a no-op that simply falls through to the next instruction (assuming that
** the OP_Return opcode has a P3 value of 1). Thus if the subroutine is
** entered in-line, then the OP_Return will cause in-line execution to
** continue. But if the subroutine is entered via OP_Gosub, then the
** OP_Return will cause a return to the address following the OP_Gosub.
**
** This opcode is identical to OP_Null. It has a different name
** only to make the byte code easier to read and verify.
*/
/* Opcode: Null P1 P2 P3 * *
** Synopsis: r[P2..P3]=NULL
**
** Write a NULL into register P2. If P3 is greater than P2, then also write
** NULL into register P3 and every register in between P2 and P3. If P3
** is less than P2 (typically P3 is zero) then only register P2 is
** set to NULL.
**
** If the P1 value is non-zero, then also set the MEM_Cleared flag so that
** NULL values will not compare equal even if SQLITE_NULLEQ is set on
** OP_Ne or OP_Eq.
*/
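// Illustrative example (not part of the generated translation): with P2=5 and
// P3=8 the code below writes NULL into registers 5, 6, 7 and 8; with P3<=P2
// only register P2 is cleared. A nonzero P1 additionally sets MEM_Cleared so
// that OP_Eq/OP_Ne with SQLITE_NULLEQ treat the value as not equal to any
// NULL.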
_16:
;
_15:
;
pOut = _out2Prerelease(tls, p, pOp)
cnt = (*TOp)(unsafe.Pointer(pOp)).Fp3 - (*TOp)(unsafe.Pointer(pOp)).Fp2
if (*TOp)(unsafe.Pointer(pOp)).Fp1 != 0 {
v191 = libc.Int32FromInt32(MEM_Null) | libc.Int32FromInt32(MEM_Cleared)
} else {
v191 = int32(MEM_Null)
}
v190 = uint16(v191)
nullFlag = v190
(*TMem)(unsafe.Pointer(pOut)).Fflags = v190
(*TMem)(unsafe.Pointer(pOut)).Fn = 0
for cnt > 0 {
pOut += 56
_sqlite3VdbeMemSetNull(tls, pOut)
(*TMem)(unsafe.Pointer(pOut)).Fflags = nullFlag
(*TMem)(unsafe.Pointer(pOut)).Fn = 0
cnt--
}
goto _187
/* Opcode: SoftNull P1 * * * *
** Synopsis: r[P1]=NULL
**
** Set register P1 to have the value NULL as seen by the OP_MakeRecord
** instruction, but do not free any string or blob memory associated with
** the register, so that if the value was a string or blob that was
** previously copied using OP_SCopy, the copies will continue to be valid.
*/
_17:
;
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pOut)).Fflags) & ^(libc.Int32FromInt32(MEM_Undefined)|libc.Int32FromInt32(MEM_AffMask)) | int32(MEM_Null))
goto _187
/* Opcode: Blob P1 P2 * P4 *
** Synopsis: r[P2]=P4 (len=P1)
**
** P4 points to a blob of data P1 bytes long. Store this
** blob in register P2. If P4 is a NULL pointer, then construct
** a zero-filled blob that is P1 bytes long in P2.
*/
_18:
; /* out2 */
pOut = _out2Prerelease(tls, p, pOp)
if *(*uintptr)(unsafe.Pointer(pOp + 16)) == uintptr(0) {
_sqlite3VdbeMemSetZeroBlob(tls, pOut, (*TOp)(unsafe.Pointer(pOp)).Fp1)
if _sqlite3VdbeMemExpandBlob(tls, pOut) != 0 {
goto no_mem
}
} else {
_sqlite3VdbeMemSetStr(tls, pOut, *(*uintptr)(unsafe.Pointer(pOp + 16)), int64((*TOp)(unsafe.Pointer(pOp)).Fp1), uint8(0), uintptr(0))
}
(*TMem)(unsafe.Pointer(pOut)).Fenc = encoding
goto _187
/* Opcode: Variable P1 P2 * P4 *
** Synopsis: r[P2]=parameter(P1,P4)
**
** Transfer the value of bound parameter P1 into register P2.
**
** If the parameter is named, then its name appears in P4.
** The P4 value is used by sqlite3_bind_parameter_name().
*/
_19:
; /* Value being transferred */
pVar = (*TVdbe)(unsafe.Pointer(p)).FaVar + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1-int32(1))*56
if _sqlite3VdbeMemTooBig(tls, pVar) != 0 {
goto too_big
}
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TMem)(unsafe.Pointer(pOut)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_sqlite3VdbeMemSetNull(tls, pOut)
}
libc.Xmemcpy(tls, pOut, pVar, uint64(libc.UintptrFromInt32(0)+24))
p192 = pOut + 20
*(*Tu16)(unsafe.Pointer(p192)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p192))) & ^(libc.Int32FromInt32(MEM_Dyn) | libc.Int32FromInt32(MEM_Ephem)))
p193 = pOut + 20
*(*Tu16)(unsafe.Pointer(p193)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p193))) | (libc.Int32FromInt32(MEM_Static) | libc.Int32FromInt32(MEM_FromBind)))
goto _187
/* Opcode: Move P1 P2 P3 * *
** Synopsis: r[P2@P3]=r[P1@P3]
**
** Move the P3 values in register P1..P1+P3-1 over into
** registers P2..P2+P3-1. Registers P1..P1+P3-1 are
** left holding a NULL. It is an error for register ranges
** P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error
** for P3 to be less than 1.
*/
_20:
; /* Register to copy to */
n = (*TOp)(unsafe.Pointer(pOp)).Fp3
p1 = (*TOp)(unsafe.Pointer(pOp)).Fp1
p2 = (*TOp)(unsafe.Pointer(pOp)).Fp2
pIn1 = aMem + uintptr(p1)*56
pOut = aMem + uintptr(p2)*56
for {
_sqlite3VdbeMemMove(tls, pOut, pIn1)
if int32((*TMem)(unsafe.Pointer(pOut)).Fflags)&int32(MEM_Ephem) != 0 && _sqlite3VdbeMemMakeWriteable(tls, pOut) != 0 {
goto no_mem
}
pIn1 += 56
pOut += 56
goto _195
_195:
;
n--
v194 = n
if !(v194 != 0) {
break
}
}
goto _187
/* Opcode: Copy P1 P2 P3 * P5
** Synopsis: r[P2@P3+1]=r[P1@P3+1]
**
** Make a copy of registers P1..P1+P3 into registers P2..P2+P3.
**
** If the 0x0002 bit of P5 is set then also clear the MEM_Subtype flag in the
** destination. The 0x0001 bit of P5 indicates that this Copy opcode cannot
** be merged. The 0x0001 bit is used by the query planner and does not
** come into play during query execution.
**
** This instruction makes a deep copy of the value. A duplicate
** is made of any string or blob constant. See also OP_SCopy.
*/
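// Illustrative example (not part of the generated translation): with P1=2,
// P2=7 and P3=1 the loop below deep-copies registers 2..3 into registers
// 7..8; string and blob payloads are duplicated, so later changes to the
// source registers do not affect the copies.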
_21:
;
n1 = (*TOp)(unsafe.Pointer(pOp)).Fp3
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
for int32(1) != 0 {
_sqlite3VdbeMemShallowCopy(tls, pOut, pIn1, int32(MEM_Ephem))
if int32((*TMem)(unsafe.Pointer(pOut)).Fflags)&int32(MEM_Ephem) != 0 && _sqlite3VdbeMemMakeWriteable(tls, pOut) != 0 {
goto no_mem
}
if int32((*TMem)(unsafe.Pointer(pOut)).Fflags)&int32(MEM_Subtype) != 0 && int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(0x0002) != 0 {
p196 = pOut + 20
*(*Tu16)(unsafe.Pointer(p196)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p196))) & ^libc.Int32FromInt32(MEM_Subtype))
}
v197 = n1
n1--
if v197 == 0 {
break
}
pOut += 56
pIn1 += 56
}
goto _187
/* Opcode: SCopy P1 P2 * * *
** Synopsis: r[P2]=r[P1]
**
** Make a shallow copy of register P1 into register P2.
**
** This instruction makes a shallow copy of the value. If the value
** is a string or blob, then the copy is only a pointer to the
** original and hence if the original changes so will the copy.
** Worse, if the original is deallocated, the copy becomes invalid.
** Thus the program must guarantee that the original will not change
** during the lifetime of the copy. Use OP_Copy to make a complete
** copy.
*/
_22:
; /* out2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
_sqlite3VdbeMemShallowCopy(tls, pOut, pIn1, int32(MEM_Ephem))
goto _187
/* Opcode: IntCopy P1 P2 * * *
** Synopsis: r[P2]=r[P1]
**
** Transfer the integer value held in register P1 into register P2.
**
** This is an optimized version of SCopy that works only for integer
** values.
*/
_23:
; /* out2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
_sqlite3VdbeMemSetInt64(tls, pOut, *(*Ti64)(unsafe.Pointer(pIn1)))
goto _187
/* Opcode: FkCheck * * * * *
**
** Halt with an SQLITE_CONSTRAINT error if there are any unresolved
** foreign key constraint violations. If there are no foreign key
** constraint violations, this is a no-op.
**
** FK constraint violations are also checked when the prepared statement
** exits. This opcode is used to raise foreign key constraint errors prior
** to returning results such as a row change count or the result of a
** RETURNING clause.
*/
_24:
;
v198 = _sqlite3VdbeCheckFk(tls, p, 0)
rc = v198
if v198 != SQLITE_OK {
goto abort_due_to_error
}
goto _187
/* Opcode: ResultRow P1 P2 * * *
** Synopsis: output=r[P1@P2]
**
** The registers P1 through P1+P2-1 contain a single row of
** results. This opcode causes the sqlite3_step() call to terminate
** with an SQLITE_ROW return code and it sets up the sqlite3_stmt
** structure to provide access to the r(P1)..r(P1+P2-1) values as
** the result row.
*/
_25:
;
(*TVdbe)(unsafe.Pointer(p)).FcacheCtr = (*TVdbe)(unsafe.Pointer(p)).FcacheCtr + uint32(2) | uint32(1)
(*TVdbe)(unsafe.Pointer(p)).FpResultRow = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
goto no_mem
}
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmTrace)&int32(SQLITE_TRACE_ROW) != 0 {
(*(*func(*libc.TLS, Tu32, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{*(*uintptr)(unsafe.Pointer(&(*Tsqlite3)(unsafe.Pointer(db)).Ftrace))})))(tls, uint32(SQLITE_TRACE_ROW), (*Tsqlite3)(unsafe.Pointer(db)).FpTraceArg, p, uintptr(0))
}
(*TVdbe)(unsafe.Pointer(p)).Fpc = int32((int64(pOp)-int64(aOp))/24) + int32(1)
rc = int32(SQLITE_ROW)
goto vdbe_return
/* Opcode: Concat P1 P2 P3 * *
** Synopsis: r[P3]=r[P2]+r[P1]
**
** Add the text in register P1 onto the end of the text in
** register P2 and store the result in register P3.
** If either the P1 or P2 text is NULL then store NULL in P3.
**
** P3 = P2 || P1
**
** It is illegal for P1 and P3 to be the same register. Sometimes,
** if P3 is the same register as P2, the implementation is able
** to avoid a memcpy().
*/
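// Illustrative example (not part of the generated translation): with
// r[P1]='def' and r[P2]='abc' the code below stores 'abcdef' in r[P3]
// (P3 = P2 || P1); if either operand is NULL, r[P3] is simply set to NULL.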
_26:
; /* Initial flags for P2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pIn2 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
flags1 = (*TMem)(unsafe.Pointer(pIn1)).Fflags
if (int32(flags1)|int32((*TMem)(unsafe.Pointer(pIn2)).Fflags))&int32(MEM_Null) != 0 {
_sqlite3VdbeMemSetNull(tls, pOut)
goto _187
}
if int32(flags1)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) == 0 {
if _sqlite3VdbeMemStringify(tls, pIn1, encoding, uint8(0)) != 0 {
goto no_mem
}
flags1 = uint16(int32((*TMem)(unsafe.Pointer(pIn1)).Fflags) & ^libc.Int32FromInt32(MEM_Str))
} else {
if int32(flags1)&int32(MEM_Zero) != 0 {
if _sqlite3VdbeMemExpandBlob(tls, pIn1) != 0 {
goto no_mem
}
flags1 = uint16(int32((*TMem)(unsafe.Pointer(pIn1)).Fflags) & ^libc.Int32FromInt32(MEM_Str))
}
}
flags2 = (*TMem)(unsafe.Pointer(pIn2)).Fflags
if int32(flags2)&(libc.Int32FromInt32(MEM_Str)|libc.Int32FromInt32(MEM_Blob)) == 0 {
if _sqlite3VdbeMemStringify(tls, pIn2, encoding, uint8(0)) != 0 {
goto no_mem
}
flags2 = uint16(int32((*TMem)(unsafe.Pointer(pIn2)).Fflags) & ^libc.Int32FromInt32(MEM_Str))
} else {
if int32(flags2)&int32(MEM_Zero) != 0 {
if _sqlite3VdbeMemExpandBlob(tls, pIn2) != 0 {
goto no_mem
}
flags2 = uint16(int32((*TMem)(unsafe.Pointer(pIn2)).Fflags) & ^libc.Int32FromInt32(MEM_Str))
}
}
nByte = int64((*TMem)(unsafe.Pointer(pIn1)).Fn + (*TMem)(unsafe.Pointer(pIn2)).Fn)
if nByte > int64(*(*int32)(unsafe.Pointer(db + 136))) {
goto too_big
}
if _sqlite3VdbeMemGrow(tls, pOut, int32(nByte)+int32(2), libc.BoolInt32(pOut == pIn2)) != 0 {
goto no_mem
}
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pOut)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Str))
if pOut != pIn2 {
libc.Xmemcpy(tls, (*TMem)(unsafe.Pointer(pOut)).Fz, (*TMem)(unsafe.Pointer(pIn2)).Fz, uint64((*TMem)(unsafe.Pointer(pIn2)).Fn))
(*TMem)(unsafe.Pointer(pIn2)).Fflags = flags2
}
libc.Xmemcpy(tls, (*TMem)(unsafe.Pointer(pOut)).Fz+uintptr((*TMem)(unsafe.Pointer(pIn2)).Fn), (*TMem)(unsafe.Pointer(pIn1)).Fz, uint64((*TMem)(unsafe.Pointer(pIn1)).Fn))
(*TMem)(unsafe.Pointer(pIn1)).Fflags = flags1
if int32(encoding) > int32(SQLITE_UTF8) {
nByte &= int64(^libc.Int32FromInt32(1))
}
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pOut)).Fz + uintptr(nByte))) = 0
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pOut)).Fz + uintptr(nByte+int64(1)))) = 0
p199 = pOut + 20
*(*Tu16)(unsafe.Pointer(p199)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p199))) | libc.Int32FromInt32(MEM_Term))
(*TMem)(unsafe.Pointer(pOut)).Fn = int32(nByte)
(*TMem)(unsafe.Pointer(pOut)).Fenc = encoding
goto _187
/* Opcode: Add P1 P2 P3 * *
** Synopsis: r[P3]=r[P1]+r[P2]
**
** Add the value in register P1 to the value in register P2
** and store the result in register P3.
** If either input is NULL, the result is NULL.
*/
/* Opcode: Multiply P1 P2 P3 * *
** Synopsis: r[P3]=r[P1]*r[P2]
**
**
** Multiply the value in register P1 by the value in register P2
** and store the result in register P3.
** If either input is NULL, the result is NULL.
*/
/* Opcode: Subtract P1 P2 P3 * *
** Synopsis: r[P3]=r[P2]-r[P1]
**
** Subtract the value in register P1 from the value in register P2
** and store the result in register P3.
** If either input is NULL, the result is NULL.
*/
/* Opcode: Divide P1 P2 P3 * *
** Synopsis: r[P3]=r[P2]/r[P1]
**
** Divide the value in register P2 by the value in register P1
** and store the result in register P3 (P3=P2/P1). If the value in
** register P1 is zero, then the result is NULL. If either input is
** NULL, the result is NULL.
*/
/* Opcode: Remainder P1 P2 P3 * *
** Synopsis: r[P3]=r[P2]%r[P1]
**
** Compute the remainder after integer register P2 is divided by
** register P1 and store the result in register P3.
** If the value in register P1 is zero the result is NULL.
** If either operand is NULL, the result is NULL.
*/
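// Illustrative note (not part of the generated translation): the int_math
// path below attempts exact 64-bit arithmetic first; on overflow (for example
// 9223372036854775807 + 1) it falls through to the floating-point fp_math
// path, and a Divide or Remainder by zero jumps to arithmetic_result_is_null.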
_31:
; /* same as TK_PLUS, in1, in2, out3 */
_30:
; /* same as TK_MINUS, in1, in2, out3 */
_29:
; /* same as TK_STAR, in1, in2, out3 */
_28:
; /* same as TK_SLASH, in1, in2, out3 */
_27:
; /* Real value of right operand */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
type1 = (*TMem)(unsafe.Pointer(pIn1)).Fflags
pIn2 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
type2 = (*TMem)(unsafe.Pointer(pIn2)).Fflags
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if !(int32(type1)&int32(type2)&int32(MEM_Int) != 0) {
goto _200
}
goto int_math
int_math:
;
iA = *(*Ti64)(unsafe.Pointer(pIn1))
*(*Ti64)(unsafe.Pointer(bp)) = *(*Ti64)(unsafe.Pointer(pIn2))
switch int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) {
case int32(OP_Add):
if _sqlite3AddInt64(tls, bp, iA) != 0 {
goto fp_math
}
case int32(OP_Subtract):
if _sqlite3SubInt64(tls, bp, iA) != 0 {
goto fp_math
}
case int32(OP_Multiply):
if _sqlite3MulInt64(tls, bp, iA) != 0 {
goto fp_math
}
case int32(OP_Divide):
if iA == 0 {
goto arithmetic_result_is_null
}
if iA == int64(-int32(1)) && *(*Ti64)(unsafe.Pointer(bp)) == int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)) {
goto fp_math
}
/* Opcode: ShiftRight P1 P2 P3 * *
** Synopsis: r[P3]=r[P2]>>r[P1]
**
** Shift the integer value in register P2 to the right by the
** number of bits specified by the integer in register P1.
** Store the result in register P3.
** If either input is NULL, the result is NULL.
*/
_36:
; /* same as TK_BITAND, in1, in2, out3 */
_35:
; /* same as TK_BITOR, in1, in2, out3 */
_34:
; /* same as TK_LSHIFT, in1, in2, out3 */
_33:
;
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pIn2 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if (int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)|int32((*TMem)(unsafe.Pointer(pIn2)).Fflags))&int32(MEM_Null) != 0 {
_sqlite3VdbeMemSetNull(tls, pOut)
goto _187
}
*(*Ti64)(unsafe.Pointer(bp + 8)) = _sqlite3VdbeIntValue(tls, pIn2)
iB1 = _sqlite3VdbeIntValue(tls, pIn1)
op = (*TOp)(unsafe.Pointer(pOp)).Fopcode
if int32(op) == int32(OP_BitAnd) {
*(*Ti64)(unsafe.Pointer(bp + 8)) &= iB1
} else {
if int32(op) == int32(OP_BitOr) {
*(*Ti64)(unsafe.Pointer(bp + 8)) |= iB1
} else {
if iB1 != 0 {
/* If shifting by a negative amount, shift in the other direction */
if iB1 < 0 {
op = uint8(libc.Int32FromInt32(2)*libc.Int32FromInt32(OP_ShiftLeft) + libc.Int32FromInt32(1) - int32(op))
if iB1 > int64(-libc.Int32FromInt32(64)) {
v204 = -iB1
} else {
v204 = int64(64)
}
iB1 = v204
}
if iB1 >= int64(64) {
if *(*Ti64)(unsafe.Pointer(bp + 8)) >= 0 || int32(op) == int32(OP_ShiftLeft) {
v205 = 0
} else {
v205 = -int32(1)
}
*(*Ti64)(unsafe.Pointer(bp + 8)) = int64(v205)
} else {
libc.Xmemcpy(tls, bp+16, bp+8, uint64(8))
if int32(op) == int32(OP_ShiftLeft) {
*(*Tu64)(unsafe.Pointer(bp + 16)) <<= uint64(iB1)
} else {
*(*Tu64)(unsafe.Pointer(bp + 16)) >>= uint64(iB1)
/* Sign-extend on a right shift of a negative number */
if *(*Ti64)(unsafe.Pointer(bp + 8)) < 0 {
*(*Tu64)(unsafe.Pointer(bp + 16)) |= (libc.Uint64FromUint32(0xffffffff)<<libc.Int32FromInt32(32) | libc.Uint64FromUint32(0xffffffff)) << uint64(int64(64)-iB1)
/* Opcode: Cast P1 P2 * * *
** Synopsis: affinity(r[P1])
**
** Force the value in register P1 to be the type defined by P2.
**
**   P2=='A' → BLOB
**   P2=='B' → TEXT
**   P2=='C' → NUMERIC
**   P2=='D' → INTEGER
**   P2=='E' → REAL
**
** A NULL value is not changed by this routine. It remains NULL.
*/
_40:
; /* in1 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Zero) != 0 {
v206 = _sqlite3VdbeMemExpandBlob(tls, pIn1)
} else {
v206 = 0
}
rc = v206
if rc != 0 {
goto abort_due_to_error
}
rc = _sqlite3VdbeMemCast(tls, pIn1, uint8((*TOp)(unsafe.Pointer(pOp)).Fp2), encoding)
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: Eq P1 P2 P3 P4 P5
** Synopsis: IF r[P3]==r[P1]
**
** Compare the values in register P1 and P3. If reg(P3)==reg(P1) then
** jump to address P2.
**
** The SQLITE_AFF_MASK portion of P5 must be an affinity character -
** SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made
** to coerce both inputs according to this affinity before the
** comparison is made. If the SQLITE_AFF_MASK is 0x00, then numeric
** affinity is used. Note that the affinity conversions are stored
** back into the input registers P1 and P3. So this opcode can cause
** persistent changes to registers P1 and P3.
**
** Once any conversions have taken place, and neither value is NULL,
** the values are compared. If both values are blobs then memcmp() is
** used to determine the results of the comparison. If both values
** are text, then the appropriate collating function specified in
** P4 is used to do the comparison. If P4 is not specified then
** memcmp() is used to compare text strings. If both values are
** numeric, then a numeric comparison is used. If the two values
** are of different types, then numbers are considered less than
** strings and strings are considered less than blobs.
**
** If SQLITE_NULLEQ is set in P5 then the result of comparison is always either
** true or false and is never NULL. If both operands are NULL then the result
** of comparison is true. If either operand is NULL then the result is false.
** If neither operand is NULL the result is the same as it would be if
** the SQLITE_NULLEQ flag were omitted from P5.
**
** This opcode saves the result of comparison for use by the new
** OP_Jump opcode.
*/
/* Opcode: Ne P1 P2 P3 P4 P5
** Synopsis: IF r[P3]!=r[P1]
**
** This works just like the Eq opcode except that the jump is taken if
** the operands in registers P1 and P3 are not equal. See the Eq opcode for
** additional information.
*/
/* Opcode: Lt P1 P2 P3 P4 P5
** Synopsis: IF r[P3]<r[P1]
**
** Compare the values in register P1 and P3. If reg(P3)<reg(P1) then
** jump to address P2. Affinity conversions and NULL handling are applied
** as described for the Eq opcode above.
*/
/* Opcode: Le P1 P2 P3 P4 P5
** Synopsis: IF r[P3]<=r[P1]
**
** This works just like the Lt opcode except that the jump is taken if
** the content of register P3 is less than or equal to the content of
** register P1. See the Lt opcode for additional information.
*/
/* Opcode: Gt P1 P2 P3 P4 P5
** Synopsis: IF r[P3]>r[P1]
**
** This works just like the Lt opcode except that the jump is taken if
** the content of register P3 is greater than the content of
** register P1. See the Lt opcode for additional information.
*/
/* Opcode: Ge P1 P2 P3 P4 P5
** Synopsis: IF r[P3]>=r[P1]
**
** This works just like the Lt opcode except that the jump is taken if
** the content of register P3 is greater than or equal to the content of
** register P1. See the Lt opcode for additional information.
*/
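// Illustrative example (not part of the generated translation): for OP_Lt
// with numeric affinity in P5, comparing r[P3]='9' (text) against r[P1]=10
// (integer) first applies numeric affinity to the text operand, so the
// comparison is 9 < 10 and the jump to P2 is taken; the original flag bits of
// both registers are restored after the comparison.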
_46:
; /* same as TK_EQ, jump, in1, in3 */
_45:
; /* same as TK_NE, jump, in1, in3 */
_44:
; /* same as TK_LT, jump, in1, in3 */
_43:
; /* same as TK_LE, jump, in1, in3 */
_42:
; /* same as TK_GT, jump, in1, in3 */
_41:
; /* Copy of initial value of pIn3->flags */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pIn3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
flags11 = (*TMem)(unsafe.Pointer(pIn1)).Fflags
flags3 = (*TMem)(unsafe.Pointer(pIn3)).Fflags
if int32(flags11)&int32(flags3)&int32(MEM_Int) != 0 {
/* Common case of comparison of two integers */
if *(*Ti64)(unsafe.Pointer(pIn3)) > *(*Ti64)(unsafe.Pointer(pIn1)) {
if *(*uint8)(unsafe.Pointer(_sqlite3aGTb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fopcode))) != 0 {
goto jump_to_p2
}
iCompare = +libc.Int32FromInt32(1)
} else {
if *(*Ti64)(unsafe.Pointer(pIn3)) < *(*Ti64)(unsafe.Pointer(pIn1)) {
if *(*uint8)(unsafe.Pointer(_sqlite3aLTb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fopcode))) != 0 {
goto jump_to_p2
}
iCompare = -int32(1)
} else {
if *(*uint8)(unsafe.Pointer(_sqlite3aEQb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fopcode))) != 0 {
goto jump_to_p2
}
iCompare = 0
}
}
goto _187
}
if (int32(flags11)|int32(flags3))&int32(MEM_Null) != 0 {
/* One or both operands are NULL */
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(SQLITE_NULLEQ) != 0 {
/* If SQLITE_NULLEQ is set (which will only happen if the operator is
** OP_Eq or OP_Ne) then take the jump or not depending on whether
** or not both operands are null.
*/
if int32(flags11)&int32(flags3)&int32(MEM_Null) != 0 && int32(flags3)&int32(MEM_Cleared) == 0 {
res = 0 /* Operands are equal */
} else {
if int32(flags3)&int32(MEM_Null) != 0 {
v207 = -int32(1)
} else {
v207 = +libc.Int32FromInt32(1)
}
res = v207 /* Operands are not equal */
}
} else {
/* If SQLITE_NULLEQ is clear and at least one operand is NULL,
** then the result is always NULL.
** The jump is taken if the SQLITE_JUMPIFNULL bit is set.
*/
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(SQLITE_JUMPIFNULL) != 0 {
goto jump_to_p2
}
iCompare = int32(1) /* Operands are not equal */
goto _187
}
} else {
/* Neither operand is NULL and we couldn't do the special high-speed
** integer comparison case. So do a general-case comparison. */
affinity = int8(int32((*TOp)(unsafe.Pointer(pOp)).Fp5) & int32(SQLITE_AFF_MASK))
if int32(affinity) >= int32(SQLITE_AFF_NUMERIC) {
if (int32(flags11)|int32(flags3))&int32(MEM_Str) != 0 {
if int32(flags11)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)|libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_Str)) == int32(MEM_Str) {
_applyNumericAffinity(tls, pIn1, 0)
flags3 = (*TMem)(unsafe.Pointer(pIn3)).Fflags
}
if int32(flags3)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)|libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_Str)) == int32(MEM_Str) {
_applyNumericAffinity(tls, pIn3, 0)
}
}
} else {
if int32(affinity) == int32(SQLITE_AFF_TEXT) && (int32(flags11)|int32(flags3))&int32(MEM_Str) != 0 {
if int32(flags11)&int32(MEM_Str) != 0 {
p208 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p208)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p208))) & ^(libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_Real) | libc.Int32FromInt32(MEM_IntReal)))
} else {
if int32(flags11)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
_sqlite3VdbeMemStringify(tls, pIn1, encoding, uint8(1))
flags11 = uint16(int32((*TMem)(unsafe.Pointer(pIn1)).Fflags) & ^libc.Int32FromInt32(MEM_TypeMask) | int32(flags11)&int32(MEM_TypeMask))
if pIn1 == pIn3 {
flags3 = uint16(int32(flags11) | int32(MEM_Str))
}
}
}
if int32(flags3)&int32(MEM_Str) != 0 {
p209 = pIn3 + 20
*(*Tu16)(unsafe.Pointer(p209)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p209))) & ^(libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_Real) | libc.Int32FromInt32(MEM_IntReal)))
} else {
if int32(flags3)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
_sqlite3VdbeMemStringify(tls, pIn3, encoding, uint8(1))
flags3 = uint16(int32((*TMem)(unsafe.Pointer(pIn3)).Fflags) & ^libc.Int32FromInt32(MEM_TypeMask) | int32(flags3)&int32(MEM_TypeMask))
}
}
}
}
res = _sqlite3MemCompare(tls, pIn3, pIn1, *(*uintptr)(unsafe.Pointer(pOp + 16)))
}
/* At this point, res is negative, zero, or positive if reg[P1] is
** less than, equal to, or greater than reg[P3], respectively. Compute
** the answer to this operator in res2, depending on what the comparison
** operator actually is. The next block of code depends on the fact
** that the 6 comparison operators are consecutive integers in this
** order: NE, EQ, GT, LE, LT, GE */
if res < 0 {
res21 = int32(*(*uint8)(unsafe.Pointer(_sqlite3aLTb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fopcode))))
} else {
if res == 0 {
res21 = int32(*(*uint8)(unsafe.Pointer(_sqlite3aEQb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fopcode))))
} else {
res21 = int32(*(*uint8)(unsafe.Pointer(_sqlite3aGTb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fopcode))))
}
}
iCompare = res
/* Undo any changes made by applyAffinity() to the input registers. */
(*TMem)(unsafe.Pointer(pIn3)).Fflags = flags3
(*TMem)(unsafe.Pointer(pIn1)).Fflags = flags11
if res21 != 0 {
goto jump_to_p2
}
goto _187
/* Opcode: ElseEq * P2 * * *
**
** This opcode must follow an OP_Lt or OP_Gt comparison operator. There
** can be zero or more OP_ReleaseReg opcodes intervening, but no other
** opcodes are allowed to occur between this instruction and the previous
** OP_Lt or OP_Gt.
**
** If the result of an OP_Eq comparison on the same two operands as
** the prior OP_Lt or OP_Gt would have been true, then jump to P2. If
** the result of an OP_Eq comparison on the two previous operands
** would have been false or NULL, then fall through.
*/
_47:
; /* same as TK_ESCAPE, jump */
if iCompare == 0 {
goto jump_to_p2
}
goto _187
/* Opcode: Permutation * * * P4 *
**
** Set the permutation used by the OP_Compare operator in the next
** instruction. The permutation is stored in the P4 operand.
**
** The permutation is only valid for the next opcode which must be
** an OP_Compare that has the OPFLAG_PERMUTE bit set in P5.
**
** The first integer in the P4 integer array is the length of the array
** and does not become part of the permutation.
*/
_48:
;
goto _187
/* Opcode: Compare P1 P2 P3 P4 P5
** Synopsis: r[P1@P3] <-> r[P2@P3]
**
** Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this
** vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of
** the comparison for use by the next OP_Jump instruction.
**
** If P5 has the OPFLAG_PERMUTE bit set, then the order of comparison is
** determined by the most recent OP_Permutation operator. If the
** OPFLAG_PERMUTE bit is clear, then registers are compared in sequential
** order.
**
** P4 is a KeyInfo structure that defines collating sequences and sort
** orders for the comparison. The permutation applies to registers
** only. The KeyInfo elements are used sequentially.
**
** The comparison is a sort comparison, so NULLs compare equal,
** NULLs are less than numbers, numbers are less than strings,
** and strings are less than blobs.
**
** This opcode must be immediately followed by an OP_Jump opcode.
*/
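// Illustrative example (not part of the generated translation): with P1=1,
// P2=4, P3=2 and no permutation, the loop below compares r[1] with r[4] and
// then r[2] with r[5] using the collations from the P4 KeyInfo, stopping at
// the first difference; the sign of iCompare then steers the following
// OP_Jump.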
_49:
; /* The permutation */
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_PERMUTE) == 0 {
aPermute = uintptr(0)
} else {
aPermute = *(*uintptr)(unsafe.Pointer(pOp + uintptr(-libc.Int32FromInt32(1))*24 + 16)) + uintptr(1)*4
}
n2 = (*TOp)(unsafe.Pointer(pOp)).Fp3
pKeyInfo = *(*uintptr)(unsafe.Pointer(pOp + 16))
p11 = (*TOp)(unsafe.Pointer(pOp)).Fp1
p21 = (*TOp)(unsafe.Pointer(pOp)).Fp2
i = 0
for {
if !(i < n2) {
break
}
if aPermute != 0 {
v211 = *(*Tu32)(unsafe.Pointer(aPermute + uintptr(i)*4))
} else {
v211 = uint32(i)
}
idx = v211
pColl = *(*uintptr)(unsafe.Pointer(pKeyInfo + 32 + uintptr(i)*8))
bRev = int32(*(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FaSortFlags + uintptr(i)))) & int32(KEYINFO_ORDER_DESC)
iCompare = _sqlite3MemCompare(tls, aMem+uintptr(uint32(p11)+idx)*56, aMem+uintptr(uint32(p21)+idx)*56, pColl)
if iCompare != 0 {
if int32(*(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FaSortFlags + uintptr(i))))&int32(KEYINFO_ORDER_BIGNULL) != 0 && (int32((*(*TMem)(unsafe.Pointer(aMem + uintptr(uint32(p11)+idx)*56))).Fflags)&int32(MEM_Null) != 0 || int32((*(*TMem)(unsafe.Pointer(aMem + uintptr(uint32(p21)+idx)*56))).Fflags)&int32(MEM_Null) != 0) {
iCompare = -iCompare
}
if bRev != 0 {
iCompare = -iCompare
}
break
}
goto _210
_210:
;
i++
}
goto _187
/* Opcode: Jump P1 P2 P3 * *
**
** Jump to the instruction at address P1, P2, or P3 depending on whether
** in the most recent OP_Compare instruction the P1 vector was less than,
** equal to, or greater than the P2 vector, respectively.
**
** This opcode must immediately follow an OP_Compare opcode.
*/
_50:
; /* jump */
if iCompare < 0 {
pOp = aOp + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1-int32(1))*24
} else {
if iCompare == 0 {
pOp = aOp + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2-int32(1))*24
} else {
pOp = aOp + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3-int32(1))*24
}
}
goto _187
/* Opcode: And P1 P2 P3 * *
** Synopsis: r[P3]=(r[P1] && r[P2])
**
** Take the logical AND of the values in registers P1 and P2 and
** write the result into register P3.
**
** If either P1 or P2 is 0 (false) then the result is 0 even if
** the other input is NULL. A NULL and true or two NULLs give
** a NULL output.
*/
/* Opcode: Or P1 P2 P3 * *
** Synopsis: r[P3]=(r[P1] || r[P2])
**
** Take the logical OR of the values in register P1 and P2 and
** store the answer in register P3.
**
** If either P1 or P2 is nonzero (true) then the result is 1 (true)
** even if the other input is NULL. A NULL and false or two NULLs
** give a NULL output.
*/
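/* For reference, the three-valued SQL logic implemented by the _and_logic
** and _or_logic lookup tables (0==FALSE, 1==TRUE, 2==NULL, indexed as
** [left*3 + right]):
**
**   AND | F  T  N        OR | F  T  N
**   ----+--------        ---+--------
**    F  | F  F  F         F | F  T  N
**    T  | F  T  N         T | T  T  T
**    N  | F  N  N         N | N  T  N
*/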
_52:
; /* same as TK_AND, in1, in2, out3 */
_51:
; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
v11 = _sqlite3VdbeBooleanValue(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56, int32(2))
v21 = _sqlite3VdbeBooleanValue(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56, int32(2))
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_And) {
v11 = int32(_and_logic[v11*int32(3)+v21])
} else {
v11 = int32(_or_logic[v11*int32(3)+v21])
}
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if v11 == int32(2) {
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pOut)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Null))
} else {
*(*Ti64)(unsafe.Pointer(pOut)) = int64(v11)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pOut)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Int))
}
goto _187
/* Opcode: IsTrue P1 P2 P3 P4 *
** Synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4
**
** This opcode implements the IS TRUE, IS FALSE, IS NOT TRUE, and
** IS NOT FALSE operators.
**
** Interpret the value in register P1 as a boolean value. Store that
** boolean (a 0 or 1) in register P2. Or if the value in register P1 is
** NULL, then P3 is stored in register P2. Invert the answer if P4
** is 1.
**
** The logic is summarized like this:
**
**
** - If P3==0 and P4==0 then r[P2] := r[P1] IS TRUE
**
** - If P3==1 and P4==1 then r[P2] := r[P1] IS FALSE
**
** - If P3==0 and P4==1 then r[P2] := r[P1] IS NOT TRUE
**
** - If P3==1 and P4==0 then r[P2] := r[P1] IS NOT FALSE
**
*/
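/* Worked examples of the table above: "1 IS TRUE" yields 1, "0 IS TRUE"
** yields 0, "NULL IS TRUE" yields 0 (the P3 default), and
** "NULL IS NOT TRUE" yields 1 (P3 default 0, then inverted by P4==1).
*/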
_53:
; /* in1, out2 */
_sqlite3VdbeMemSetInt64(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56, int64(_sqlite3VdbeBooleanValue(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56, (*TOp)(unsafe.Pointer(pOp)).Fp3)^(*TOp)(unsafe.Pointer(pOp)).Fp4.Fi))
goto _187
/* Opcode: Not P1 P2 * * *
** Synopsis: r[P2]= !r[P1]
**
** Interpret the value in register P1 as a boolean value. Store the
** boolean complement in register P2. If the value in register P1 is
** NULL, then a NULL is stored in P2.
*/
_54:
; /* same as TK_NOT, in1, out2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Null) == 0 {
_sqlite3VdbeMemSetInt64(tls, pOut, libc.BoolInt64(!(_sqlite3VdbeBooleanValue(tls, pIn1, 0) != 0)))
} else {
_sqlite3VdbeMemSetNull(tls, pOut)
}
goto _187
/* Opcode: BitNot P1 P2 * * *
** Synopsis: r[P2]= ~r[P1]
**
** Interpret the content of register P1 as an integer. Store the
** ones-complement of the P1 value into register P2. If P1 holds
** a NULL then store a NULL in P2.
*/
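/* e.g. for a 64-bit two's-complement value: ~5 == -6 and ~(-1) == 0. */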
_55:
; /* same as TK_BITNOT, in1, out2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
_sqlite3VdbeMemSetNull(tls, pOut)
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Null) == 0 {
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Int)
*(*Ti64)(unsafe.Pointer(pOut)) = ^_sqlite3VdbeIntValue(tls, pIn1)
}
goto _187
/* Opcode: Once P1 P2 * * *
**
** Fall through to the next instruction the first time this opcode is
** encountered on each invocation of the byte-code program. Jump to P2
** on the second and all subsequent encounters during the same invocation.
**
** Top-level programs determine first invocation by comparing the P1
** operand against the P1 operand on the OP_Init opcode at the beginning
** of the program. If the P1 values differ, then fall through and make
** the P1 of this opcode equal to the P1 of OP_Init. If P1 values are
** the same then take the jump.
**
** For subprograms, there is a bitmask in the VdbeFrame that determines
** whether or not the jump should be taken. The bitmask is necessary
** because the self-altering code trick does not work for recursive
** triggers.
*/
_56:
; /* Address of this instruction */
if (*TVdbe)(unsafe.Pointer(p)).FpFrame != 0 {
iAddr = uint32(int32((int64(pOp) - int64((*TVdbe)(unsafe.Pointer(p)).FaOp)) / 24))
if int32(*(*Tu8)(unsafe.Pointer((*TVdbeFrame)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FpFrame)).FaOnce + uintptr(iAddr/uint32(8)))))&(int32(1)<<(iAddr&uint32(7))) != 0 {
goto jump_to_p2
}
p212 = (*TVdbeFrame)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FpFrame)).FaOnce + uintptr(iAddr/uint32(8))
*(*Tu8)(unsafe.Pointer(p212)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p212))) | libc.Int32FromInt32(1)<<(iAddr&libc.Uint32FromInt32(7)))
} else {
if (*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp))).Fp1 == (*TOp)(unsafe.Pointer(pOp)).Fp1 {
goto jump_to_p2
}
}
(*TOp)(unsafe.Pointer(pOp)).Fp1 = (*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp))).Fp1
goto _187
/* Opcode: If P1 P2 P3 * *
**
** Jump to P2 if the value in register P1 is true. The value
** is considered true if it is numeric and non-zero. If the value
** in P1 is NULL then take the jump if and only if P3 is non-zero.
*/
_57:
;
c = _sqlite3VdbeBooleanValue(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56, (*TOp)(unsafe.Pointer(pOp)).Fp3)
if c != 0 {
goto jump_to_p2
}
goto _187
/* Opcode: IfNot P1 P2 P3 * *
**
** Jump to P2 if the value in register P1 is False. The value
** is considered false if it has a numeric value of zero. If the value
** in P1 is NULL then take the jump if and only if P3 is non-zero.
*/
_58:
;
c1 = libc.BoolInt32(!(_sqlite3VdbeBooleanValue(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56, libc.BoolInt32(!((*TOp)(unsafe.Pointer(pOp)).Fp3 != 0))) != 0))
if c1 != 0 {
goto jump_to_p2
}
goto _187
/* Opcode: IsNull P1 P2 * * *
** Synopsis: if r[P1]==NULL goto P2
**
** Jump to P2 if the value in register P1 is NULL.
*/
_59:
; /* same as TK_ISNULL, jump, in1 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Null) != 0 {
goto jump_to_p2
}
goto _187
/* Opcode: IsType P1 P2 P3 P4 P5
** Synopsis: if typeof(P1.P3) in P5 goto P2
**
** Jump to P2 if the type of a column in a btree is one of the types specified
** by the P5 bitmask.
**
** P1 is normally a cursor on a btree for which the row decode cache is
** valid through at least column P3. In other words, there should have been
** a prior OP_Column for column P3 or greater. If the cursor is not valid,
** then this opcode might give spurious results.
** If the btree row has fewer than P3 columns, then use P4 as the
** datatype.
**
** If P1 is -1, then P3 is a register number and the datatype is taken
** from the value in that register.
**
** P5 is a bitmask of data types. SQLITE_INTEGER is the least significant
** (0x01) bit. SQLITE_FLOAT is the 0x02 bit. SQLITE_TEXT is 0x04.
** SQLITE_BLOB is 0x08. SQLITE_NULL is 0x10.
**
** WARNING: This opcode does not reliably distinguish between NULL and REAL
** when P1>=0. If the database contains a NaN value, this opcode will think
** that the datatype is REAL when it should be NULL. When P1<0 and the value
** is already stored in register P3, then this opcode does reliably
** distinguish between NULL and REAL. The problem only arises when P1>=0.
**
** Take the jump to address P2 if and only if the datatype of the
** value determined by P1 and P3 corresponds to one of the bits in the
** P5 bitmask.
**
*/
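/* Example: P5==0x11 (SQLITE_INTEGER|SQLITE_NULL) takes the jump when the
** value is stored as an integer or is NULL; P5==0x0e (FLOAT|TEXT|BLOB)
** takes the jump for everything except integers and NULLs.
*/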
_60:
;
if (*TOp)(unsafe.Pointer(pOp)).Fp1 >= 0 {
pC = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if (*TOp)(unsafe.Pointer(pOp)).Fp3 < int32((*TVdbeCursor)(unsafe.Pointer(pC)).FnHdrParsed) {
serialType = *(*Tu32)(unsafe.Pointer(pC + 120 + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*4))
if serialType >= uint32(12) {
if serialType&uint32(1) != 0 {
typeMask = uint16(0x04) /* SQLITE_TEXT */
} else {
typeMask = uint16(0x08) /* SQLITE_BLOB */
}
} else {
typeMask = uint16(_aMask[serialType])
}
} else {
typeMask = uint16(int32(1) << ((*TOp)(unsafe.Pointer(pOp)).Fp4.Fi - int32(1)))
}
} else {
typeMask = uint16(int32(1) << (Xsqlite3_value_type(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56) - int32(1)))
}
if int32(typeMask)&int32((*TOp)(unsafe.Pointer(pOp)).Fp5) != 0 {
goto jump_to_p2
}
goto _187
/* Opcode: ZeroOrNull P1 P2 P3 * *
** Synopsis: r[P2] = 0 OR NULL
**
** If both registers P1 and P3 are NOT NULL, then store a zero in
** register P2. If either register P1 or P3 is NULL then put
** a NULL in register P2.
*/
_61:
; /* in1, in2, out2, in3 */
if int32((*(*TMem)(unsafe.Pointer(aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56))).Fflags)&int32(MEM_Null) != 0 || int32((*(*TMem)(unsafe.Pointer(aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56))).Fflags)&int32(MEM_Null) != 0 {
_sqlite3VdbeMemSetNull(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56)
} else {
_sqlite3VdbeMemSetInt64(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56, 0)
}
goto _187
/* Opcode: NotNull P1 P2 * * *
** Synopsis: if r[P1]!=NULL goto P2
**
** Jump to P2 if the value in register P1 is not NULL.
*/
_62:
; /* same as TK_NOTNULL, jump, in1 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Null) == 0 {
goto jump_to_p2
}
goto _187
/* Opcode: IfNullRow P1 P2 P3 * *
** Synopsis: if P1.nullRow then r[P3]=NULL, goto P2
**
** Check the cursor P1 to see if it is currently pointing at a NULL row.
** If it is, then set register P3 to NULL and jump immediately to P2.
** If P1 is not on a NULL row, then fall through without making any
** changes.
**
** If P1 is not an open cursor, then this opcode is a no-op.
*/
_63:
;
pC1 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if pC1 != 0 && (*TVdbeCursor)(unsafe.Pointer(pC1)).FnullRow != 0 {
_sqlite3VdbeMemSetNull(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56)
goto jump_to_p2
}
goto _187
/* Opcode: Offset P1 P2 P3 * *
** Synopsis: r[P3] = sqlite_offset(P1)
**
** Store in register r[P3] the byte offset into the database file that is the
** start of the payload for the record at which that cursor P1 is currently
** pointing.
**
** P2 is the column number for the argument to the sqlite_offset() function.
** This opcode does not use P2 itself, but the P2 value is used by the
** code generator. The P1, P2, and P3 operands to this opcode are the
** same as for OP_Column.
**
** This opcode is only available if SQLite is compiled with the
** -DSQLITE_ENABLE_OFFSET_SQL_FUNC option.
*/
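/* Example usage (assuming a table t1 with a column a):
**   SELECT sqlite_offset(a) FROM t1;
** returns the byte offset of each row's payload, or NULL when the
** argument does not come directly from an open table cursor.
*/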
_64:
; /* The VDBE cursor */
pC2 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pOut = (*TVdbe)(unsafe.Pointer(p)).FaMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if pC2 == uintptr(0) || int32((*TVdbeCursor)(unsafe.Pointer(pC2)).FeCurType) != CURTYPE_BTREE {
_sqlite3VdbeMemSetNull(tls, pOut)
} else {
if (*TVdbeCursor)(unsafe.Pointer(pC2)).FdeferredMoveto != 0 {
rc = _sqlite3VdbeFinishMoveto(tls, pC2)
if rc != 0 {
goto abort_due_to_error
}
}
if _sqlite3BtreeEof(tls, *(*uintptr)(unsafe.Pointer(pC2 + 48))) != 0 {
_sqlite3VdbeMemSetNull(tls, pOut)
} else {
_sqlite3VdbeMemSetInt64(tls, pOut, _sqlite3BtreeOffset(tls, *(*uintptr)(unsafe.Pointer(pC2 + 48))))
}
}
goto _187
/* Opcode: Column P1 P2 P3 P4 P5
** Synopsis: r[P3]=PX cursor P1 column P2
**
** Interpret the data that cursor P1 points to as a structure built using
** the MakeRecord instruction. (See the MakeRecord opcode for additional
** information about the format of the data.) Extract the P2-th column
** from this record. If there are less than (P2+1)
** values in the record, extract a NULL.
**
** The value extracted is stored in register P3.
**
** If the record contains fewer than P2 fields, then extract a NULL. Or,
** if the P4 argument is a P4_MEM, use the value of the P4 argument as
** the result.
**
** If the OPFLAG_LENGTHARG bit is set in P5 then the result is guaranteed
** to only be used by the length() function or the equivalent. The content
** of large blobs is not loaded, thus saving CPU cycles. If the
** OPFLAG_TYPEOFARG bit is set then the result will only be used by the
** typeof() function or the IS NULL or IS NOT NULL operators or the
** equivalent. In this case, all content loading can be omitted.
*/
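/* A typical use of the P4 default: after ALTER TABLE ... ADD COLUMN with a
** DEFAULT clause, rows written before the ALTER have fewer fields than the
** schema, so reads of the new column fall back to the P4_MEM value rather
** than NULL.
*/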
_65:
; /* PseudoTable input register */
pC3 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
p22 = uint32((*TOp)(unsafe.Pointer(pOp)).Fp2)
goto op_column_restart
op_column_restart:
;
aOffset = (*TVdbeCursor)(unsafe.Pointer(pC3)).FaOffset
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FcacheStatus != (*TVdbe)(unsafe.Pointer(p)).FcacheCtr { /*OPTIMIZATION-IF-FALSE*/
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FnullRow != 0 {
if int32((*TVdbeCursor)(unsafe.Pointer(pC3)).FeCurType) == int32(CURTYPE_PSEUDO) && (*TVdbeCursor)(unsafe.Pointer(pC3)).FseekResult > 0 {
/* For the special case of a pseudo-cursor, the seekResult field
** identifies the register that holds the record */
pReg = aMem + uintptr((*TVdbeCursor)(unsafe.Pointer(pC3)).FseekResult)*56
v213 = uint32((*TMem)(unsafe.Pointer(pReg)).Fn)
(*TVdbeCursor)(unsafe.Pointer(pC3)).FszRow = v213
(*TVdbeCursor)(unsafe.Pointer(pC3)).FpayloadSize = v213
(*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow = (*TMem)(unsafe.Pointer(pReg)).Fz
} else {
pDest = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
_sqlite3VdbeMemSetNull(tls, pDest)
goto op_column_out
}
} else {
pCrsr = *(*uintptr)(unsafe.Pointer(pC3 + 48))
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FdeferredMoveto != 0 {
if v215 = *(*uintptr)(unsafe.Pointer(pC3 + 16)) != 0; v215 {
v214 = *(*Tu32)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pC3 + 16)) + uintptr(uint32(1)+p22)*4))
iMap = v214
}
if v215 && v214 > uint32(0) {
pC3 = (*TVdbeCursor)(unsafe.Pointer(pC3)).FpAltCursor
p22 = iMap - uint32(1)
goto op_column_restart
}
rc = _sqlite3VdbeFinishMoveto(tls, pC3)
if rc != 0 {
goto abort_due_to_error
}
} else {
if _sqlite3BtreeCursorHasMoved(tls, pCrsr) != 0 {
rc = _sqlite3VdbeHandleMovedCursor(tls, pC3)
if rc != 0 {
goto abort_due_to_error
}
goto op_column_restart
}
}
(*TVdbeCursor)(unsafe.Pointer(pC3)).FpayloadSize = _sqlite3BtreePayloadSize(tls, pCrsr)
(*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow = _sqlite3BtreePayloadFetch(tls, pCrsr, pC3+108)
/* Maximum page size is 64KiB */
}
(*TVdbeCursor)(unsafe.Pointer(pC3)).FcacheStatus = (*TVdbe)(unsafe.Pointer(p)).FcacheCtr
v216 = uint32(*(*Tu8)(unsafe.Pointer((*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow)))
*(*Tu32)(unsafe.Pointer(aOffset)) = v216
if v216 < uint32(0x80) {
(*TVdbeCursor)(unsafe.Pointer(pC3)).FiHdrOffset = uint32(1)
} else {
(*TVdbeCursor)(unsafe.Pointer(pC3)).FiHdrOffset = uint32(_sqlite3GetVarint32(tls, (*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow, aOffset))
}
(*TVdbeCursor)(unsafe.Pointer(pC3)).FnHdrParsed = uint16(0)
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FszRow < *(*Tu32)(unsafe.Pointer(aOffset)) { /*OPTIMIZATION-IF-FALSE*/
/* pC->aRow does not have to hold the entire row, but it does at least
** need to cover the header of the record. If pC->aRow does not contain
** the complete header, then set it to zero, forcing the header to be
** dynamically allocated. */
(*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow = uintptr(0)
(*TVdbeCursor)(unsafe.Pointer(pC3)).FszRow = uint32(0)
/* Make sure a corrupt database has not given us an oversize header.
** Do this now to avoid an oversize memory allocation.
**
** Type entries can be between 1 and 5 bytes each. But 4 and 5 byte
** types use so much data space that there can only be 4096 and 32 of
** them, respectively. So the maximum header length results from a
** 3-byte type for each of the maximum of 32768 columns plus three
** extra bytes for the header length itself. 32768*3 + 3 = 98307.
*/
if *(*Tu32)(unsafe.Pointer(aOffset)) > uint32(98307) || *(*Tu32)(unsafe.Pointer(aOffset)) > (*TVdbeCursor)(unsafe.Pointer(pC3)).FpayloadSize {
goto op_column_corrupt
}
} else {
/* This is an optimization. By skipping over the first few tests
** (ex: pC->nHdrParsed<=p2) in the next section, we achieve a
** measurable performance gain.
**
** This branch is taken even if aOffset[0]==0. Such a record is never
** generated by SQLite, and could be considered corruption, but we
** accept it for historical reasons. When aOffset[0]==0, the code this
** branch jumps to reads past the end of the record, but never more
** than a few bytes. Even if the record occurs at the end of the page
** content area, the "page header" comes after the page content and so
** this overread is harmless. Similar overreads can occur for a corrupt
** database file.
*/
zData = (*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow
/* Conditional skipped */
goto op_column_read_header
}
} else {
if _sqlite3BtreeCursorHasMoved(tls, *(*uintptr)(unsafe.Pointer(pC3 + 48))) != 0 {
rc = _sqlite3VdbeHandleMovedCursor(tls, pC3)
if rc != 0 {
goto abort_due_to_error
}
goto op_column_restart
}
}
/* Make sure at least the first p2+1 entries of the header have been
** parsed and valid information is in aOffset[] and pC->aType[].
*/
if !(uint32((*TVdbeCursor)(unsafe.Pointer(pC3)).FnHdrParsed) <= p22) {
goto _217
}
/* If there is more header available for parsing in the record, try
** to extract additional fields up through the p2+1-th field
*/
if !((*TVdbeCursor)(unsafe.Pointer(pC3)).FiHdrOffset < *(*Tu32)(unsafe.Pointer(aOffset))) {
goto _219
}
/* Make sure zData points to enough of the record to cover the header. */
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow == uintptr(0) {
libc.Xmemset(tls, bp+24, 0, uint64(56))
rc = _sqlite3VdbeMemFromBtreeZeroOffset(tls, *(*uintptr)(unsafe.Pointer(pC3 + 48)), *(*Tu32)(unsafe.Pointer(aOffset)), bp+24)
if rc != SQLITE_OK {
goto abort_due_to_error
}
zData = (*(*TMem)(unsafe.Pointer(bp + 24))).Fz
} else {
zData = (*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow
}
/* Fill in pC->aType[i] and aOffset[i] values through the p2-th field. */
goto op_column_read_header
op_column_read_header:
;
i1 = int32((*TVdbeCursor)(unsafe.Pointer(pC3)).FnHdrParsed)
offset64 = uint64(*(*Tu32)(unsafe.Pointer(aOffset + uintptr(i1)*4)))
zHdr = zData + uintptr((*TVdbeCursor)(unsafe.Pointer(pC3)).FiHdrOffset)
zEndHdr = zData + uintptr(*(*Tu32)(unsafe.Pointer(aOffset)))
for cond := true; cond; cond = uint32(i1) <= p22 && zHdr < zEndHdr {
v222 = uint32(*(*Tu8)(unsafe.Pointer(zHdr)))
*(*Tu32)(unsafe.Pointer(bp + 80)) = v222
v221 = v222
*(*Tu32)(unsafe.Pointer(pC3 + 120 + uintptr(i1)*4)) = v221
if v221 < uint32(0x80) {
zHdr++
offset64 += uint64(_sqlite3VdbeOneByteSerialTypeLen(tls, uint8(*(*Tu32)(unsafe.Pointer(bp + 80)))))
} else {
zHdr += uintptr(_sqlite3GetVarint32(tls, zHdr, bp+80))
*(*Tu32)(unsafe.Pointer(pC3 + 120 + uintptr(i1)*4)) = *(*Tu32)(unsafe.Pointer(bp + 80))
offset64 += uint64(_sqlite3VdbeSerialTypeLen(tls, *(*Tu32)(unsafe.Pointer(bp + 80))))
}
i1++
v223 = i1
*(*Tu32)(unsafe.Pointer(aOffset + uintptr(v223)*4)) = uint32(offset64 & libc.Uint64FromUint32(0xffffffff))
}
/* The record is corrupt if any of the following are true:
** (1) the bytes of the header extend past the declared header size
** (2) the entire header was used but not all data was used
** (3) the end of the data extends beyond the end of the record.
*/
if zHdr >= zEndHdr && (zHdr > zEndHdr || offset64 != uint64((*TVdbeCursor)(unsafe.Pointer(pC3)).FpayloadSize)) || offset64 > uint64((*TVdbeCursor)(unsafe.Pointer(pC3)).FpayloadSize) {
if *(*Tu32)(unsafe.Pointer(aOffset)) == uint32(0) {
i1 = 0
zHdr = zEndHdr
} else {
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow == uintptr(0) {
_sqlite3VdbeMemRelease(tls, bp+24)
}
goto op_column_corrupt
}
}
(*TVdbeCursor)(unsafe.Pointer(pC3)).FnHdrParsed = uint16(i1)
(*TVdbeCursor)(unsafe.Pointer(pC3)).FiHdrOffset = uint32(int64(zHdr) - int64(zData))
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow == uintptr(0) {
_sqlite3VdbeMemRelease(tls, bp+24)
}
goto _220
_219:
;
*(*Tu32)(unsafe.Pointer(bp + 80)) = uint32(0)
_220:
;
/* If after trying to extract new entries from the header, nHdrParsed is
** still not up to p2, that means that the record has fewer than p2
** columns. So the result will be either the default value or a NULL.
*/
if uint32((*TVdbeCursor)(unsafe.Pointer(pC3)).FnHdrParsed) <= p22 {
pDest = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if int32((*TOp)(unsafe.Pointer(pOp)).Fp4type) == -int32(10) {
_sqlite3VdbeMemShallowCopy(tls, pDest, *(*uintptr)(unsafe.Pointer(pOp + 16)), int32(MEM_Static))
} else {
_sqlite3VdbeMemSetNull(tls, pDest)
}
goto op_column_out
}
goto _218
_217:
;
*(*Tu32)(unsafe.Pointer(bp + 80)) = *(*Tu32)(unsafe.Pointer(pC3 + 120 + uintptr(p22)*4))
_218:
;
/* Extract the content for the p2+1-th column. Control can only
** reach this point if aOffset[p2], aOffset[p2+1], and pC->aType[p2] are
** all valid.
*/
pDest = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if int32((*TMem)(unsafe.Pointer(pDest)).Fflags)&(libc.Int32FromInt32(MEM_Agg)|libc.Int32FromInt32(MEM_Dyn)) != 0 {
_sqlite3VdbeMemSetNull(tls, pDest)
}
if (*TVdbeCursor)(unsafe.Pointer(pC3)).FszRow >= *(*Tu32)(unsafe.Pointer(aOffset + uintptr(p22+uint32(1))*4)) {
/* This is the common case where the desired content fits on the original
** page - where the content is not on an overflow page */
zData = (*TVdbeCursor)(unsafe.Pointer(pC3)).FaRow + uintptr(*(*Tu32)(unsafe.Pointer(aOffset + uintptr(p22)*4)))
if *(*Tu32)(unsafe.Pointer(bp + 80)) < uint32(12) {
_sqlite3VdbeSerialGet(tls, zData, *(*Tu32)(unsafe.Pointer(bp + 80)), pDest)
} else {
v224 = int32((*(*Tu32)(unsafe.Pointer(bp + 80)) - libc.Uint32FromInt32(12)) / libc.Uint32FromInt32(2))
len1 = v224
(*TMem)(unsafe.Pointer(pDest)).Fn = v224
(*TMem)(unsafe.Pointer(pDest)).Fenc = encoding
if (*TMem)(unsafe.Pointer(pDest)).FszMalloc < len1+int32(2) {
if len1 > *(*int32)(unsafe.Pointer(db + 136)) {
goto too_big
}
(*TMem)(unsafe.Pointer(pDest)).Fflags = uint16(MEM_Null)
if _sqlite3VdbeMemGrow(tls, pDest, len1+int32(2), 0) != 0 {
goto no_mem
}
} else {
(*TMem)(unsafe.Pointer(pDest)).Fz = (*TMem)(unsafe.Pointer(pDest)).FzMalloc
}
libc.Xmemcpy(tls, (*TMem)(unsafe.Pointer(pDest)).Fz, zData, uint64(len1))
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pDest)).Fz + uintptr(len1))) = 0
*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pDest)).Fz + uintptr(len1+int32(1)))) = 0
(*TMem)(unsafe.Pointer(pDest)).Fflags = _aFlag1[*(*Tu32)(unsafe.Pointer(bp + 80))&uint32(1)]
}
} else {
(*TMem)(unsafe.Pointer(pDest)).Fenc = encoding
/* This branch happens only when content is on overflow pages */
v225 = uint8(int32((*TOp)(unsafe.Pointer(pOp)).Fp5) & libc.Int32FromInt32(OPFLAG_BYTELENARG))
p5 = v225
if int32(v225) != 0 && (int32(p5) == int32(OPFLAG_TYPEOFARG) || *(*Tu32)(unsafe.Pointer(bp + 80)) >= uint32(12) && (*(*Tu32)(unsafe.Pointer(bp + 80))&uint32(1) == uint32(0) || int32(p5) == int32(OPFLAG_BYTELENARG))) || _sqlite3VdbeSerialTypeLen(tls, *(*Tu32)(unsafe.Pointer(bp + 80))) == uint32(0) {
/* Content is irrelevant for
** 1. the typeof() function,
** 2. the length(X) function if X is a blob, and
** 3. if the content length is zero.
** So we might as well use bogus content rather than reading
** content from disk.
**
** Although sqlite3VdbeSerialGet() may read at most 8 bytes from the
** buffer passed to it, debugging function VdbeMemPrettyPrint() may
** read more. Use the global constant sqlite3CtypeMap[] as the array,
** as that array is 256 bytes long (plenty for VdbeMemPrettyPrint())
** and it begins with a bunch of zeros.
*/
_sqlite3VdbeSerialGet(tls, uintptr(unsafe.Pointer(&_sqlite3CtypeMap)), *(*Tu32)(unsafe.Pointer(bp + 80)), pDest)
} else {
rc = _vdbeColumnFromOverflow(tls, pC3, int32(p22), int32(*(*Tu32)(unsafe.Pointer(bp + 80))), int64(*(*Tu32)(unsafe.Pointer(aOffset + uintptr(p22)*4))), (*TVdbe)(unsafe.Pointer(p)).FcacheCtr, colCacheCtr, pDest)
if rc != 0 {
if rc == int32(SQLITE_NOMEM) {
goto no_mem
}
if rc == int32(SQLITE_TOOBIG) {
goto too_big
}
goto abort_due_to_error
}
}
}
goto op_column_out
op_column_out:
;
goto _187
goto op_column_corrupt
op_column_corrupt:
;
if (*(*TOp)(unsafe.Pointer(aOp))).Fp3 > 0 {
pOp = aOp + uintptr((*(*TOp)(unsafe.Pointer(aOp))).Fp3-int32(1))*24
goto _187
} else {
rc = _sqlite3CorruptError(tls, int32(95863))
goto abort_due_to_error
}
/* Opcode: TypeCheck P1 P2 P3 P4 *
** Synopsis: typecheck(r[P1@P2])
**
** Apply affinities to the range of P2 registers beginning with P1.
** Take the affinities from the Table object in P4. If any value
** cannot be coerced into the correct type, then raise an error.
**
** This opcode is similar to OP_Affinity except that this opcode
** forces the register type to the Table column type. This is used
** to implement "strict affinity".
**
** GENERATED ALWAYS AS ... STATIC columns are only checked if P3
** is zero. When P3 is non-zero, no type checking occurs for
** static generated columns. Virtual columns are computed at query time
** and so they are never checked.
**
** Preconditions:
**
**
** - P2 should be the number of non-virtual columns in the
** table of P4.
**
** - Table P4 should be a STRICT table.
**
**
** If any precondition is false, an assertion fault occurs.
*/
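/* Example (hypothetical schema): for CREATE TABLE t(a INT, b TEXT) STRICT,
** an INSERT whose value for column a cannot be coerced to an integer causes
** this opcode to raise an SQLITE_CONSTRAINT_DATATYPE error.
*/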
_66:
;
pTab = *(*uintptr)(unsafe.Pointer(pOp + 16))
aCol = (*TTable)(unsafe.Pointer(pTab)).FaCol
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
i2 = 0
for {
if !(i2 < int32((*TTable)(unsafe.Pointer(pTab)).FnCol)) {
break
}
if int32((*(*TColumn)(unsafe.Pointer(aCol + uintptr(i2)*16))).FcolFlags)&int32(COLFLAG_GENERATED) != 0 {
if int32((*(*TColumn)(unsafe.Pointer(aCol + uintptr(i2)*16))).FcolFlags)&int32(COLFLAG_VIRTUAL) != 0 {
goto _226
}
if (*TOp)(unsafe.Pointer(pOp)).Fp3 != 0 {
pIn1 += 56
goto _226
}
}
_applyAffinity(tls, pIn1, (*(*TColumn)(unsafe.Pointer(aCol + uintptr(i2)*16))).Faffinity, encoding)
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Null) == 0 {
switch int32(uint32(*(*uint8)(unsafe.Pointer(aCol + uintptr(i2)*16 + 8)) & 0xf0 >> 4)) {
case int32(COLTYPE_BLOB):
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Blob) == 0 {
goto vdbe_type_error
}
case int32(COLTYPE_INTEGER):
fallthrough
case int32(COLTYPE_INT):
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Int) == 0 {
goto vdbe_type_error
}
case int32(COLTYPE_TEXT):
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Str) == 0 {
goto vdbe_type_error
}
case int32(COLTYPE_REAL):
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Int) != 0 {
/* When applying REAL affinity, if the result is still an MEM_Int
** that will fit in 6 bytes, then change the type to MEM_IntReal
** so that we keep the high-resolution integer value but know that
** the type really wants to be REAL. */
if *(*Ti64)(unsafe.Pointer(pIn1)) <= int64(140737488355327) && *(*Ti64)(unsafe.Pointer(pIn1)) >= -int64(140737488355328) {
p227 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p227)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p227))) | libc.Int32FromInt32(MEM_IntReal))
p228 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p228)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p228))) & ^libc.Int32FromInt32(MEM_Int))
} else {
*(*float64)(unsafe.Pointer(pIn1)) = float64(*(*Ti64)(unsafe.Pointer(pIn1)))
p229 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p229)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p229))) | libc.Int32FromInt32(MEM_Real))
p230 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p230)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p230))) & ^libc.Int32FromInt32(MEM_Int))
}
} else {
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&(libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)) == 0 {
goto vdbe_type_error
}
}
default:
/* COLTYPE_ANY. Accept anything. */
break
}
}
pIn1 += 56
goto _226
_226:
;
i2++
}
goto _187
goto vdbe_type_error
vdbe_type_error:
;
_sqlite3VdbeError(tls, p, __ccgo_ts+5668, libc.VaList(bp+944, _vdbeMemTypeName(tls, pIn1), _sqlite3StdType[int32(uint32(*(*uint8)(unsafe.Pointer(aCol + uintptr(i2)*16 + 8))&0xf0>>4))-int32(1)], (*TTable)(unsafe.Pointer(pTab)).FzName, (*(*TColumn)(unsafe.Pointer(aCol + uintptr(i2)*16))).FzCnName))
rc = libc.Int32FromInt32(SQLITE_CONSTRAINT) | libc.Int32FromInt32(12)<= -int64(140737488355328) {
p231 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p231)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p231))) | libc.Int32FromInt32(MEM_IntReal))
p232 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p232)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p232))) & ^libc.Int32FromInt32(MEM_Int))
} else {
*(*float64)(unsafe.Pointer(pIn1)) = float64(*(*Ti64)(unsafe.Pointer(pIn1)))
p233 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p233)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p233))) | libc.Int32FromInt32(MEM_Real))
p234 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p234)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p234))) & ^(libc.Int32FromInt32(MEM_Int) | libc.Int32FromInt32(MEM_Str)))
}
}
zAffinity++
if int32(*(*int8)(unsafe.Pointer(zAffinity))) == 0 {
break
}
pIn1 += 56
}
goto _187
/* Opcode: MakeRecord P1 P2 P3 P4 *
** Synopsis: r[P3]=mkrec(r[P1@P2])
**
** Convert P2 registers beginning with P1 into the [record format]
** for use as a data record in a database table or as a key
** in an index. The OP_Column opcode can decode the record later.
**
** P4 may be a string that is P2 characters long. The N-th character of the
** string indicates the column affinity that should be used for the N-th
** field of the index key.
**
** The mapping from character to affinity is given by the SQLITE_AFF_
** macros defined in sqliteInt.h.
**
** If P4 is NULL then all index fields have the affinity BLOB.
**
** The meaning of P5 depends on whether or not the SQLITE_ENABLE_NULL_TRIM
** compile-time option is enabled:
**
** * If SQLITE_ENABLE_NULL_TRIM is enabled, then the P5 is the index
** of the right-most table that can be null-trimmed.
**
** * If SQLITE_ENABLE_NULL_TRIM is omitted, then P5 has the value
** OPFLAG_NOCHNG_MAGIC if the OP_MakeRecord opcode is allowed to
** accept no-change records with serial_type 10. This value is
** only used inside an assert() and does not affect the end result.
*/
_68:
; /* Where to write next byte of the payload */
/* Assuming the record contains N fields, the record format looks
** like this:
**
** ------------------------------------------------------------------------
** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... | data N-1 |
** ------------------------------------------------------------------------
**
** Data(0) is taken from register P1. Data(1) comes from register P1+1
** and so forth.
**
** Each type field is a varint representing the serial type of the
** corresponding data element (see sqlite3VdbeSerialType()). The
** hdr-size field is also a varint which is the offset from the beginning
** of the record to data0.
*/
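/* Worked example (assuming UTF-8 text): the two values (7, 'hi') encode as
** the six bytes 03 01 11 07 68 69: header size 3 (a varint that counts
** itself), serial type 1 (1-byte signed integer), serial type 0x11==17
** (text of (17-13)/2 == 2 bytes), then the data bytes 07 and "hi".
*/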
nData = uint64(0) /* Number of bytes of data space */
nHdr = 0 /* Number of bytes of header space */
nZero = 0 /* Number of zero bytes at the end of the record */
nField = (*TOp)(unsafe.Pointer(pOp)).Fp1
zAffinity1 = *(*uintptr)(unsafe.Pointer(pOp + 16))
pData0 = aMem + uintptr(nField)*56
nField = (*TOp)(unsafe.Pointer(pOp)).Fp2
pLast = pData0 + uintptr(nField-int32(1))*56
/* Identify the output register */
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
/* Apply the requested affinity to all inputs
*/
if zAffinity1 != 0 {
pRec = pData0
for cond := true; cond; cond = *(*int8)(unsafe.Pointer(zAffinity1)) != 0 {
_applyAffinity(tls, pRec, *(*int8)(unsafe.Pointer(zAffinity1)), encoding)
if int32(*(*int8)(unsafe.Pointer(zAffinity1))) == int32(SQLITE_AFF_REAL) && int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Int) != 0 {
p235 = pRec + 20
*(*Tu16)(unsafe.Pointer(p235)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p235))) | libc.Int32FromInt32(MEM_IntReal))
p236 = pRec + 20
*(*Tu16)(unsafe.Pointer(p236)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p236))) & ^libc.Int32FromInt32(MEM_Int))
}
zAffinity1++
pRec += 56
}
}
/* Loop through the elements that will make up the record to figure
** out how much space is required for the new record. After this loop,
** the Mem.uTemp field of each term should hold the serial-type that will
** be used for that term in the generated record:
**
** Mem.uTemp value type
** --------------- ---------------
** 0 NULL
** 1 1-byte signed integer
** 2 2-byte signed integer
** 3 3-byte signed integer
** 4 4-byte signed integer
** 5 6-byte signed integer
** 6 8-byte signed integer
** 7 IEEE float
** 8 Integer constant 0
** 9 Integer constant 1
** 10,11 reserved for expansion
** N>=12 and even BLOB
** N>=13 and odd text
**
** The following additional values are computed:
** nHdr Number of bytes needed for the record header
** nData Number of bytes of data space needed for the record
** nZero Zero bytes at the end of the record
*/
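/* For the variable-length cases above: an even serial type N>=12 is a BLOB
** of (N-12)/2 bytes and an odd serial type N>=13 is text of (N-13)/2 bytes,
** which is why serial_type below is computed as len*2+12, plus 1 for text.
*/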
pRec = pLast
for cond := true; cond; cond = int32(1) != 0 {
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Null) != 0 {
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Zero) != 0 {
/* Values with MEM_Null and MEM_Zero are created by xColumn virtual
** table methods that never invoke sqlite3_result_xxxxx() while
** computing an unchanging column value in an UPDATE statement.
** Give such values a special internal-use-only serial-type of 10
** so that they can be passed through to xUpdate and have
** a true sqlite3_value_nochange(). */
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(10)
} else {
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(0)
}
nHdr++
} else {
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) != 0 {
/* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */
i3 = *(*Ti64)(unsafe.Pointer(pRec))
if i3 < 0 {
uu = uint64(^i3)
} else {
uu = uint64(i3)
}
nHdr++
if uu <= uint64(127) {
if i3&int64(1) == i3 && int32((*TVdbe)(unsafe.Pointer(p)).FminWriteFileFormat) >= int32(4) {
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(8) + uint32(uu)
} else {
nData++
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(1)
}
} else {
if uu <= uint64(32767) {
nData += uint64(2)
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(2)
} else {
if uu <= uint64(8388607) {
nData += uint64(3)
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(3)
} else {
if uu <= uint64(2147483647) {
nData += uint64(4)
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(4)
} else {
if uu <= uint64(140737488355327) {
nData += uint64(6)
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(5)
} else {
nData += uint64(8)
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_IntReal) != 0 {
/* If the value is IntReal and is going to take up 8 bytes to store
** as an integer, then we might as well make it an 8-byte floating
** point value */
*(*float64)(unsafe.Pointer(pRec)) = float64(*(*Ti64)(unsafe.Pointer(pRec)))
p237 = pRec + 20
*(*Tu16)(unsafe.Pointer(p237)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p237))) & ^libc.Int32FromInt32(MEM_IntReal))
p238 = pRec + 20
*(*Tu16)(unsafe.Pointer(p238)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p238))) | libc.Int32FromInt32(MEM_Real))
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(7)
} else {
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(6)
}
}
}
}
}
}
} else {
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Real) != 0 {
nHdr++
nData += uint64(8)
(*TMem)(unsafe.Pointer(pRec)).FuTemp = uint32(7)
} else {
len11 = uint32((*TMem)(unsafe.Pointer(pRec)).Fn)
serial_type = len11*uint32(2) + uint32(12) + libc.BoolUint32(int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&libc.Int32FromInt32(MEM_Str) != libc.Int32FromInt32(0))
if int32((*TMem)(unsafe.Pointer(pRec)).Fflags)&int32(MEM_Zero) != 0 {
serial_type += uint32(*(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pRec)).Fu)) * int32(2))
if nData != 0 {
if _sqlite3VdbeMemExpandBlob(tls, pRec) != 0 {
goto no_mem
}
len11 += uint32(*(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pRec)).Fu)))
} else {
nZero += int64(*(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pRec)).Fu)))
}
}
nData += uint64(len11)
nHdr += _sqlite3VarintLen(tls, uint64(serial_type))
(*TMem)(unsafe.Pointer(pRec)).FuTemp = serial_type
}
}
}
if pRec == pData0 {
break
}
pRec -= 56
}
/* EVIDENCE-OF: R-22564-11647 The header begins with a single varint
** which determines the total number of bytes in the header. The varint
** value is the size of the header in bytes including the size varint
** itself. */
if nHdr <= int32(126) {
/* The common case */
nHdr += int32(1)
} else {
/* Rare case of a really large header */
nVarint = _sqlite3VarintLen(tls, uint64(nHdr))
nHdr += nVarint
if nVarint < _sqlite3VarintLen(tls, uint64(nHdr)) {
nHdr++
}
}
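/* Numeric example of the rare case above: with 127 type bytes, the size
** varint is first measured as 1 byte (total 128), but 128 needs a 2-byte
** varint, so nHdr is bumped once more to 129 = 127 type bytes plus a
** 2-byte size varint.
*/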
nByte1 = int64(uint64(nHdr) + nData)
/* Make sure the output register has a buffer large enough to store
** the new record. The output register (pOp->p3) is not allowed to
** be one of the input registers (because the following call to
** sqlite3VdbeMemClearAndResize() could clobber the value before it is used).
*/
if nByte1+nZero <= int64((*TMem)(unsafe.Pointer(pOut)).FszMalloc) {
/* The output register is already large enough to hold the record.
** No error checks or buffer enlargement is required */
(*TMem)(unsafe.Pointer(pOut)).Fz = (*TMem)(unsafe.Pointer(pOut)).FzMalloc
} else {
/* Need to make sure that the output is not too big and then enlarge
** the output register to hold the full result */
if nByte1+nZero > int64(*(*int32)(unsafe.Pointer(db + 136))) {
goto too_big
}
if _sqlite3VdbeMemClearAndResize(tls, pOut, int32(nByte1)) != 0 {
goto no_mem
}
}
(*TMem)(unsafe.Pointer(pOut)).Fn = int32(nByte1)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Blob)
if nZero != 0 {
*(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pOut)).Fu)) = int32(nZero)
p239 = pOut + 20
*(*Tu16)(unsafe.Pointer(p239)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p239))) | libc.Int32FromInt32(MEM_Zero))
}
zHdr1 = (*TMem)(unsafe.Pointer(pOut)).Fz
zPayload = zHdr1 + uintptr(nHdr)
/* Write the record */
if nHdr < int32(0x80) {
v240 = zHdr1
zHdr1++
*(*Tu8)(unsafe.Pointer(v240)) = uint8(nHdr)
} else {
zHdr1 += uintptr(_sqlite3PutVarint(tls, zHdr1, uint64(nHdr)))
}
pRec = pData0
for int32(1) != 0 {
serial_type = (*TMem)(unsafe.Pointer(pRec)).FuTemp
/* EVIDENCE-OF: R-06529-47362 Following the size varint are one or more
** additional varints, one per column.
** EVIDENCE-OF: R-64536-51728 The values for each column in the record
** immediately follow the header. */
if serial_type <= uint32(7) {
v241 = zHdr1
zHdr1++
*(*Tu8)(unsafe.Pointer(v241)) = uint8(serial_type)
if serial_type == uint32(0) {
/* NULL value. No change in zPayload */
} else {
if serial_type == uint32(7) {
libc.Xmemcpy(tls, bp+88, pRec, uint64(8))
} else {
*(*Tu64)(unsafe.Pointer(bp + 88)) = uint64(*(*Ti64)(unsafe.Pointer(pRec)))
}
len11 = uint32(_sqlite3SmallTypeSizes[serial_type])
switch len11 {
default:
*(*Tu8)(unsafe.Pointer(zPayload + 7)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
*(*Tu64)(unsafe.Pointer(bp + 88)) >>= uint64(8)
*(*Tu8)(unsafe.Pointer(zPayload + 6)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
*(*Tu64)(unsafe.Pointer(bp + 88)) >>= uint64(8)
fallthrough
case uint32(6):
*(*Tu8)(unsafe.Pointer(zPayload + 5)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
*(*Tu64)(unsafe.Pointer(bp + 88)) >>= uint64(8)
*(*Tu8)(unsafe.Pointer(zPayload + 4)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
*(*Tu64)(unsafe.Pointer(bp + 88)) >>= uint64(8)
fallthrough
case uint32(4):
*(*Tu8)(unsafe.Pointer(zPayload + 3)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
*(*Tu64)(unsafe.Pointer(bp + 88)) >>= uint64(8)
fallthrough
case uint32(3):
*(*Tu8)(unsafe.Pointer(zPayload + 2)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
*(*Tu64)(unsafe.Pointer(bp + 88)) >>= uint64(8)
fallthrough
case uint32(2):
*(*Tu8)(unsafe.Pointer(zPayload + 1)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
*(*Tu64)(unsafe.Pointer(bp + 88)) >>= uint64(8)
fallthrough
case uint32(1):
*(*Tu8)(unsafe.Pointer(zPayload)) = uint8(*(*Tu64)(unsafe.Pointer(bp + 88)) & libc.Uint64FromInt32(0xff))
}
zPayload += uintptr(len11)
}
} else {
if serial_type < uint32(0x80) {
v242 = zHdr1
zHdr1++
*(*Tu8)(unsafe.Pointer(v242)) = uint8(serial_type)
if serial_type >= uint32(14) && (*TMem)(unsafe.Pointer(pRec)).Fn > 0 {
libc.Xmemcpy(tls, zPayload, (*TMem)(unsafe.Pointer(pRec)).Fz, uint64((*TMem)(unsafe.Pointer(pRec)).Fn))
zPayload += uintptr((*TMem)(unsafe.Pointer(pRec)).Fn)
}
} else {
zHdr1 += uintptr(_sqlite3PutVarint(tls, zHdr1, uint64(serial_type)))
if (*TMem)(unsafe.Pointer(pRec)).Fn != 0 {
libc.Xmemcpy(tls, zPayload, (*TMem)(unsafe.Pointer(pRec)).Fz, uint64((*TMem)(unsafe.Pointer(pRec)).Fn))
zPayload += uintptr((*TMem)(unsafe.Pointer(pRec)).Fn)
}
}
}
if pRec == pLast {
break
}
pRec += 56
}
goto _187
/* Opcode: Count P1 P2 P3 * *
** Synopsis: r[P2]=count()
**
** Store the number of entries (an integer value) in the table or index
** opened by cursor P1 in register P2.
**
** If P3==0, then an exact count is obtained, which involves visiting
** every btree page of the table. But if P3 is non-zero, an estimate
** is returned based on the current cursor position.
*/
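/* A bare "SELECT count(*) FROM t" with no WHERE clause is typically
** compiled down to this opcode with P3==0 so that an exact count is
** returned.
*/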
_69:
;
pCrsr1 = *(*uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8)) + 48))
if (*TOp)(unsafe.Pointer(pOp)).Fp3 != 0 {
*(*Ti64)(unsafe.Pointer(bp + 96)) = _sqlite3BtreeRowCountEst(tls, pCrsr1)
} else {
*(*Ti64)(unsafe.Pointer(bp + 96)) = 0 /* Not needed. Only used to silence a warning. */
rc = _sqlite3BtreeCount(tls, db, pCrsr1, bp+96)
if rc != 0 {
goto abort_due_to_error
}
}
pOut = _out2Prerelease(tls, p, pOp)
*(*Ti64)(unsafe.Pointer(pOut)) = *(*Ti64)(unsafe.Pointer(bp + 96))
goto check_for_interrupt
/* Opcode: Savepoint P1 * * P4 *
**
** Open, release or rollback the savepoint named by parameter P4, depending
** on the value of P1. To open a new savepoint set P1==0 (SAVEPOINT_BEGIN).
** To release (commit) an existing savepoint set P1==1 (SAVEPOINT_RELEASE).
** To rollback an existing savepoint set P1==2 (SAVEPOINT_ROLLBACK).
*/
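/* Example: "SAVEPOINT s1" runs this opcode with P1==0 and P4=='s1',
** "RELEASE s1" with P1==1, and "ROLLBACK TO s1" with P1==2.
*/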
_70:
;
p12 = (*TOp)(unsafe.Pointer(pOp)).Fp1
zName = *(*uintptr)(unsafe.Pointer(pOp + 16))
/* Assert that the p1 parameter is valid. Also that if there is no open
** transaction, then there cannot be any savepoints.
*/
if p12 == SAVEPOINT_BEGIN {
if (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeWrite > 0 {
/* A new savepoint cannot be created if there are active write
** statements (i.e. open read/write incremental blob handles).
*/
_sqlite3VdbeError(tls, p, __ccgo_ts+5709, 0)
rc = int32(SQLITE_BUSY)
} else {
nName = _sqlite3Strlen30(tls, zName)
/* This call is Ok even if this savepoint is actually a transaction
** savepoint (and therefore should not prompt xSavepoint() callbacks).
** If this is a transaction savepoint being opened, it is guaranteed
** that the db->aVTrans[] array is empty. */
rc = _sqlite3VtabSavepoint(tls, db, SAVEPOINT_BEGIN, (*Tsqlite3)(unsafe.Pointer(db)).FnStatement+(*Tsqlite3)(unsafe.Pointer(db)).FnSavepoint)
if rc != SQLITE_OK {
goto abort_due_to_error
}
/* Create a new savepoint structure. */
pNew = _sqlite3DbMallocRawNN(tls, db, uint64(32)+uint64(nName)+uint64(1))
if pNew != 0 {
(*TSavepoint)(unsafe.Pointer(pNew)).FzName = pNew + 1*32
libc.Xmemcpy(tls, (*TSavepoint)(unsafe.Pointer(pNew)).FzName, zName, uint64(nName+int32(1)))
/* If there is no open transaction, then mark this as a special
** "transaction savepoint". */
if (*Tsqlite3)(unsafe.Pointer(db)).FautoCommit != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FautoCommit = uint8(0)
(*Tsqlite3)(unsafe.Pointer(db)).FisTransactionSavepoint = uint8(1)
} else {
(*Tsqlite3)(unsafe.Pointer(db)).FnSavepoint++
}
/* Link the new savepoint into the database handle's list. */
(*TSavepoint)(unsafe.Pointer(pNew)).FpNext = (*Tsqlite3)(unsafe.Pointer(db)).FpSavepoint
(*Tsqlite3)(unsafe.Pointer(db)).FpSavepoint = pNew
(*TSavepoint)(unsafe.Pointer(pNew)).FnDeferredCons = (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredCons
(*TSavepoint)(unsafe.Pointer(pNew)).FnDeferredImmCons = (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons
}
}
} else {
iSavepoint = 0
/* Find the named savepoint. If there is no such savepoint, then an
** error is returned to the user. */
pSavepoint = (*Tsqlite3)(unsafe.Pointer(db)).FpSavepoint
for {
if !(pSavepoint != 0 && _sqlite3StrICmp(tls, (*TSavepoint)(unsafe.Pointer(pSavepoint)).FzName, zName) != 0) {
break
}
iSavepoint++
goto _243
_243:
;
pSavepoint = (*TSavepoint)(unsafe.Pointer(pSavepoint)).FpNext
}
if !(pSavepoint != 0) {
_sqlite3VdbeError(tls, p, __ccgo_ts+5760, libc.VaList(bp+944, zName))
rc = int32(SQLITE_ERROR)
} else {
if (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeWrite > 0 && p12 == int32(SAVEPOINT_RELEASE) {
/* It is not possible to release (commit) a savepoint if there are
** active write statements.
*/
_sqlite3VdbeError(tls, p, __ccgo_ts+5782, 0)
rc = int32(SQLITE_BUSY)
} else {
/* Determine whether or not this is a transaction savepoint. If so,
** and this is a RELEASE command, then the current transaction
** is committed.
*/
isTransaction = libc.BoolInt32((*TSavepoint)(unsafe.Pointer(pSavepoint)).FpNext == uintptr(0) && (*Tsqlite3)(unsafe.Pointer(db)).FisTransactionSavepoint != 0)
if isTransaction != 0 && p12 == int32(SAVEPOINT_RELEASE) {
v244 = _sqlite3VdbeCheckFk(tls, p, int32(1))
rc = v244
if v244 != SQLITE_OK {
goto vdbe_return
}
(*Tsqlite3)(unsafe.Pointer(db)).FautoCommit = uint8(1)
if _sqlite3VdbeHalt(tls, p) == int32(SQLITE_BUSY) {
(*TVdbe)(unsafe.Pointer(p)).Fpc = int32((int64(pOp) - int64(aOp)) / 24)
(*Tsqlite3)(unsafe.Pointer(db)).FautoCommit = uint8(0)
v245 = libc.Int32FromInt32(SQLITE_BUSY)
rc = v245
(*TVdbe)(unsafe.Pointer(p)).Frc = v245
goto vdbe_return
}
rc = (*TVdbe)(unsafe.Pointer(p)).Frc
if rc != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FautoCommit = uint8(0)
} else {
(*Tsqlite3)(unsafe.Pointer(db)).FisTransactionSavepoint = uint8(0)
}
} else {
iSavepoint = (*Tsqlite3)(unsafe.Pointer(db)).FnSavepoint - iSavepoint - int32(1)
if p12 == int32(SAVEPOINT_ROLLBACK) {
isSchemaChange = libc.BoolInt32((*Tsqlite3)(unsafe.Pointer(db)).FmDbFlags&uint32(DBFLAG_SchemaChange) != uint32(0))
ii = 0
for {
if !(ii < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
rc = _sqlite3BtreeTripAllCursors(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(ii)*32))).FpBt, libc.Int32FromInt32(SQLITE_ABORT)|libc.Int32FromInt32(2)< 0 {
/* If this instruction implements a COMMIT and other VMs are writing
** return an error indicating that the other VMs must complete first.
*/
_sqlite3VdbeError(tls, p, __ccgo_ts+5836, 0)
rc = int32(SQLITE_BUSY)
goto abort_due_to_error
} else {
v248 = _sqlite3VdbeCheckFk(tls, p, int32(1))
rc = v248
if v248 != SQLITE_OK {
goto vdbe_return
} else {
(*Tsqlite3)(unsafe.Pointer(db)).FautoCommit = uint8(desiredAutoCommit)
}
}
}
if _sqlite3VdbeHalt(tls, p) == int32(SQLITE_BUSY) {
(*TVdbe)(unsafe.Pointer(p)).Fpc = int32((int64(pOp) - int64(aOp)) / 24)
(*Tsqlite3)(unsafe.Pointer(db)).FautoCommit = uint8(libc.Int32FromInt32(1) - desiredAutoCommit)
v249 = libc.Int32FromInt32(SQLITE_BUSY)
rc = v249
(*TVdbe)(unsafe.Pointer(p)).Frc = v249
goto vdbe_return
}
_sqlite3CloseSavepoints(tls, db)
if (*TVdbe)(unsafe.Pointer(p)).Frc == SQLITE_OK {
rc = int32(SQLITE_DONE)
} else {
rc = int32(SQLITE_ERROR)
}
goto vdbe_return
} else {
if !(desiredAutoCommit != 0) {
v250 = __ccgo_ts + 5891
} else {
if iRollback != 0 {
v251 = __ccgo_ts + 5939
} else {
v251 = __ccgo_ts + 5982
}
v250 = v251
}
_sqlite3VdbeError(tls, p, v250, 0)
rc = int32(SQLITE_ERROR)
goto abort_due_to_error
}
/* Opcode: Transaction P1 P2 P3 P4 P5
**
** Begin a transaction on database P1 if a transaction is not already
** active.
** If P2 is non-zero, then a write-transaction is started, or if a
** read-transaction is already active, it is upgraded to a write-transaction.
** If P2 is zero, then a read-transaction is started. If P2 is 2 or more
** then an exclusive transaction is started.
**
** P1 is the index of the database file on which the transaction is
** started. Index 0 is the main database file and index 1 is the
** file used for temporary tables. Indices of 2 or more are used for
** attached databases.
**
** If a write-transaction is started and the Vdbe.usesStmtJournal flag is
** true (this flag is set if the Vdbe may modify more than one row and may
** throw an ABORT exception), a statement transaction may also be opened.
** More specifically, a statement transaction is opened iff the database
** connection is currently not in autocommit mode, or if there are other
** active statements. A statement transaction allows the changes made by this
** VDBE to be rolled back after an error without having to roll back the
** entire transaction. If no error is encountered, the statement transaction
** will automatically commit when the VDBE halts.
**
** If P5!=0 then this opcode also checks the schema cookie against P3
** and the schema generation counter against P4.
** The cookie changes its value whenever the database schema changes.
** This operation is used to detect when the cookie has changed
** and that the current process needs to reread the schema. If the schema
** cookie in P3 differs from the schema cookie in the database header or
** if the schema generation counter in P4 differs from the current
** generation counter, then an SQLITE_SCHEMA error is raised and execution
** halts. The sqlite3_step() wrapper function might then reprepare the
** statement and rerun it from the beginning.
*/
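/* Example: a simple SELECT on the main database typically begins with
** OP_Transaction P1=0 P2=0 (read transaction), while a statement that
** writes uses P2=1 and, when prepared against a known schema, carries the
** expected schema cookie in P3 with P5!=0 so that stale statements fail
** with SQLITE_SCHEMA.
*/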
_72:
;
*(*int32)(unsafe.Pointer(bp + 104)) = 0
if (*TOp)(unsafe.Pointer(pOp)).Fp2 != 0 && (*Tsqlite3)(unsafe.Pointer(db)).Fflags&(libc.Uint64FromInt32(SQLITE_QueryOnly)|uint64(libc.Int32FromInt32(0x00002))<>5)) != 0 && (*TOp)(unsafe.Pointer(pOp)).Fp2 != 0 && (int32((*Tsqlite3)(unsafe.Pointer(db)).FautoCommit) == 0 || (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeRead > int32(1)) {
if (*TVdbe)(unsafe.Pointer(p)).FiStatement == 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FnStatement++
(*TVdbe)(unsafe.Pointer(p)).FiStatement = (*Tsqlite3)(unsafe.Pointer(db)).FnSavepoint + (*Tsqlite3)(unsafe.Pointer(db)).FnStatement
}
rc = _sqlite3VtabSavepoint(tls, db, SAVEPOINT_BEGIN, (*TVdbe)(unsafe.Pointer(p)).FiStatement-int32(1))
if rc == SQLITE_OK {
rc = _sqlite3BtreeBeginStmt(tls, pBt, (*TVdbe)(unsafe.Pointer(p)).FiStatement)
}
/* Store the current value of the database handles deferred constraint
** counter. If the statement transaction needs to be rolled back,
** the value of this counter needs to be restored too. */
(*TVdbe)(unsafe.Pointer(p)).FnStmtDefCons = (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredCons
(*TVdbe)(unsafe.Pointer(p)).FnStmtDefImmCons = (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons
}
}
if rc == SQLITE_OK && (*TOp)(unsafe.Pointer(pOp)).Fp5 != 0 && (*(*int32)(unsafe.Pointer(bp + 104)) != (*TOp)(unsafe.Pointer(pOp)).Fp3 || (*TSchema)(unsafe.Pointer((*TDb)(unsafe.Pointer(pDb)).FpSchema)).FiGeneration != (*TOp)(unsafe.Pointer(pOp)).Fp4.Fi) {
/*
** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema
** version is checked to ensure that the schema has not changed since the
** SQL statement was prepared.
*/
_sqlite3DbFree(tls, db, (*TVdbe)(unsafe.Pointer(p)).FzErrMsg)
(*TVdbe)(unsafe.Pointer(p)).FzErrMsg = _sqlite3DbStrDup(tls, db, __ccgo_ts+6023)
/* If the schema-cookie from the database file matches the cookie
** stored with the in-memory representation of the schema, do
** not reload the schema from the database file.
**
** If virtual-tables are in use, this is not just an optimization.
** Often, v-tables store their data in other SQLite tables, which
** are queried from within xNext() and other v-table methods using
** prepared queries. If such a query is out-of-date, we do not want to
** discard the database schema, as the user code implementing the
** v-table would have to be ready for the sqlite3_vtab structure itself
** to be invalidated whenever sqlite3_step() is called from within
** a v-table method.
*/
if (*TSchema)(unsafe.Pointer((*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*32))).FpSchema)).Fschema_cookie != *(*int32)(unsafe.Pointer(bp + 104)) {
_sqlite3ResetOneSchema(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp1)
}
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(1), 0, 0x3)
rc = int32(SQLITE_SCHEMA)
/* Set changeCntOn to 0 to prevent the value returned by sqlite3_changes()
** from being modified in sqlite3VdbeHalt(). If this statement is
** reprepared, changeCntOn will be set again. */
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 4, 0x10)
}
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: ReadCookie P1 P2 P3 * *
**
** Read cookie number P3 from database P1 and write it into register P2.
** P3==1 is the schema version. P3==2 is the database format.
** P3==3 is the recommended pager cache size, and so forth. P1==0 is
** the main database file and P1==1 is the database file used to store
** temporary tables.
**
** There must be a read-lock on the database (either a transaction
** must be started or there must be an open cursor) before
** executing this instruction.
*/
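/* Example: "PRAGMA schema_version" reads cookie 1 (BTREE_SCHEMA_VERSION)
** and "PRAGMA user_version" reads cookie 6 (BTREE_USER_VERSION).
*/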
_73:
;
iDb = (*TOp)(unsafe.Pointer(pOp)).Fp1
iCookie = (*TOp)(unsafe.Pointer(pOp)).Fp3
_sqlite3BtreeGetMeta(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb)*32))).FpBt, iCookie, bp+108)
pOut = _out2Prerelease(tls, p, pOp)
*(*Ti64)(unsafe.Pointer(pOut)) = int64(*(*int32)(unsafe.Pointer(bp + 108)))
goto _187
/* Opcode: SetCookie P1 P2 P3 * P5
**
** Write the integer value P3 into cookie number P2 of database P1.
** P2==1 is the schema version. P2==2 is the database format.
** P2==3 is the recommended pager cache
** size, and so forth. P1==0 is the main database file and P1==1 is the
** database file used to store temporary tables.
**
** A transaction must be started before executing this opcode.
**
** If P2 is the SCHEMA_VERSION cookie (cookie number 1) then the internal
** schema version is set to P3-P5. The "PRAGMA schema_version=N" statement
** has P5 set to 1, so that the internal schema version will be different
** from the database schema version, resulting in a schema reset.
*/
_74:
;
pDb1 = (*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*32
/* See note about index shifting on OP_ReadCookie */
rc = _sqlite3BtreeUpdateMeta(tls, (*TDb)(unsafe.Pointer(pDb1)).FpBt, (*TOp)(unsafe.Pointer(pOp)).Fp2, uint32((*TOp)(unsafe.Pointer(pOp)).Fp3))
if (*TOp)(unsafe.Pointer(pOp)).Fp2 == int32(BTREE_SCHEMA_VERSION) {
/* When the schema cookie changes, record the new cookie internally */
*(*Tu32)(unsafe.Pointer((*TDb)(unsafe.Pointer(pDb1)).FpSchema)) = *(*Tu32)(unsafe.Pointer(pOp + 12)) - uint32((*TOp)(unsafe.Pointer(pOp)).Fp5)
*(*Tu32)(unsafe.Pointer(db + 44)) |= uint32(DBFLAG_SchemaChange)
_sqlite3FkClearTriggerCache(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp1)
} else {
if (*TOp)(unsafe.Pointer(pOp)).Fp2 == int32(BTREE_FILE_FORMAT) {
/* Record changes in the file format */
(*TSchema)(unsafe.Pointer((*TDb)(unsafe.Pointer(pDb1)).FpSchema)).Ffile_format = uint8((*TOp)(unsafe.Pointer(pOp)).Fp3)
}
}
if (*TOp)(unsafe.Pointer(pOp)).Fp1 == int32(1) {
/* Invalidate all prepared statements whenever the TEMP database
** schema is changed. Ticket #1644 */
_sqlite3ExpirePreparedStatements(tls, db, 0)
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 0, 0x3)
}
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: OpenRead P1 P2 P3 P4 P5
** Synopsis: root=P2 iDb=P3
**
** Open a read-only cursor for the database table whose root page is
** P2 in a database file. The database file is determined by P3.
** P3==0 means the main database, P3==1 means the database used for
** temporary tables, and P3>1 means use the corresponding attached
** database. Give the new cursor an identifier of P1. The P1
** values need not be contiguous but all P1 values should be small integers.
** It is an error for P1 to be negative.
**
** Allowed P5 bits:
**
** - 0x02 OPFLAG_SEEKEQ: This cursor will only be used for
** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
** or OP_SeekLE/OP_IdxLT)
**
**
** The P4 value may be either an integer (P4_INT32) or a pointer to
** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
** object, then the table being opened must be an [index b-tree] where the
** KeyInfo object defines the content and collating
** sequence of that index b-tree. Otherwise, if P4 is an integer
** value, then the table being opened must be a [table b-tree] with a
** number of columns no less than the value of P4.
**
** See also: OpenWrite, ReopenIdx
*/
/* Opcode: ReopenIdx P1 P2 P3 P4 P5
** Synopsis: root=P2 iDb=P3
**
** The ReopenIdx opcode works like OP_OpenRead except that it first
** checks to see if the cursor on P1 is already open on the same
** b-tree and, if it is, this opcode becomes a no-op. In other words,
** if the cursor is already open, do not reopen it.
**
** The ReopenIdx opcode may only be used with P5==0 or P5==OPFLAG_SEEKEQ
** and with P4 being a P4_KEYINFO object. Furthermore, the P3 value must
** be the same as every other ReopenIdx or OpenRead for the same cursor
** number.
**
** Allowed P5 bits:
**
** - 0x02 OPFLAG_SEEKEQ: This cursor will only be used for
** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
** or OP_SeekLE/OP_IdxLT)
**
**
** See also: OP_OpenRead, OP_OpenWrite
*/
/* Opcode: OpenWrite P1 P2 P3 P4 P5
** Synopsis: root=P2 iDb=P3
**
** Open a read/write cursor named P1 on the table or index whose root
** page is P2 (or whose root page is held in register P2 if the
** OPFLAG_P2ISREG bit is set in P5 - see below).
**
** The P4 value may be either an integer (P4_INT32) or a pointer to
** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
** object, then the table being opened must be an [index b-tree] where the
** KeyInfo object defines the content and collating
** sequence of that index b-tree. Otherwise, if P4 is an integer
** value, then the table being opened must be a [table b-tree] with a
** number of columns no less than the value of P4.
**
** Allowed P5 bits:
**
** - 0x02 OPFLAG_SEEKEQ: This cursor will only be used for
** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
** or OP_SeekLE/OP_IdxLT)
**
** - 0x08 OPFLAG_FORDELETE: This cursor is used only to seek
** and subsequently delete entries in an index btree. This is a
** hint to the storage engine that the storage engine is allowed to
** ignore. The hint is not used by the official SQLite b*tree storage
** engine, but is used by COMDB2.
**
** - 0x10 OPFLAG_P2ISREG: Use the content of register P2
** as the root page, not the value of P2 itself.
**
**
** This instruction works like OpenRead except that it opens the cursor
** in read/write mode.
**
** See also: OP_OpenRead, OP_ReopenIdx
*/
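/* Illustrative example (schematic only; any addresses, registers or root
** pages in examples like this are invented, not literal EXPLAIN output):
** a plain full scan such as
**
**     SELECT a FROM t1;
**
** is typically compiled around an OpenRead / Rewind / Column / ResultRow /
** Next loop, while INSERT, UPDATE and DELETE statements open the same table
** with OpenWrite instead.
*/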
_77:
;
pCur = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if pCur != 0 && (*TVdbeCursor)(unsafe.Pointer(pCur)).FpgnoRoot == uint32((*TOp)(unsafe.Pointer(pOp)).Fp2) {
/* Guaranteed by the code generator */
_sqlite3BtreeClearCursor(tls, *(*uintptr)(unsafe.Pointer(pCur + 48)))
goto open_cursor_set_hints
}
/* If the cursor is not currently open or is open on a different
** index, then fall through into OP_OpenRead to force a reopen */
_76:
; /* ncycle */
_75:
;
if int32(Tbft(*(*uint16)(unsafe.Pointer(p + 200))&0x3>>0)) == int32(1) {
rc = libc.Int32FromInt32(SQLITE_ABORT) | libc.Int32FromInt32(2)<>2))), 2, 0x4)
*(*uintptr)(unsafe.Pointer(pCx + 16)) = *(*uintptr)(unsafe.Pointer(pOrig + 16))
libc.SetBitFieldPtr8Uint32(pCx+8, libc.Uint32FromInt32(1), 3, 0x8)
libc.SetBitFieldPtr8Uint32(pOrig+8, libc.Uint32FromInt32(1), 3, 0x8)
rc = _sqlite3BtreeCursor(tls, *(*uintptr)(unsafe.Pointer(pCx + 16)), (*TVdbeCursor)(unsafe.Pointer(pCx)).FpgnoRoot, int32(BTREE_WRCSR), (*TVdbeCursor)(unsafe.Pointer(pCx)).FpKeyInfo, *(*uintptr)(unsafe.Pointer(pCx + 48)))
/* The sqlite3BtreeCursor() routine can only fail for the first cursor
** opened for a database. Since there is already an open cursor when this
** opcode is run, the sqlite3BtreeCursor() cannot fail */
goto _187
/* Opcode: OpenEphemeral P1 P2 P3 P4 P5
** Synopsis: nColumn=P2
**
** Open a new cursor P1 to a transient table.
** The cursor is always opened read/write even if
** the main database is read-only. The ephemeral
** table is deleted automatically when the cursor is closed.
**
** If the cursor P1 is already opened on an ephemeral table, the table
** is cleared (all content is erased).
**
** P2 is the number of columns in the ephemeral table.
** The cursor points to a BTree table if P4==0 and to a BTree index
** if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure
** that defines the format of keys in the index.
**
** The P5 parameter can be a mask of the BTREE_* flags defined
** in btree.h. These flags control aspects of the operation of
** the btree. The BTREE_OMIT_JOURNAL and BTREE_SINGLE flags are
** added automatically.
**
** If P3 is positive, then reg[P3] is modified slightly so that it
** can be used as zero-length data for OP_Insert. This is an optimization
** that avoids an extra OP_Blob opcode to initialize that register.
*/
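/* Illustrative example: ephemeral tables typically back materialized
** subqueries and similar transient structures, e.g.
**
**     SELECT * FROM t1 WHERE x IN (SELECT y FROM t2);
**
** may materialize the subquery into a transient index opened by this opcode;
** whether it does is a query-planner decision, so treat this as a sketch.
*/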
/* Opcode: OpenAutoindex P1 P2 * P4 *
** Synopsis: nColumn=P2
**
** This opcode works the same as OP_OpenEphemeral. It has a
** different name to distinguish its use. Tables created using
** by this opcode will be used for automatically created transient
** indices in joins.
*/
_80:
; /* ncycle */
_79:
;
if (*TOp)(unsafe.Pointer(pOp)).Fp3 > 0 {
/* Make register reg[P3] into a value that can be used as the data
** for sqlite3BtreeInsert() where the length of the data is zero. */
/* Only used when number of columns is zero */
(*(*TMem)(unsafe.Pointer(aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56))).Fn = 0
(*(*TMem)(unsafe.Pointer(aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56))).Fz = __ccgo_ts + 1650
}
pCx1 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if pCx1 != 0 && !(int32(TBool(*(*uint8)(unsafe.Pointer(pCx1 + 8))&0x8>>3)) != 0) && (*TOp)(unsafe.Pointer(pOp)).Fp2 <= int32((*TVdbeCursor)(unsafe.Pointer(pCx1)).FnField) {
/* If the ephemeral table is already open and has no duplicates from
** OP_OpenDup, then erase all existing content so that the table is
** empty again, rather than creating a new table. */
(*TVdbeCursor)(unsafe.Pointer(pCx1)).FseqCount = 0
(*TVdbeCursor)(unsafe.Pointer(pCx1)).FcacheStatus = uint32(CACHE_STALE)
rc = _sqlite3BtreeClearTable(tls, *(*uintptr)(unsafe.Pointer(pCx1 + 16)), int32((*TVdbeCursor)(unsafe.Pointer(pCx1)).FpgnoRoot), uintptr(0))
} else {
pCx1 = _allocateCursor(tls, p, (*TOp)(unsafe.Pointer(pOp)).Fp1, (*TOp)(unsafe.Pointer(pOp)).Fp2, uint8(CURTYPE_BTREE))
if pCx1 == uintptr(0) {
goto no_mem
}
libc.SetBitFieldPtr8Uint32(pCx1+8, libc.Uint32FromInt32(1), 0, 0x1)
rc = _sqlite3BtreeOpen(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpVfs, uintptr(0), db, pCx1+16, libc.Int32FromInt32(BTREE_OMIT_JOURNAL)|libc.Int32FromInt32(BTREE_SINGLE)|int32((*TOp)(unsafe.Pointer(pOp)).Fp5), _vfsFlags)
if rc == SQLITE_OK {
rc = _sqlite3BtreeBeginTrans(tls, *(*uintptr)(unsafe.Pointer(pCx1 + 16)), int32(1), uintptr(0))
if rc == SQLITE_OK {
/* If a transient index is required, create it by calling
** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before
** opening it. If a transient table is required, just use the
** automatically created table with root-page 1 (a BLOB_INTKEY table).
*/
v253 = *(*uintptr)(unsafe.Pointer(pOp + 16))
pKeyInfo2 = v253
v252 = v253
(*TVdbeCursor)(unsafe.Pointer(pCx1)).FpKeyInfo = v252
if v252 != uintptr(0) {
rc = _sqlite3BtreeCreateTable(tls, *(*uintptr)(unsafe.Pointer(pCx1 + 16)), pCx1+68, int32(BTREE_BLOBKEY)|int32((*TOp)(unsafe.Pointer(pOp)).Fp5))
if rc == SQLITE_OK {
rc = _sqlite3BtreeCursor(tls, *(*uintptr)(unsafe.Pointer(pCx1 + 16)), (*TVdbeCursor)(unsafe.Pointer(pCx1)).FpgnoRoot, int32(BTREE_WRCSR), pKeyInfo2, *(*uintptr)(unsafe.Pointer(pCx1 + 48)))
}
(*TVdbeCursor)(unsafe.Pointer(pCx1)).FisTable = uint8(0)
} else {
(*TVdbeCursor)(unsafe.Pointer(pCx1)).FpgnoRoot = uint32(SCHEMA_ROOT)
rc = _sqlite3BtreeCursor(tls, *(*uintptr)(unsafe.Pointer(pCx1 + 16)), uint32(SCHEMA_ROOT), int32(BTREE_WRCSR), uintptr(0), *(*uintptr)(unsafe.Pointer(pCx1 + 48)))
(*TVdbeCursor)(unsafe.Pointer(pCx1)).FisTable = uint8(1)
}
}
libc.SetBitFieldPtr8Uint32(pCx1+8, libc.BoolUint32(int32((*TOp)(unsafe.Pointer(pOp)).Fp5) != libc.Int32FromInt32(BTREE_UNORDERED)), 2, 0x4)
if rc != 0 {
_sqlite3BtreeClose(tls, *(*uintptr)(unsafe.Pointer(pCx1 + 16)))
}
}
}
if rc != 0 {
goto abort_due_to_error
}
(*TVdbeCursor)(unsafe.Pointer(pCx1)).FnullRow = uint8(1)
goto _187
/* Opcode: SorterOpen P1 P2 P3 P4 *
**
** This opcode works like OP_OpenEphemeral except that it opens
** a transient index that is specifically designed to sort large
** tables using an external merge-sort algorithm.
**
** If argument P3 is non-zero, then it indicates that the sorter may
** assume that a stable sort considering the first P3 fields of each
** key is sufficient to produce the required results.
*/
_81:
;
pCx2 = _allocateCursor(tls, p, (*TOp)(unsafe.Pointer(pOp)).Fp1, (*TOp)(unsafe.Pointer(pOp)).Fp2, uint8(CURTYPE_SORTER))
if pCx2 == uintptr(0) {
goto no_mem
}
(*TVdbeCursor)(unsafe.Pointer(pCx2)).FpKeyInfo = *(*uintptr)(unsafe.Pointer(pOp + 16))
rc = _sqlite3VdbeSorterInit(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp3, pCx2)
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: SequenceTest P1 P2 * * *
** Synopsis: if( cursor[P1].ctr++ ) pc = P2
**
** P1 is a sorter cursor. If the sequence counter is currently zero, jump
** to P2. Regardless of whether or not the jump is taken, increment the
** sequence value.
*/
_82:
;
pC4 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
v255 = pC4 + 24
v254 = *(*Ti64)(unsafe.Pointer(v255))
*(*Ti64)(unsafe.Pointer(v255))++
if v254 == 0 {
goto jump_to_p2
}
goto _187
/* Opcode: OpenPseudo P1 P2 P3 * *
** Synopsis: P3 columns in r[P2]
**
** Open a new cursor that points to a fake table that contains a single
** row of data. The content of that one row is the content of memory
** register P2. In other words, cursor P1 becomes an alias for the
** MEM_Blob content contained in register P2.
**
** A pseudo-table created by this opcode is used to hold a single
** row output from the sorter so that the row can be decomposed into
** individual columns using the OP_Column opcode. The OP_Column opcode
** is the only cursor opcode that works with a pseudo-table.
**
** P3 is the number of fields in the records that will be stored by
** the pseudo-table.
*/
_83:
;
pCx3 = _allocateCursor(tls, p, (*TOp)(unsafe.Pointer(pOp)).Fp1, (*TOp)(unsafe.Pointer(pOp)).Fp3, uint8(CURTYPE_PSEUDO))
if pCx3 == uintptr(0) {
goto no_mem
}
(*TVdbeCursor)(unsafe.Pointer(pCx3)).FnullRow = uint8(1)
(*TVdbeCursor)(unsafe.Pointer(pCx3)).FseekResult = (*TOp)(unsafe.Pointer(pOp)).Fp2
(*TVdbeCursor)(unsafe.Pointer(pCx3)).FisTable = uint8(1)
/* Give this pseudo-cursor a fake BtCursor pointer so that pCx
** can be safely passed to sqlite3VdbeCursorMoveto(). This avoids a test
** for pCx->eCurType==CURTYPE_BTREE inside of sqlite3VdbeCursorMoveto()
** which is a performance optimization */
*(*uintptr)(unsafe.Pointer(pCx3 + 48)) = _sqlite3BtreeFakeValidCursor(tls)
goto _187
/* Opcode: Close P1 * * * *
**
** Close a cursor previously opened as P1. If P1 is not
** currently open, this instruction is a no-op.
*/
_84:
; /* ncycle */
_sqlite3VdbeFreeCursor(tls, p, *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8)))
*(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8)) = uintptr(0)
goto _187
/* Opcode: SeekGE P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
** use the value in register P3 as the key. If cursor P1 refers
** to an SQL index, then P3 is the first in an array of P4 registers
** that are used as an unpacked index key.
**
** Reposition cursor P1 so that it points to the smallest entry that
** is greater than or equal to the key value. If there are no records
** greater than or equal to the key and P2 is not zero, then jump to P2.
**
** If the cursor P1 was opened using the OPFLAG_SEEKEQ flag, then this
** opcode will either land on a record that exactly matches the key, or
** else it will cause a jump to P2. When the cursor is OPFLAG_SEEKEQ,
** this opcode must be followed by an IdxLE opcode with the same arguments.
** The IdxGT opcode will be skipped if this opcode succeeds, but the
** IdxGT opcode will be used on subsequent loop iterations. The
** OPFLAG_SEEKEQ flag is a hint to the btree layer to say that this
** is an equality search.
**
** This opcode leaves the cursor configured to move in forward order,
** from the beginning toward the end. In other words, the cursor is
** configured to use Next, not Prev.
**
** See also: Found, NotFound, SeekLt, SeekGt, SeekLe
*/
/* Opcode: SeekGT P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
** use the value in register P3 as a key. If cursor P1 refers
** to an SQL index, then P3 is the first in an array of P4 registers
** that are used as an unpacked index key.
**
** Reposition cursor P1 so that it points to the smallest entry that
** is greater than the key value. If there are no records greater than
** the key and P2 is not zero, then jump to P2.
**
** This opcode leaves the cursor configured to move in forward order,
** from the beginning toward the end. In other words, the cursor is
** configured to use Next, not Prev.
**
** See also: Found, NotFound, SeekLt, SeekGe, SeekLe
*/
/* Opcode: SeekLT P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
** use the value in register P3 as a key. If cursor P1 refers
** to an SQL index, then P3 is the first in an array of P4 registers
** that are used as an unpacked index key.
**
** Reposition cursor P1 so that it points to the largest entry that
** is less than the key value. If there are no records less than
** the key and P2 is not zero, then jump to P2.
**
** This opcode leaves the cursor configured to move in reverse order,
** from the end toward the beginning. In other words, the cursor is
** configured to use Prev, not Next.
**
** See also: Found, NotFound, SeekGt, SeekGe, SeekLe
*/
/* Opcode: SeekLE P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
** use the value in register P3 as a key. If cursor P1 refers
** to an SQL index, then P3 is the first in an array of P4 registers
** that are used as an unpacked index key.
**
** Reposition cursor P1 so that it points to the largest entry that
** is less than or equal to the key value. If there are no records
** less than or equal to the key and P2 is not zero, then jump to P2.
**
** This opcode leaves the cursor configured to move in reverse order,
** from the end toward the beginning. In other words, the cursor is
** configured to use Prev, not Next.
**
** If the cursor P1 was opened using the OPFLAG_SEEKEQ flag, then this
** opcode will either land on a record that exactly matches the key, or
** else it will cause a jump to P2. When the cursor is OPFLAG_SEEKEQ,
** this opcode must be followed by an IdxLE opcode with the same arguments.
** The IdxGE opcode will be skipped if this opcode succeeds, but the
** IdxGE opcode will be used on subsequent loop iterations. The
** OPFLAG_SEEKEQ flag is a hint to the btree layer to say that this
** is an equality search.
**
** See also: Found, NotFound, SeekGt, SeekGe, SeekLt
*/
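/* Illustrative example (schematic, not literal byte code): a rowid range
** constraint such as
**
**     SELECT * FROM t1 WHERE rowid >= 100;
**
** is normally implemented as an OP_SeekGE on the table cursor followed by a
** Next loop, while a "rowid <= 100" scan run in reverse order would use
** OP_SeekLE followed by Prev.
*/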
_88:
; /* jump, in3, group, ncycle */
_87:
; /* jump, in3, group, ncycle */
_86:
; /* jump, in3, group, ncycle */
_85:
; /* Only interested in == results */
pC5 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
oc = int32((*TOp)(unsafe.Pointer(pOp)).Fopcode)
eqOnly = 0
(*TVdbeCursor)(unsafe.Pointer(pC5)).FnullRow = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC5)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC5)).FcacheStatus = uint32(CACHE_STALE)
if (*TVdbeCursor)(unsafe.Pointer(pC5)).FisTable != 0 {
/* The OPFLAG_SEEKEQ/BTREE_SEEK_EQ flag is only set on index cursors */
/* The input value in P3 might be of any type: integer, real, string,
** blob, or NULL. But it needs to be an integer before we can do
** the seek, so convert it. */
pIn3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
flags31 = (*TMem)(unsafe.Pointer(pIn3)).Fflags
if int32(flags31)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_Real)|libc.Int32FromInt32(MEM_IntReal)|libc.Int32FromInt32(MEM_Str)) == int32(MEM_Str) {
_applyNumericAffinity(tls, pIn3, 0)
}
iKey = _sqlite3VdbeIntValue(tls, pIn3) /* Get the integer key value */
newType = (*TMem)(unsafe.Pointer(pIn3)).Fflags /* Record the type after applying numeric affinity */
(*TMem)(unsafe.Pointer(pIn3)).Fflags = flags31 /* But convert the type back to its original */
/* If the P3 value could not be converted into an integer without
** loss of information, then special processing is required... */
if int32(newType)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) == 0 {
if int32(newType)&int32(MEM_Real) == 0 {
if int32(newType)&int32(MEM_Null) != 0 || oc >= int32(OP_SeekGE) {
goto jump_to_p2
} else {
rc = _sqlite3BtreeLast(tls, *(*uintptr)(unsafe.Pointer(pC5 + 48)), bp+112)
if rc != SQLITE_OK {
goto abort_due_to_error
}
goto seek_not_found
}
}
c2 = _sqlite3IntFloatCompare(tls, iKey, *(*float64)(unsafe.Pointer(pIn3)))
/* If the approximation iKey is larger than the actual real search
** term, substitute >= for > and < for <=. e.g. if the search term
** is 4.9 and the integer approximation 5:
**
** (x > 4.9) -> (x >= 5)
** (x <= 4.9) -> (x < 5)
*/
if c2 > 0 {
if oc&int32(0x0001) == libc.Int32FromInt32(OP_SeekGT)&libc.Int32FromInt32(0x0001) {
oc--
}
} else {
if c2 < 0 {
if oc&int32(0x0001) == libc.Int32FromInt32(OP_SeekLT)&libc.Int32FromInt32(0x0001) {
oc++
}
}
}
}
rc = _sqlite3BtreeTableMoveto(tls, *(*uintptr)(unsafe.Pointer(pC5 + 48)), int64(uint64(iKey)), 0, bp+112)
(*TVdbeCursor)(unsafe.Pointer(pC5)).FmovetoTarget = iKey /* Used by OP_Delete */
if rc != SQLITE_OK {
goto abort_due_to_error
}
} else {
/* For a cursor with the OPFLAG_SEEKEQ/BTREE_SEEK_EQ hint, only the
** OP_SeekGE and OP_SeekLE opcodes are allowed, and these must be
** immediately followed by an OP_IdxGT or OP_IdxLT opcode, respectively,
** with the same key.
*/
if _sqlite3BtreeCursorHasHint(tls, *(*uintptr)(unsafe.Pointer(pC5 + 48)), uint32(BTREE_SEEK_EQ)) != 0 {
eqOnly = int32(1)
}
nField2 = (*TOp)(unsafe.Pointer(pOp)).Fp4.Fi
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 120))).FpKeyInfo = (*TVdbeCursor)(unsafe.Pointer(pC5)).FpKeyInfo
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 120))).FnField = uint16(nField2)
/* The next line of code computes as follows, only faster:
** if( oc==OP_SeekGT || oc==OP_SeekLE ){
** r.default_rc = -1;
** }else{
** r.default_rc = +1;
** }
*/
if int32(1)&(oc-int32(OP_SeekLT)) != 0 {
v256 = -int32(1)
} else {
v256 = +libc.Int32FromInt32(1)
}
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 120))).Fdefault_rc = int8(v256)
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 120))).FaMem = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 120))).FeqSeen = uint8(0)
rc = _sqlite3BtreeIndexMoveto(tls, *(*uintptr)(unsafe.Pointer(pC5 + 48)), bp+120, bp+112)
if rc != SQLITE_OK {
goto abort_due_to_error
}
if eqOnly != 0 && int32((*(*TUnpackedRecord)(unsafe.Pointer(bp + 120))).FeqSeen) == 0 {
goto seek_not_found
}
}
if oc >= int32(OP_SeekGE) {
if *(*int32)(unsafe.Pointer(bp + 112)) < 0 || *(*int32)(unsafe.Pointer(bp + 112)) == 0 && oc == int32(OP_SeekGT) {
*(*int32)(unsafe.Pointer(bp + 112)) = 0
rc = _sqlite3BtreeNext(tls, *(*uintptr)(unsafe.Pointer(pC5 + 48)), 0)
if rc != SQLITE_OK {
if rc == int32(SQLITE_DONE) {
rc = SQLITE_OK
*(*int32)(unsafe.Pointer(bp + 112)) = int32(1)
} else {
goto abort_due_to_error
}
}
} else {
*(*int32)(unsafe.Pointer(bp + 112)) = 0
}
} else {
if *(*int32)(unsafe.Pointer(bp + 112)) > 0 || *(*int32)(unsafe.Pointer(bp + 112)) == 0 && oc == int32(OP_SeekLT) {
*(*int32)(unsafe.Pointer(bp + 112)) = 0
rc = _sqlite3BtreePrevious(tls, *(*uintptr)(unsafe.Pointer(pC5 + 48)), 0)
if rc != SQLITE_OK {
if rc == int32(SQLITE_DONE) {
rc = SQLITE_OK
*(*int32)(unsafe.Pointer(bp + 112)) = int32(1)
} else {
goto abort_due_to_error
}
}
} else {
/* res might be negative because the table is empty. Check to
** see if this is the case.
*/
*(*int32)(unsafe.Pointer(bp + 112)) = _sqlite3BtreeEof(tls, *(*uintptr)(unsafe.Pointer(pC5 + 48)))
}
}
goto seek_not_found
seek_not_found:
;
if *(*int32)(unsafe.Pointer(bp + 112)) != 0 {
goto jump_to_p2
} else {
if eqOnly != 0 {
pOp += 24 /* Skip the OP_IdxLt or OP_IdxGT that follows */
}
}
goto _187
/* Opcode: SeekScan P1 P2 * * P5
** Synopsis: Scan-ahead up to P1 rows
**
** This opcode is a prefix opcode to OP_SeekGE. In other words, this
** opcode must be immediately followed by OP_SeekGE. This constraint is
** checked by assert() statements.
**
** This opcode uses the P1 through P4 operands of the subsequent
** OP_SeekGE. In the text that follows, the operands of the subsequent
** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4. Only
** the P1, P2 and P5 operands of this opcode are also used, and are called
** This.P1, This.P2 and This.P5.
**
** This opcode helps to optimize IN operators on a multi-column index
** where the IN operator is on the later terms of the index by avoiding
** unnecessary seeks on the btree, substituting steps to the next row
** of the b-tree instead. A correct answer is obtained if this opcode
** is omitted or is a no-op.
**
** The SeekGE.P3 and SeekGE.P4 operands identify an unpacked key which
** is the desired entry that we want the cursor SeekGE.P1 to be pointing
** to. Call this SeekGE.P3/P4 row the "target".
**
** If the SeekGE.P1 cursor is not currently pointing to a valid row,
** then this opcode is a no-op and control passes through into the OP_SeekGE.
**
** If the SeekGE.P1 cursor is pointing to a valid row, then that row
** might be the target row, or it might be near and slightly before the
** target row, or it might be after the target row. If the cursor is
** currently before the target row, then this opcode attempts to position
** the cursor on or after the target row by invoking sqlite3BtreeStep()
** on the cursor between 1 and This.P1 times.
**
** The This.P5 parameter is a flag that indicates what to do if the
** cursor ends up pointing at a valid row that is past the target
** row. If This.P5 is false (0) then a jump is made to SeekGE.P2. If
** This.P5 is true (non-zero) then a jump is made to This.P2. The P5==0
** case occurs when there are no inequality constraints to the right of
** the IN constraint. The jump to SeekGE.P2 ends the loop. The P5!=0 case
** occurs when there are inequality constraints to the right of the IN
** operator. In that case, This.P2 will point either directly to, or
** to setup code prior to, the OP_IdxGT or OP_IdxGE opcode that checks for
** loop termination.
**
** Possible outcomes from this opcode:
**
** - If the cursor is initially not pointed to any valid row, then
** fall through into the subsequent OP_SeekGE opcode.
**
**
** - If the cursor is left pointing to a row that is before the target
** row, even after making as many as This.P1 calls to
** sqlite3BtreeNext(), then also fall through into OP_SeekGE.
**
**
** - If the cursor is left pointing at the target row, either because it
** was at the target row to begin with or because one or more
** sqlite3BtreeNext() calls moved the cursor to the target row,
** then jump to This.P2.
**
**
** - If the cursor started out before the target row and a call to
** sqlite3BtreeNext() moved the cursor off the end of the index
** (indicating that the target row definitely does not exist in the
** btree) then jump to SeekGE.P2, ending the loop.
**
**
** - If the cursor ends up on a valid row that is past the target row
** (indicating that the target row does not exist in the btree) then
** jump to SeekOP.P2 if This.P5==0 or to This.P2 if This.P5>0.
**
*/
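/* Illustrative example: with an index on (a,b) and a query such as
**
**     SELECT * FROM t1 WHERE a=?1 AND b IN (1,2,3);
**
** the loop re-positions the index cursor once per IN element; OP_SeekScan
** lets the VM try up to This.P1 cheap sqlite3BtreeNext() steps from the
** current position before paying for a full OP_SeekGE. Whether the planner
** emits it depends on its cost heuristics, so this is only a sketch.
*/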
_89:
;
/* If pOp->p5 is clear, then pOp->p2 points to the first instruction past the
** OP_IdxGT that follows the OP_SeekGE. Otherwise, it points to the first
** opcode past the OP_SeekGE itself. */
pC6 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*(*TOp)(unsafe.Pointer(pOp + 1*24))).Fp1)*8))
if !(_sqlite3BtreeCursorIsValidNN(tls, *(*uintptr)(unsafe.Pointer(pC6 + 48))) != 0) {
goto _187
}
nStep = (*TOp)(unsafe.Pointer(pOp)).Fp1
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 168))).FpKeyInfo = (*TVdbeCursor)(unsafe.Pointer(pC6)).FpKeyInfo
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 168))).FnField = uint16(*(*int32)(unsafe.Pointer(pOp + 1*24 + 16)))
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 168))).Fdefault_rc = 0
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 168))).FaMem = aMem + uintptr((*(*TOp)(unsafe.Pointer(pOp + 1*24))).Fp3)*56
*(*int32)(unsafe.Pointer(bp + 160)) = 0 /* Not needed. Only used to silence a warning. */
_258:
;
if !(int32(1) != 0) {
goto _257
}
rc = _sqlite3VdbeIdxKeyCompare(tls, db, pC6, bp+168, bp+160)
if rc != 0 {
goto abort_due_to_error
}
if !(*(*int32)(unsafe.Pointer(bp + 160)) > 0 && int32((*TOp)(unsafe.Pointer(pOp)).Fp5) == 0) {
goto _259
}
goto seekscan_search_fail
seekscan_search_fail:
;
/* Jump to SeekGE.P2, ending the loop */
pOp += 24
goto jump_to_p2
_259:
;
if *(*int32)(unsafe.Pointer(bp + 160)) >= 0 {
/* Jump to This.P2, bypassing the OP_SeekGE opcode */
goto jump_to_p2
goto _257
}
if nStep <= 0 {
goto _257
}
nStep--
(*TVdbeCursor)(unsafe.Pointer(pC6)).FcacheStatus = uint32(CACHE_STALE)
rc = _sqlite3BtreeNext(tls, *(*uintptr)(unsafe.Pointer(pC6 + 48)), 0)
if rc != 0 {
if rc == int32(SQLITE_DONE) {
rc = SQLITE_OK
goto seekscan_search_fail
} else {
goto abort_due_to_error
}
}
goto _258
_257:
;
goto _187
/* Opcode: SeekHit P1 P2 P3 * *
** Synopsis: set P2<=seekHit<=P3
**
** Increase or decrease the seekHit value for cursor P1, if necessary,
** so that it is no less than P2 and no greater than P3.
**
** The seekHit integer represents the maximum number of terms in an index for which
** there is known to be at least one match. If the seekHit value is smaller
** than the total number of equality terms in an index lookup, then the
** OP_IfNoHope opcode might run to see if the IN loop can be abandoned
** early, thus saving work. This is part of the IN-early-out optimization.
**
** P1 must be a valid b-tree cursor.
*/
_90:
;
pC7 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if int32((*TVdbeCursor)(unsafe.Pointer(pC7)).FseekHit) < (*TOp)(unsafe.Pointer(pOp)).Fp2 {
(*TVdbeCursor)(unsafe.Pointer(pC7)).FseekHit = uint16((*TOp)(unsafe.Pointer(pOp)).Fp2)
} else {
if int32((*TVdbeCursor)(unsafe.Pointer(pC7)).FseekHit) > (*TOp)(unsafe.Pointer(pOp)).Fp3 {
(*TVdbeCursor)(unsafe.Pointer(pC7)).FseekHit = uint16((*TOp)(unsafe.Pointer(pOp)).Fp3)
}
}
goto _187
/* Opcode: IfNotOpen P1 P2 * * *
** Synopsis: if( !csr[P1] ) goto P2
**
** If cursor P1 is not open or if P1 is set to a NULL row using the
** OP_NullRow opcode, then jump to instruction P2. Otherwise, fall through.
*/
_91:
;
pCur1 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if pCur1 == uintptr(0) || (*TVdbeCursor)(unsafe.Pointer(pCur1)).FnullRow != 0 {
goto jump_to_p2_and_check_for_interrupt
}
goto _187
/* Opcode: Found P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
** P4>0 then register P3 is the first of P4 registers that form an unpacked
** record.
**
** Cursor P1 is on an index btree. If the record identified by P3 and P4
** is a prefix of any entry in P1 then a jump is made to P2 and
** P1 is left pointing at the matching entry.
**
** This operation leaves the cursor in a state where it can be
** advanced in the forward direction. The Next instruction will work,
** but not the Prev instruction.
**
** See also: NotFound, NoConflict, NotExists, SeekGe
*/
/* Opcode: NotFound P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
** P4>0 then register P3 is the first of P4 registers that form an unpacked
** record.
**
** Cursor P1 is on an index btree. If the record identified by P3 and P4
** is not the prefix of any entry in P1 then a jump is made to P2. If P1
** does contain an entry whose prefix matches the P3/P4 record then control
** falls through to the next instruction and P1 is left pointing at the
** matching entry.
**
** This operation leaves the cursor in a state where it cannot be
** advanced in either direction. In other words, the Next and Prev
** opcodes do not work after this operation.
**
** See also: Found, NotExists, NoConflict, IfNoHope
*/
/* Opcode: IfNoHope P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** Register P3 is the first of P4 registers that form an unpacked
** record. Cursor P1 is an index btree. P2 is a jump destination.
** In other words, the operands to this opcode are the same as the
** operands to OP_NotFound and OP_IdxGT.
**
** This opcode is an optimization attempt only. If this opcode always
** falls through, the correct answer is still obtained, but extra work
** is performed.
**
** A value of N in the seekHit flag of cursor P1 means that there exists
** a key P3:N that will match some record in the index. We want to know
** if it is possible for a record P3:P4 to match some record in the
** index. If it is not possible, we can skip some work. So if seekHit
** is less than P4, attempt to find out if a match is possible by running
** OP_NotFound.
**
** This opcode is used in IN clause processing for a multi-column key.
** If an IN clause is attached to an element of the key other than the
** left-most element, and if there are no matches on the most recent
** seek over the whole key, then it might be that one of the key elements
** to the left is prohibiting a match, and hence there is "no hope" of
** any match regardless of how many IN clause elements are checked.
** In such a case, we abandon the IN clause search early, using this
** opcode. The opcode name comes from the fact that the
** jump is taken if there is "no hope" of achieving a match.
**
** See also: NotFound, SeekHit
*/
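/* Illustrative example: with an index on (a,b) and a query such as
**
**     SELECT * FROM t1 WHERE a=?1 AND b IN (SELECT y FROM t2);
**
** once a probe establishes that no index entry matches even the "a" prefix,
** the seekHit value stays below the full key width and OP_IfNoHope lets the
** loop skip the remaining IN candidates. (Sketch of the optimization
** described above, not an exact byte-code listing.)
*/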
/* Opcode: NoConflict P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
** P4>0 then register P3 is the first of P4 registers that form an unpacked
** record.
**
** Cursor P1 is on an index btree. If the record identified by P3 and P4
** contains any NULL value, jump immediately to P2. If all terms of the
** record are not-NULL then a check is done to determine if any row in the
** P1 index btree has a matching key prefix. If there are no matches, jump
** immediately to P2. If there is a match, fall through and leave the P1
** cursor pointing to the matching row.
**
** This opcode is similar to OP_NotFound with the exception that the
** branch is always taken if any part of the search key input is NULL.
**
** This operation leaves the cursor in a state where it cannot be
** advanced in either direction. In other words, the Next and Prev
** opcodes do not work after this operation.
**
** See also: NotFound, Found, NotExists
*/
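/* Illustrative example: OP_NoConflict is the natural shape for UNIQUE
** constraint checks over nullable columns, e.g.
**
**     CREATE TABLE t1(a, b, UNIQUE(a,b));
**     INSERT INTO t1 VALUES(1, NULL);  -- a NULL key part can never conflict
**
** since any candidate key containing a NULL cannot collide with an existing
** index entry. (The exact byte code is produced by the INSERT constraint
** checker; this is a sketch.)
*/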
_92:
;
pC8 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if int32((*TVdbeCursor)(unsafe.Pointer(pC8)).FseekHit) >= (*TOp)(unsafe.Pointer(pOp)).Fp4.Fi {
goto _187
}
/* Fall through into OP_NotFound */
_95:
; /* jump, in3, ncycle */
_94:
; /* jump, in3, ncycle */
_93:
;
pC9 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FaMem = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FnField = uint16((*TOp)(unsafe.Pointer(pOp)).Fp4.Fi)
if int32((*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FnField) > 0 {
/* Key values in an array of registers */
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FpKeyInfo = (*TVdbeCursor)(unsafe.Pointer(pC9)).FpKeyInfo
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).Fdefault_rc = 0
rc = _sqlite3BtreeIndexMoveto(tls, *(*uintptr)(unsafe.Pointer(pC9 + 48)), bp+208, pC9+36)
} else {
/* Composite key generated by OP_MakeRecord */
if int32((*TMem)(unsafe.Pointer((*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FaMem)).Fflags)&int32(MEM_Zero) != 0 {
v260 = _sqlite3VdbeMemExpandBlob(tls, (*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FaMem)
} else {
v260 = 0
}
rc = v260
if rc != 0 {
goto no_mem
}
pIdxKey = _sqlite3VdbeAllocUnpackedRecord(tls, (*TVdbeCursor)(unsafe.Pointer(pC9)).FpKeyInfo)
if pIdxKey == uintptr(0) {
goto no_mem
}
_sqlite3VdbeRecordUnpack(tls, (*TVdbeCursor)(unsafe.Pointer(pC9)).FpKeyInfo, (*TMem)(unsafe.Pointer((*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FaMem)).Fn, (*TMem)(unsafe.Pointer((*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FaMem)).Fz, pIdxKey)
(*TUnpackedRecord)(unsafe.Pointer(pIdxKey)).Fdefault_rc = 0
rc = _sqlite3BtreeIndexMoveto(tls, *(*uintptr)(unsafe.Pointer(pC9 + 48)), pIdxKey, pC9+36)
_sqlite3DbFreeNN(tls, db, pIdxKey)
}
if rc != SQLITE_OK {
goto abort_due_to_error
}
alreadyExists = libc.BoolInt32((*TVdbeCursor)(unsafe.Pointer(pC9)).FseekResult == 0)
(*TVdbeCursor)(unsafe.Pointer(pC9)).FnullRow = uint8(int32(1) - alreadyExists)
(*TVdbeCursor)(unsafe.Pointer(pC9)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC9)).FcacheStatus = uint32(CACHE_STALE)
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_Found) {
if alreadyExists != 0 {
goto jump_to_p2
}
} else {
if !(alreadyExists != 0) {
goto jump_to_p2
}
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_NoConflict) {
/* For the OP_NoConflict opcode, take the jump if any of the
** input fields are NULL, since any key with a NULL will not
** conflict */
ii1 = 0
for {
if !(ii1 < int32((*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FnField)) {
break
}
if int32((*(*TMem)(unsafe.Pointer((*(*TUnpackedRecord)(unsafe.Pointer(bp + 208))).FaMem + uintptr(ii1)*56))).Fflags)&int32(MEM_Null) != 0 {
goto jump_to_p2
}
goto _261
_261:
;
ii1++
}
}
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_IfNoHope) {
(*TVdbeCursor)(unsafe.Pointer(pC9)).FseekHit = uint16((*TOp)(unsafe.Pointer(pOp)).Fp4.Fi)
}
}
goto _187
/* Opcode: SeekRowid P1 P2 P3 * *
** Synopsis: intkey=r[P3]
**
** P1 is the index of a cursor open on an SQL table btree (with integer
** keys). If register P3 does not contain an integer or if P1 does not
** contain a record with rowid P3 then jump immediately to P2.
** Or, if P2 is 0, raise an SQLITE_CORRUPT error. If P1 does contain
** a record with rowid P3 then
** leave the cursor pointing at that record and fall through to the next
** instruction.
**
** The OP_NotExists opcode performs the same operation, but with OP_NotExists
** the P3 register must be guaranteed to contain an integer value. With this
** opcode, register P3 might not contain an integer.
**
** The OP_NotFound opcode performs the same operation on index btrees
** (with arbitrary multi-value keys).
**
** This opcode leaves the cursor in a state where it cannot be advanced
** in either direction. In other words, the Next and Prev opcodes will
** not work following this opcode.
**
** See also: Found, NotFound, NoConflict, SeekRowid
*/
/* Opcode: NotExists P1 P2 P3 * *
** Synopsis: intkey=r[P3]
**
** P1 is the index of a cursor open on an SQL table btree (with integer
** keys). P3 is an integer rowid. If P1 does not contain a record with
** rowid P3 then jump immediately to P2. Or, if P2 is 0, raise an
** SQLITE_CORRUPT error. If P1 does contain a record with rowid P3 then
** leave the cursor pointing at that record and fall through to the next
** instruction.
**
** The OP_SeekRowid opcode performs the same operation but also allows the
** P3 register to contain a non-integer value, in which case the jump is
** always taken. This opcode requires that P3 always contain an integer.
**
** The OP_NotFound opcode performs the same operation on index btrees
** (with arbitrary multi-value keys).
**
** This opcode leaves the cursor in a state where it cannot be advanced
** in either direction. In other words, the Next and Prev opcodes will
** not work following this opcode.
**
** See also: Found, NotFound, NoConflict, SeekRowid
*/
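/* Illustrative example: an exact rowid lookup on user-supplied input, e.g.
**
**     SELECT * FROM t1 WHERE rowid = ?1;
**
** uses OP_SeekRowid because the bound value might not be an integer, whereas
** internally generated lookups that are known to hold an integer key can use
** OP_NotExists. (Schematic description of the distinction documented above.)
*/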
_97:
;
pIn3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if int32((*TMem)(unsafe.Pointer(pIn3)).Fflags)&(libc.Int32FromInt32(MEM_Int)|libc.Int32FromInt32(MEM_IntReal)) == 0 {
/* If pIn3->u.i does not contain an integer, compute iKey as the
** integer value of pIn3. Jump to P2 if pIn3 cannot be converted
** into an integer without loss of information. Take care to avoid
** changing the datatype of pIn3, however, as it is used by other
** parts of the prepared statement. */
*(*TMem)(unsafe.Pointer(bp + 256)) = TMem{}
*(*Tsqlite3_value1)(unsafe.Pointer(bp + 256)) = *(*TMem)(unsafe.Pointer(pIn3))
_applyAffinity(tls, bp+256, int8(SQLITE_AFF_NUMERIC), encoding)
if int32((*(*TMem)(unsafe.Pointer(bp + 256))).Fflags)&int32(MEM_Int) == 0 {
goto jump_to_p2
}
iKey1 = uint64(*(*Ti64)(unsafe.Pointer(bp + 256)))
goto notExistsWithKey
}
/* Fall through into OP_NotExists */
_96:
; /* jump, in3, ncycle */
pIn3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
iKey1 = uint64(*(*Ti64)(unsafe.Pointer(pIn3)))
goto notExistsWithKey
notExistsWithKey:
;
pC10 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pCrsr2 = *(*uintptr)(unsafe.Pointer(pC10 + 48))
*(*int32)(unsafe.Pointer(bp + 248)) = 0
rc = _sqlite3BtreeTableMoveto(tls, pCrsr2, int64(iKey1), 0, bp+248)
(*TVdbeCursor)(unsafe.Pointer(pC10)).FmovetoTarget = int64(iKey1) /* Used by OP_Delete */
(*TVdbeCursor)(unsafe.Pointer(pC10)).FnullRow = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC10)).FcacheStatus = uint32(CACHE_STALE)
(*TVdbeCursor)(unsafe.Pointer(pC10)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC10)).FseekResult = *(*int32)(unsafe.Pointer(bp + 248))
if *(*int32)(unsafe.Pointer(bp + 248)) != 0 {
if (*TOp)(unsafe.Pointer(pOp)).Fp2 == 0 {
rc = _sqlite3CorruptError(tls, int32(98111))
} else {
goto jump_to_p2
}
}
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: Sequence P1 P2 * * *
** Synopsis: r[P2]=cursor[P1].ctr++
**
** Find the next available sequence number for cursor P1.
** Write the sequence number into register P2.
** The sequence number on the cursor is incremented after this
** instruction.
*/
_98:
; /* out2 */
pOut = _out2Prerelease(tls, p, pOp)
v263 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8)) + 24
v262 = *(*Ti64)(unsafe.Pointer(v263))
*(*Ti64)(unsafe.Pointer(v263))++
*(*Ti64)(unsafe.Pointer(pOut)) = v262
goto _187
/* Opcode: NewRowid P1 P2 P3 * *
** Synopsis: r[P2]=rowid
**
** Get a new integer record number (a.k.a "rowid") used as the key to a table.
** The record number is not previously used as a key in the database
** table that cursor P1 points to. The new record number is written
** to register P2.
**
** If P3>0 then P3 is a register in the root frame of this VDBE that holds
** the largest previously generated record number. No new record numbers are
** allowed to be less than this value. When this value reaches its maximum,
** an SQLITE_FULL error is generated. The P3 register is updated with the
** generated record number. This P3 mechanism is used to help implement the
** AUTOINCREMENT feature.
*/
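/* Illustrative example of the behaviour documented above (a sketch, not a
** byte-code listing):
**
**     CREATE TABLE t1(a);
**     INSERT INTO t1(a) VALUES('x');   -- rowid chosen here: max(rowid)+1
**
** With INTEGER PRIMARY KEY AUTOINCREMENT the P3 register carries the
** high-water mark persisted in sqlite_sequence, so rowids are never reused.
*/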
_99:
; /* Root frame of VDBE */
*(*Ti64)(unsafe.Pointer(bp + 312)) = 0
*(*int32)(unsafe.Pointer(bp + 320)) = 0
pOut = _out2Prerelease(tls, p, pOp)
pC11 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
/* The next rowid or record number (different terms for the same
** thing) is obtained in a two-step algorithm.
**
** First we attempt to find the largest existing rowid and add one
** to that. But if the largest existing rowid is already the maximum
** positive integer, we have to fall through to the second
** probabilistic algorithm.
**
** The second algorithm is to select a rowid at random and see if
** it already exists in the table. If it does not exist, we have
** succeeded. If the random rowid does exist, we select a new one
** and try again, up to 100 times.
*/
/* Some compilers complain about constants of the form 0x7fffffffffffffff.
** Others complain about 0x7ffffffffffffffffLL. The following macro seems
** to provide the constant while making all compilers happy.
*/
if !(int32(TBool(*(*uint8)(unsafe.Pointer(pC11 + 8))&0x2>>1)) != 0) {
rc = _sqlite3BtreeLast(tls, *(*uintptr)(unsafe.Pointer(pC11 + 48)), bp+320)
if rc != SQLITE_OK {
goto abort_due_to_error
}
if *(*int32)(unsafe.Pointer(bp + 320)) != 0 {
*(*Ti64)(unsafe.Pointer(bp + 312)) = int64(1) /* IMP: R-61914-48074 */
} else {
*(*Ti64)(unsafe.Pointer(bp + 312)) = _sqlite3BtreeIntegerKey(tls, *(*uintptr)(unsafe.Pointer(pC11 + 48)))
if *(*Ti64)(unsafe.Pointer(bp + 312)) >= int64(libc.Uint64FromInt32(0x7fffffff)<>1)) != 0 {
rc = int32(SQLITE_FULL) /* IMP: R-17817-00630 */
goto abort_due_to_error
}
if *(*Ti64)(unsafe.Pointer(bp + 312)) < *(*Ti64)(unsafe.Pointer(pMem))+int64(1) {
*(*Ti64)(unsafe.Pointer(bp + 312)) = *(*Ti64)(unsafe.Pointer(pMem)) + int64(1)
}
*(*Ti64)(unsafe.Pointer(pMem)) = *(*Ti64)(unsafe.Pointer(bp + 312))
}
if int32(TBool(*(*uint8)(unsafe.Pointer(pC11 + 8))&0x2>>1)) != 0 {
/* IMPLEMENTATION-OF: R-07677-41881 If the largest ROWID is equal to the
** largest possible integer (9223372036854775807) then the database
** engine starts picking positive candidate ROWIDs at random until
** it finds one that is not previously used. */
/* We cannot be in random rowid mode if this is
** an AUTOINCREMENT table. */
cnt1 = 0
for {
Xsqlite3_randomness(tls, int32(8), bp+312)
*(*Ti64)(unsafe.Pointer(bp + 312)) &= int64(libc.Uint64FromInt32(0x7fffffff)<<libc.Int32FromInt32(32)|libc.Uint64FromInt32(0xffffffff)) >> libc.Int32FromInt32(1)
*(*Ti64)(unsafe.Pointer(bp + 312))++ /* Ensure that v is greater than zero */
goto _268
_268:
;
v265 = _sqlite3BtreeTableMoveto(tls, *(*uintptr)(unsafe.Pointer(pC11 + 48)), int64(uint64(*(*Ti64)(unsafe.Pointer(bp + 312)))), 0, bp+320)
rc = v265
if v267 = v265 == SQLITE_OK && *(*int32)(unsafe.Pointer(bp + 320)) == 0; v267 {
cnt1++
v266 = cnt1
}
if !(v267 && v266 < int32(100)) {
break
}
}
if rc != 0 {
goto abort_due_to_error
}
if *(*int32)(unsafe.Pointer(bp + 320)) == 0 {
rc = int32(SQLITE_FULL) /* IMP: R-38219-53002 */
goto abort_due_to_error
}
/* EV: R-40812-03570 */
}
(*TVdbeCursor)(unsafe.Pointer(pC11)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC11)).FcacheStatus = uint32(CACHE_STALE)
*(*Ti64)(unsafe.Pointer(pOut)) = *(*Ti64)(unsafe.Pointer(bp + 312))
goto _187
/* Opcode: Insert P1 P2 P3 P4 P5
** Synopsis: intkey=r[P3] data=r[P2]
**
** Write an entry into the table of cursor P1. A new entry is
** created if it doesn't already exist or the data for an existing
** entry is overwritten. The data is the value MEM_Blob stored in register
** number P2. The key is stored in register P3. The key must
** be a MEM_Int.
**
** If the OPFLAG_NCHANGE flag of P5 is set, then the row change count is
** incremented (otherwise not). If the OPFLAG_LASTROWID flag of P5 is set,
** then rowid is stored for subsequent return by the
** sqlite3_last_insert_rowid() function (otherwise it is unmodified).
**
** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might
** run faster by avoiding an unnecessary seek on cursor P1. However,
** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior
** seeks on the cursor or if the most recent seek used a key equal to P3.
**
** If the OPFLAG_ISUPDATE flag is set, then this opcode is part of an
** UPDATE operation. Otherwise (if the flag is clear) then this opcode
** is part of an INSERT operation. The difference is only important to
** the update hook.
**
** Parameter P4 may point to a Table structure, or may be NULL. If it is
** not NULL, then the update-hook (sqlite3.xUpdateCallback) is invoked
** following a successful insert.
**
** (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically
** allocated, then ownership of P2 is transferred to the pseudo-cursor
** and register P2 becomes ephemeral. If the cursor is changed, the
** value of register P2 will then change. Make sure this does not
** cause any problems.)
**
** This instruction only works on tables. The equivalent instruction
** for indices is OP_IdxInsert.
*/
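/* Illustrative example: when OPFLAG_LASTROWID is set, the key written here
** becomes visible to the application, e.g.
**
**     INSERT INTO t1(a) VALUES('x');
**     SELECT last_insert_rowid();
**
** and equivalently through sqlite3_last_insert_rowid() on the same
** connection.
*/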
_100:
; /* Payload to be inserted */
pData = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
pC12 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pKey = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
(*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FnKey = *(*Ti64)(unsafe.Pointer(pKey))
if int32((*TOp)(unsafe.Pointer(pOp)).Fp4type) == -int32(5) && ((*Tsqlite3)(unsafe.Pointer(db)).FxPreUpdateCallback != 0 || (*Tsqlite3)(unsafe.Pointer(db)).FxUpdateCallback != 0) {
zDb = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TVdbeCursor)(unsafe.Pointer(pC12)).FiDb)*32))).FzDbSName
pTab1 = *(*uintptr)(unsafe.Pointer(pOp + 16))
} else {
pTab1 = uintptr(0)
zDb = uintptr(0)
}
/* Invoke the pre-update hook, if any */
if pTab1 != 0 {
if (*Tsqlite3)(unsafe.Pointer(db)).FxPreUpdateCallback != 0 && !(int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&libc.Int32FromInt32(OPFLAG_ISUPDATE) != 0) {
_sqlite3VdbePreUpdateHook(tls, p, pC12, int32(SQLITE_INSERT), zDb, pTab1, (*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FnKey, (*TOp)(unsafe.Pointer(pOp)).Fp2, -int32(1))
}
if (*Tsqlite3)(unsafe.Pointer(db)).FxUpdateCallback == uintptr(0) || (*TTable)(unsafe.Pointer(pTab1)).FaCol == uintptr(0) {
/* Prevent post-update hook from running in cases when it should not */
pTab1 = uintptr(0)
}
}
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_ISNOOP) != 0 {
goto _187
}
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_NCHANGE) != 0 {
(*TVdbe)(unsafe.Pointer(p)).FnChange++
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_LASTROWID) != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FlastRowid = (*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FnKey
}
}
(*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FpData = (*TMem)(unsafe.Pointer(pData)).Fz
(*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FnData = (*TMem)(unsafe.Pointer(pData)).Fn
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_USESEEKRESULT) != 0 {
v269 = (*TVdbeCursor)(unsafe.Pointer(pC12)).FseekResult
} else {
v269 = 0
}
seekResult = v269
if int32((*TMem)(unsafe.Pointer(pData)).Fflags)&int32(MEM_Zero) != 0 {
(*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FnZero = *(*int32)(unsafe.Pointer(&(*TMem)(unsafe.Pointer(pData)).Fu))
} else {
(*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FnZero = 0
}
(*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FpKey = uintptr(0)
rc = _sqlite3BtreeInsert(tls, *(*uintptr)(unsafe.Pointer(pC12 + 48)), bp+328, int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&(libc.Int32FromInt32(OPFLAG_APPEND)|libc.Int32FromInt32(OPFLAG_SAVEPOSITION)|libc.Int32FromInt32(OPFLAG_PREFORMAT)), seekResult)
(*TVdbeCursor)(unsafe.Pointer(pC12)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC12)).FcacheStatus = uint32(CACHE_STALE)
colCacheCtr++
/* Invoke the update-hook if required. */
if rc != 0 {
goto abort_due_to_error
}
if pTab1 != 0 {
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_ISUPDATE) != 0 {
v270 = int32(SQLITE_UPDATE)
} else {
v270 = int32(SQLITE_INSERT)
}
(*(*func(*libc.TLS, uintptr, int32, uintptr, uintptr, Tsqlite_int64))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxUpdateCallback})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpUpdateArg, v270, zDb, (*TTable)(unsafe.Pointer(pTab1)).FzName, (*(*TBtreePayload)(unsafe.Pointer(bp + 328))).FnKey)
}
goto _187
/* Opcode: RowCell P1 P2 P3 * *
**
** P1 and P2 are both open cursors. Both must be opened on the same type
** of table - intkey or index. This opcode is used as part of copying
** the current row from P2 into P1. If the cursors are opened on intkey
** tables, register P3 contains the rowid to use with the new record in
** P1. If they are opened on index tables, P3 is not used.
**
** This opcode must be followed by either an Insert or InsertIdx opcode
** with the OPFLAG_PREFORMAT flag set to complete the insert operation.
*/
_101:
; /* Rowid value to insert with */
pDest1 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pSrc = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*8))
if (*TOp)(unsafe.Pointer(pOp)).Fp3 != 0 {
v271 = *(*Ti64)(unsafe.Pointer(aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56))
} else {
v271 = 0
}
iKey2 = v271
rc = _sqlite3BtreeTransferRow(tls, *(*uintptr)(unsafe.Pointer(pDest1 + 48)), *(*uintptr)(unsafe.Pointer(pSrc + 48)), iKey2)
if rc != SQLITE_OK {
goto abort_due_to_error
}
goto _187
/* Opcode: Delete P1 P2 P3 P4 P5
**
** Delete the record at which the P1 cursor is currently pointing.
**
** If the OPFLAG_SAVEPOSITION bit of the P5 parameter is set, then
** the cursor will be left pointing at either the next or the previous
** record in the table. If it is left pointing at the next record, then
** the next Next instruction will be a no-op. As a result, in this case
** it is ok to delete a record from within a Next loop. If
** OPFLAG_SAVEPOSITION bit of P5 is clear, then the cursor will be
** left in an undefined state.
**
** If the OPFLAG_AUXDELETE bit is set on P5, that indicates that this
** delete is one of several associated with deleting a table row and
** all its associated index entries. Exactly one of those deletes is
** the "primary" delete. The others are all on OPFLAG_FORDELETE
** cursors or else are marked with the AUXDELETE flag.
**
** If the OPFLAG_NCHANGE (0x01) flag of P2 (NB: P2 not P5) is set, then
** the row change count is incremented (otherwise not).
**
** If the OPFLAG_ISNOOP (0x40) flag of P2 (not P5!) is set, then the
** pre-update-hook for deletes is run, but the btree is otherwise unchanged.
** This happens when the OP_Delete is to be shortly followed by an OP_Insert
** with the same key, causing the btree entry to be overwritten.
**
** P1 must not be a pseudo-table. It has to be a real table with
** multiple rows.
**
** If P4 is not NULL then it points to a Table object. In this case either
** the update or pre-update hook, or both, may be invoked. The P1 cursor must
** have been positioned using OP_NotFound prior to invoking this opcode in
** this case. Specifically, if one is configured, the pre-update hook is
** invoked if P4 is not NULL. The update-hook is invoked if one is configured,
** P4 is not NULL, and the OPFLAG_NCHANGE flag is set in P2.
**
** If the OPFLAG_ISUPDATE flag is set in P2, then P3 contains the address
** of the memory cell that contains the value that the rowid of the row will
** be set to by the update.
*/
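/* Illustrative example: a statement such as
**
**     DELETE FROM t1 WHERE a > 10;
**
** deletes rows from inside a Rewind/Next scan, so the code generator sets
** OPFLAG_SAVEPOSITION on this opcode to keep the cursor valid for the
** following OP_Next, as described above. (Schematic; flag selection is up
** to the code generator.)
*/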
_102:
;
opflags = (*TOp)(unsafe.Pointer(pOp)).Fp2
pC13 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
/* If the update-hook or pre-update-hook will be invoked, set zDb to
** the name of the db to pass to it. Also set local pTab to a copy
** of p4.pTab. Finally, if p5 is true, indicating that this cursor was
** last moved with OP_Next or OP_Prev, not Seek or NotFound, set
** VdbeCursor.movetoTarget to the current rowid. */
if int32((*TOp)(unsafe.Pointer(pOp)).Fp4type) == -int32(5) && ((*Tsqlite3)(unsafe.Pointer(db)).FxPreUpdateCallback != 0 || (*Tsqlite3)(unsafe.Pointer(db)).FxUpdateCallback != 0) {
zDb1 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TVdbeCursor)(unsafe.Pointer(pC13)).FiDb)*32))).FzDbSName
pTab2 = *(*uintptr)(unsafe.Pointer(pOp + 16))
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_SAVEPOSITION) != 0 && (*TVdbeCursor)(unsafe.Pointer(pC13)).FisTable != 0 {
(*TVdbeCursor)(unsafe.Pointer(pC13)).FmovetoTarget = _sqlite3BtreeIntegerKey(tls, *(*uintptr)(unsafe.Pointer(pC13 + 48)))
}
} else {
zDb1 = uintptr(0)
pTab2 = uintptr(0)
}
/* Invoke the pre-update-hook if required. */
if (*Tsqlite3)(unsafe.Pointer(db)).FxPreUpdateCallback != 0 && pTab2 != 0 {
if opflags&int32(OPFLAG_ISUPDATE) != 0 {
v272 = int32(SQLITE_UPDATE)
} else {
v272 = int32(SQLITE_DELETE)
}
_sqlite3VdbePreUpdateHook(tls, p, pC13, v272, zDb1, pTab2, (*TVdbeCursor)(unsafe.Pointer(pC13)).FmovetoTarget, (*TOp)(unsafe.Pointer(pOp)).Fp3, -int32(1))
}
if opflags&int32(OPFLAG_ISNOOP) != 0 {
goto _187
}
/* Only flags that can be set are SAVEPOSITION and AUXDELETE */
rc = _sqlite3BtreeDelete(tls, *(*uintptr)(unsafe.Pointer(pC13 + 48)), uint8((*TOp)(unsafe.Pointer(pOp)).Fp5))
(*TVdbeCursor)(unsafe.Pointer(pC13)).FcacheStatus = uint32(CACHE_STALE)
colCacheCtr++
(*TVdbeCursor)(unsafe.Pointer(pC13)).FseekResult = 0
if rc != 0 {
goto abort_due_to_error
}
/* Invoke the update-hook if required. */
if opflags&int32(OPFLAG_NCHANGE) != 0 {
(*TVdbe)(unsafe.Pointer(p)).FnChange++
if (*Tsqlite3)(unsafe.Pointer(db)).FxUpdateCallback != 0 && pTab2 != uintptr(0) && (*TTable)(unsafe.Pointer(pTab2)).FtabFlags&uint32(TF_WithoutRowid) == uint32(0) {
(*(*func(*libc.TLS, uintptr, int32, uintptr, uintptr, Tsqlite_int64))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxUpdateCallback})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpUpdateArg, int32(SQLITE_DELETE), zDb1, (*TTable)(unsafe.Pointer(pTab2)).FzName, (*TVdbeCursor)(unsafe.Pointer(pC13)).FmovetoTarget)
}
}
goto _187
/* Opcode: ResetCount * * * * *
**
** The value of the change counter is copied to the database handle
** change counter (returned by subsequent calls to sqlite3_changes()).
** Then the VM's internal change counter resets to 0.
** This is used by trigger programs.
*/
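/* Illustrative example: the counter copied here is what the application
** observes, e.g.
**
**     UPDATE t1 SET a = a + 1;
**     SELECT changes();            -- same value as sqlite3_changes()
**
** Rows modified from within trigger programs are kept out of that
** statement-level count.
*/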
_103:
;
_sqlite3VdbeSetChanges(tls, db, (*TVdbe)(unsafe.Pointer(p)).FnChange)
(*TVdbe)(unsafe.Pointer(p)).FnChange = 0
goto _187
/* Opcode: SorterCompare P1 P2 P3 P4
** Synopsis: if key(P1)!=trim(r[P3],P4) goto P2
**
** P1 is a sorter cursor. This instruction compares a prefix of the
** record blob in register P3 against a prefix of the entry that
** the sorter cursor currently points to. Only the first P4 fields
** of r[P3] and the sorter record are compared.
**
** If either P3 or the sorter contains a NULL in one of their significant
** fields (not counting the P4 fields at the end which are ignored) then
** the comparison is assumed to be equal.
**
** Fall through to next instruction if the two records compare equal to
** each other. Jump to P2 if they are different.
*/
_104:
;
pC14 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pIn3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
nKeyCol = (*TOp)(unsafe.Pointer(pOp)).Fp4.Fi
*(*int32)(unsafe.Pointer(bp + 376)) = 0
rc = _sqlite3VdbeSorterCompare(tls, pC14, pIn3, nKeyCol, bp+376)
if rc != 0 {
goto abort_due_to_error
}
if *(*int32)(unsafe.Pointer(bp + 376)) != 0 {
goto jump_to_p2
}
goto _187
/* Opcode: SorterData P1 P2 P3 * *
** Synopsis: r[P2]=data
**
** Write into register P2 the current sorter data for sorter cursor P1.
** Then clear the column header cache on cursor P3.
**
** This opcode is normally used to move a record out of the sorter and into
** a register that is the source for a pseudo-table cursor created using
** OpenPseudo. That pseudo-table cursor is the one that is identified by
** parameter P3. Clearing the P3 column cache as part of this opcode saves
** us from having to issue a separate NullRow instruction to clear that cache.
*/
_105:
;
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
pC15 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
rc = _sqlite3VdbeSorterRowkey(tls, pC15, pOut)
if rc != 0 {
goto abort_due_to_error
}
(*TVdbeCursor)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*8)))).FcacheStatus = uint32(CACHE_STALE)
goto _187
/* Opcode: RowData P1 P2 P3 * *
** Synopsis: r[P2]=data
**
** Write into register P2 the complete row content for the row at
** which cursor P1 is currently pointing.
** There is no interpretation of the data.
** It is just copied onto the P2 register exactly as
** it is found in the database file.
**
** If cursor P1 is an index, then the content is the key of the row.
** If cursor P1 is a table, then the content extracted is the data.
**
** The P1 cursor must be pointing to a valid row (not a NULL row)
** of a real table, not a pseudo-table.
**
** If P3!=0 then this opcode is allowed to make an ephemeral pointer
** into the database page. That means that the content of the output
** register will be invalidated as soon as the cursor moves - including
** moves caused by other cursors that "save" the current cursor's
** position in order that they can write to the same table. If P3==0
** then a copy of the data is made into memory. P3!=0 is faster, but
** P3==0 is safer.
**
** If P3!=0 then the content of the P2 register is unsuitable for use
** in OP_Result and any OP_Result will invalidate the P2 register content.
** The P2 register content is invalidated by opcodes like OP_Function or
** by any use of another cursor pointing to the same table.
*/
_106:
;
pOut = _out2Prerelease(tls, p, pOp)
pC16 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pCrsr3 = *(*uintptr)(unsafe.Pointer(pC16 + 48))
/* The OP_RowData opcodes always follow OP_NotExists or
** OP_SeekRowid or OP_Rewind/OP_Next with no intervening instructions
** that might invalidate the cursor.
** If this were not the case, one of the following assert()s
** would fail. Should this ever change (because of changes in the code
** generator) then the fix would be to insert a call to
** sqlite3VdbeCursorMoveto().
*/
n3 = _sqlite3BtreePayloadSize(tls, pCrsr3)
if n3 > uint32(*(*int32)(unsafe.Pointer(db + 136))) {
goto too_big
}
rc = _sqlite3VdbeMemFromBtreeZeroOffset(tls, pCrsr3, n3, pOut)
if rc != 0 {
goto abort_due_to_error
}
if !((*TOp)(unsafe.Pointer(pOp)).Fp3 != 0) {
if int32((*TMem)(unsafe.Pointer(pOut)).Fflags)&int32(MEM_Ephem) != 0 && _sqlite3VdbeMemMakeWriteable(tls, pOut) != 0 {
goto no_mem
}
}
goto _187
/* Opcode: Rowid P1 P2 * * *
** Synopsis: r[P2]=PX rowid of P1
**
** Store in register P2 an integer which is the key of the table entry that
** P1 is currently pointing to.
**
** P1 can be either an ordinary table or a virtual table. There used to
** be a separate OP_VRowid opcode for use with virtual tables, but this
** one opcode now works for both table types.
*/
_107:
;
pOut = _out2Prerelease(tls, p, pOp)
pC17 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if (*TVdbeCursor)(unsafe.Pointer(pC17)).FnullRow != 0 {
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Null)
goto _187
} else {
if (*TVdbeCursor)(unsafe.Pointer(pC17)).FdeferredMoveto != 0 {
*(*Ti64)(unsafe.Pointer(bp + 384)) = (*TVdbeCursor)(unsafe.Pointer(pC17)).FmovetoTarget
} else {
if int32((*TVdbeCursor)(unsafe.Pointer(pC17)).FeCurType) == int32(CURTYPE_VTAB) {
pVtab = (*Tsqlite3_vtab_cursor)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pC17 + 48)))).FpVtab
pModule = (*Tsqlite3_vtab)(unsafe.Pointer(pVtab)).FpModule
rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_module)(unsafe.Pointer(pModule)).FxRowid})))(tls, *(*uintptr)(unsafe.Pointer(pC17 + 48)), bp+384)
_sqlite3VtabImportErrmsg(tls, p, pVtab)
if rc != 0 {
goto abort_due_to_error
}
} else {
rc = _sqlite3VdbeCursorRestore(tls, pC17)
if rc != 0 {
goto abort_due_to_error
}
if (*TVdbeCursor)(unsafe.Pointer(pC17)).FnullRow != 0 {
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Null)
goto _187
}
*(*Ti64)(unsafe.Pointer(bp + 384)) = _sqlite3BtreeIntegerKey(tls, *(*uintptr)(unsafe.Pointer(pC17 + 48)))
}
}
}
*(*Ti64)(unsafe.Pointer(pOut)) = *(*Ti64)(unsafe.Pointer(bp + 384))
goto _187
/* Opcode: NullRow P1 * * * *
**
** Move the cursor P1 to a null row. Any OP_Column operations
** that occur while the cursor is on the null row will always
** write a NULL.
**
** If cursor P1 is not previously opened, open it now to a special
** pseudo-cursor that always returns NULL for every column.
*/
_108:
;
pC18 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if pC18 == uintptr(0) {
/* If the cursor is not already open, create a special kind of
** pseudo-cursor that always gives null rows. */
pC18 = _allocateCursor(tls, p, (*TOp)(unsafe.Pointer(pOp)).Fp1, int32(1), uint8(CURTYPE_PSEUDO))
if pC18 == uintptr(0) {
goto no_mem
}
(*TVdbeCursor)(unsafe.Pointer(pC18)).FseekResult = 0
(*TVdbeCursor)(unsafe.Pointer(pC18)).FisTable = uint8(1)
libc.SetBitFieldPtr8Uint32(pC18+8, libc.Uint32FromInt32(1), 3, 0x8)
*(*uintptr)(unsafe.Pointer(pC18 + 48)) = _sqlite3BtreeFakeValidCursor(tls)
}
(*TVdbeCursor)(unsafe.Pointer(pC18)).FnullRow = uint8(1)
(*TVdbeCursor)(unsafe.Pointer(pC18)).FcacheStatus = uint32(CACHE_STALE)
if int32((*TVdbeCursor)(unsafe.Pointer(pC18)).FeCurType) == CURTYPE_BTREE {
_sqlite3BtreeClearCursor(tls, *(*uintptr)(unsafe.Pointer(pC18 + 48)))
}
goto _187
/* Opcode: SeekEnd P1 * * * *
**
** Position cursor P1 at the end of the btree for the purpose of
** appending a new entry onto the btree.
**
** It is assumed that the cursor is used only for appending and so
** if the cursor is valid, then the cursor must already be pointing
** at the end of the btree and so no changes are made to
** the cursor.
*/
/* Opcode: Last P1 P2 * * *
**
** The next use of the Rowid or Column or Prev instruction for P1
** will refer to the last entry in the database table or index.
** If the table or index is empty and P2>0, then jump immediately to P2.
** If P2 is 0 or if the table or index is not empty, fall through
** to the following instruction.
**
** This opcode leaves the cursor configured to move in reverse order,
** from the end toward the beginning. In other words, the cursor is
** configured to use Prev, not Next.
*/
_110:
; /* ncycle */
_109:
;
pC19 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pCrsr4 = *(*uintptr)(unsafe.Pointer(pC19 + 48))
*(*int32)(unsafe.Pointer(bp + 392)) = 0
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_SeekEnd) {
(*TVdbeCursor)(unsafe.Pointer(pC19)).FseekResult = -int32(1)
if _sqlite3BtreeCursorIsValidNN(tls, pCrsr4) != 0 {
goto _187
}
}
rc = _sqlite3BtreeLast(tls, pCrsr4, bp+392)
(*TVdbeCursor)(unsafe.Pointer(pC19)).FnullRow = uint8(*(*int32)(unsafe.Pointer(bp + 392)))
(*TVdbeCursor)(unsafe.Pointer(pC19)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC19)).FcacheStatus = uint32(CACHE_STALE)
if rc != 0 {
goto abort_due_to_error
}
if (*TOp)(unsafe.Pointer(pOp)).Fp2 > 0 {
if *(*int32)(unsafe.Pointer(bp + 392)) != 0 {
goto jump_to_p2
}
}
goto _187
/* Opcode: IfSmaller P1 P2 P3 * *
**
** Estimate the number of rows in the table P1. Jump to P2 if that
** estimate is less than approximately 2**(0.1*P3).
*/
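/* Worked example (illustrative only): with P3==50 the threshold is roughly
** 2**(0.1*50) == 32, so the jump to P2 is taken only when the b-tree is
** estimated to contain fewer than about 32 rows.
*/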
_111:
;
pC20 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pCrsr5 = *(*uintptr)(unsafe.Pointer(pC20 + 48))
rc = _sqlite3BtreeFirst(tls, pCrsr5, bp+396)
if rc != 0 {
goto abort_due_to_error
}
if *(*int32)(unsafe.Pointer(bp + 396)) == 0 {
sz = _sqlite3BtreeRowCountEst(tls, pCrsr5)
if sz >= 0 && int32(_sqlite3LogEst(tls, uint64(sz))) < (*TOp)(unsafe.Pointer(pOp)).Fp3 {
*(*int32)(unsafe.Pointer(bp + 396)) = int32(1)
}
}
if *(*int32)(unsafe.Pointer(bp + 396)) != 0 {
goto jump_to_p2
}
goto _187
/* Opcode: SorterSort P1 P2 * * *
**
** After all records have been inserted into the Sorter object
** identified by P1, invoke this opcode to actually do the sorting.
** Jump to P2 if there are no records to be sorted.
**
** This opcode is an alias for OP_Sort and OP_Rewind that is used
** for Sorter objects.
*/
/* Opcode: Sort P1 P2 * * *
**
** This opcode does exactly the same thing as OP_Rewind except that
** it increments an undocumented global variable used for testing.
**
** Sorting is accomplished by writing records into a sorting index,
** then rewinding that index and playing it back from beginning to
** end. We use the OP_Sort opcode instead of OP_Rewind to do the
** rewinding so that the global variable will be incremented and
** regression tests can determine whether or not the optimizer is
** correctly optimizing out sorts.
*/
_113:
; /* jump ncycle */
_112:
; /* jump ncycle */
*(*Tu32)(unsafe.Pointer(p + 212 + 2*4))++
/* Fall through into OP_Rewind */
/* Opcode: Rewind P1 P2 * * *
**
** The next use of the Rowid or Column or Next instruction for P1
** will refer to the first entry in the database table or index.
** If the table or index is empty, jump immediately to P2.
** If the table or index is not empty, fall through to the following
** instruction.
**
** If P2 is zero, that is an assertion that the P1 table is never
** empty and hence the jump will never be taken.
**
** This opcode leaves the cursor configured to move in forward order,
** from the beginning toward the end. In other words, the cursor is
** configured to use Next, not Prev.
*/
_114:
;
pC21 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
*(*int32)(unsafe.Pointer(bp + 400)) = int32(1)
if int32((*TVdbeCursor)(unsafe.Pointer(pC21)).FeCurType) == int32(CURTYPE_SORTER) {
rc = _sqlite3VdbeSorterRewind(tls, pC21, bp+400)
} else {
pCrsr6 = *(*uintptr)(unsafe.Pointer(pC21 + 48))
rc = _sqlite3BtreeFirst(tls, pCrsr6, bp+400)
(*TVdbeCursor)(unsafe.Pointer(pC21)).FdeferredMoveto = uint8(0)
(*TVdbeCursor)(unsafe.Pointer(pC21)).FcacheStatus = uint32(CACHE_STALE)
}
if rc != 0 {
goto abort_due_to_error
}
(*TVdbeCursor)(unsafe.Pointer(pC21)).FnullRow = uint8(*(*int32)(unsafe.Pointer(bp + 400)))
if (*TOp)(unsafe.Pointer(pOp)).Fp2 > 0 {
if *(*int32)(unsafe.Pointer(bp + 400)) != 0 {
goto jump_to_p2
}
}
goto _187
/* Opcode: Next P1 P2 P3 * P5
**
** Advance cursor P1 so that it points to the next key/data pair in its
** table or index. If there are no more key/value pairs then fall through
** to the following instruction. But if the cursor advance was successful,
** jump immediately to P2.
**
** The Next opcode is only valid following a SeekGT, SeekGE, or
** OP_Rewind opcode used to position the cursor. Next is not allowed
** to follow SeekLT, SeekLE, or OP_Last.
**
** The P1 cursor must be for a real table, not a pseudo-table. P1 must have
** been opened prior to this opcode or the program will segfault.
**
** The P3 value is a hint to the btree implementation. If P3==1, that
** means P1 is an SQL index and that this instruction could have been
** omitted if that index had been unique. P3 is usually 0. P3 is
** always either 0 or 1.
**
** If P5 is positive and the jump is taken, then event counter
** number P5-1 in the prepared statement is incremented.
**
** See also: Prev
*/
/* Opcode: Prev P1 P2 P3 * P5
**
** Back up cursor P1 so that it points to the previous key/data pair in its
** table or index. If there are no previous key/value pairs then fall through
** to the following instruction. But if the cursor backup was successful,
** jump immediately to P2.
**
**
** The Prev opcode is only valid following a SeekLT, SeekLE, or
** OP_Last opcode used to position the cursor. Prev is not allowed
** to follow SeekGT, SeekGE, or OP_Rewind.
**
** The P1 cursor must be for a real table, not a pseudo-table. If P1 is
** not open then the behavior is undefined.
**
** The P3 value is a hint to the btree implementation. If P3==1, that
** means P1 is an SQL index and that this instruction could have been
** omitted if that index had been unique. P3 is usually 0. P3 is
** always either 0 or 1.
**
** If P5 is positive and the jump is taken, then event counter
** number P5-1 in the prepared statement is incremented.
*/
/* Opcode: SorterNext P1 P2 * * P5
**
** This opcode works just like OP_Next except that P1 must be a
** sorter object for which the OP_SorterSort opcode has been
** invoked. This opcode advances the cursor to the next sorted
** record, or jumps to P2 if there are no more sorted records.
*/
_117:
;
pC22 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
rc = _sqlite3VdbeSorterNext(tls, db, pC22)
goto next_tail
_115:
; /* jump, ncycle */
pC22 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
rc = _sqlite3BtreePrevious(tls, *(*uintptr)(unsafe.Pointer(pC22 + 48)), (*TOp)(unsafe.Pointer(pOp)).Fp3)
goto next_tail
_116:
; /* jump, ncycle */
pC22 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
rc = _sqlite3BtreeNext(tls, *(*uintptr)(unsafe.Pointer(pC22 + 48)), (*TOp)(unsafe.Pointer(pOp)).Fp3)
goto next_tail
next_tail:
;
(*TVdbeCursor)(unsafe.Pointer(pC22)).FcacheStatus = uint32(CACHE_STALE)
if rc == SQLITE_OK {
(*TVdbeCursor)(unsafe.Pointer(pC22)).FnullRow = uint8(0)
*(*Tu32)(unsafe.Pointer(p + 212 + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp5)*4))++
goto jump_to_p2_and_check_for_interrupt
}
if rc != int32(SQLITE_DONE) {
goto abort_due_to_error
}
rc = SQLITE_OK
(*TVdbeCursor)(unsafe.Pointer(pC22)).FnullRow = uint8(1)
goto check_for_interrupt
/* Opcode: IdxInsert P1 P2 P3 P4 P5
** Synopsis: key=r[P2]
**
** Register P2 holds an SQL index key made using the
** MakeRecord instructions. This opcode writes that key
** into the index P1. Data for the entry is nil.
**
** If P4 is not zero, then it is the number of values in the unpacked
** key of reg(P2). In that case, P3 is the index of the first register
** for the unpacked key. The availability of the unpacked key can sometimes
** be an optimization.
**
** If P5 has the OPFLAG_APPEND bit set, that is a hint to the b-tree layer
** that this insert is likely to be an append.
**
** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is
** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear,
** then the change counter is unchanged.
**
** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might
** run faster by avoiding an unnecessary seek on cursor P1. However,
** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior
** seeks on the cursor or if the most recent seek used a key equivalent
** to P2.
**
** This instruction only works for indices. The equivalent instruction
** for tables is OP_Insert.
*/
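/* Illustrative example (register numbers are hypothetical): with P3==7 and
** P4==3, registers r[7], r[8] and r[9] hold the three values of the unpacked
** key that were packed into the record in r[P2], which the b-tree layer can
** sometimes use as an optimization instead of decoding r[P2].
*/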
_118:
;
pC23 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pIn2 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_NCHANGE) != 0 {
(*TVdbe)(unsafe.Pointer(p)).FnChange++
}
if int32((*TMem)(unsafe.Pointer(pIn2)).Fflags)&int32(MEM_Zero) != 0 {
v273 = _sqlite3VdbeMemExpandBlob(tls, pIn2)
} else {
v273 = 0
}
rc = v273
if rc != 0 {
goto abort_due_to_error
}
(*(*TBtreePayload)(unsafe.Pointer(bp + 408))).FnKey = int64((*TMem)(unsafe.Pointer(pIn2)).Fn)
(*(*TBtreePayload)(unsafe.Pointer(bp + 408))).FpKey = (*TMem)(unsafe.Pointer(pIn2)).Fz
(*(*TBtreePayload)(unsafe.Pointer(bp + 408))).FaMem = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
(*(*TBtreePayload)(unsafe.Pointer(bp + 408))).FnMem = uint16((*TOp)(unsafe.Pointer(pOp)).Fp4.Fi)
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&int32(OPFLAG_USESEEKRESULT) != 0 {
v274 = (*TVdbeCursor)(unsafe.Pointer(pC23)).FseekResult
} else {
v274 = 0
}
rc = _sqlite3BtreeInsert(tls, *(*uintptr)(unsafe.Pointer(pC23 + 48)), bp+408, int32((*TOp)(unsafe.Pointer(pOp)).Fp5)&(libc.Int32FromInt32(OPFLAG_APPEND)|libc.Int32FromInt32(OPFLAG_SAVEPOSITION)|libc.Int32FromInt32(OPFLAG_PREFORMAT)), v274)
(*TVdbeCursor)(unsafe.Pointer(pC23)).FcacheStatus = uint32(CACHE_STALE)
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: SorterInsert P1 P2 * * *
** Synopsis: key=r[P2]
**
** Register P2 holds an SQL index key made using the
** MakeRecord instructions. This opcode writes that key
** into the sorter P1. Data for the entry is nil.
*/
_119:
;
pC24 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pIn2 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TMem)(unsafe.Pointer(pIn2)).Fflags)&int32(MEM_Zero) != 0 {
v275 = _sqlite3VdbeMemExpandBlob(tls, pIn2)
} else {
v275 = 0
}
rc = v275
if rc != 0 {
goto abort_due_to_error
}
rc = _sqlite3VdbeSorterWrite(tls, pC24, pIn2)
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: IdxDelete P1 P2 P3 * P5
** Synopsis: key=r[P2@P3]
**
** The content of P3 registers starting at register P2 form
** an unpacked index key. This opcode removes that entry from the
** index opened by cursor P1.
**
** If P5 is not zero, then raise an SQLITE_CORRUPT_INDEX error
** if no matching index entry is found. This happens when running
** an UPDATE or DELETE statement and the index entry to be updated
** or deleted is not found. For some uses of IdxDelete
** (example: the EXCEPT operator) it does not matter that no matching
** entry is found. For those cases, P5 is zero. Also, do not raise
** this (self-correcting and non-critical) error if in writable_schema mode.
*/
_120:
;
pC25 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
pCrsr7 = *(*uintptr)(unsafe.Pointer(pC25 + 48))
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 464))).FpKeyInfo = (*TVdbeCursor)(unsafe.Pointer(pC25)).FpKeyInfo
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 464))).FnField = uint16((*TOp)(unsafe.Pointer(pOp)).Fp3)
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 464))).Fdefault_rc = 0
(*(*TUnpackedRecord)(unsafe.Pointer(bp + 464))).FaMem = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
rc = _sqlite3BtreeIndexMoveto(tls, pCrsr7, bp+464, bp+456)
if rc != 0 {
goto abort_due_to_error
}
if *(*int32)(unsafe.Pointer(bp + 456)) == 0 {
rc = _sqlite3BtreeDelete(tls, pCrsr7, uint8(BTREE_AUXDELETE))
if rc != 0 {
goto abort_due_to_error
}
} else {
if (*TOp)(unsafe.Pointer(pOp)).Fp5 != 0 && !(_sqlite3WritableSchema(tls, db) != 0) {
rc = _sqlite3ReportError(tls, libc.Int32FromInt32(SQLITE_CORRUPT)|libc.Int32FromInt32(3)< int64(0x7fffffff) {
rc = _sqlite3CorruptError(tls, int32(99398))
goto abort_due_to_error
}
_sqlite3VdbeMemInit(tls, bp+552, db, uint16(0))
rc = _sqlite3VdbeMemFromBtreeZeroOffset(tls, pCur2, uint32(nCellKey), bp+552)
if rc != 0 {
goto abort_due_to_error
}
res10 = _sqlite3VdbeRecordCompareWithSkip(tls, (*(*TMem)(unsafe.Pointer(bp + 552))).Fn, (*(*TMem)(unsafe.Pointer(bp + 552))).Fz, bp+512, 0)
_sqlite3VdbeMemReleaseMalloc(tls, bp+552)
/* End of inlined sqlite3VdbeIdxKeyCompare() */
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode)&int32(1) == libc.Int32FromInt32(OP_IdxLT)&libc.Int32FromInt32(1) {
res10 = -res10
} else {
res10++
}
if res10 > 0 {
goto jump_to_p2
}
goto _187
/* Opcode: Destroy P1 P2 P3 * *
**
** Delete an entire database table or index whose root page in the database
** file is given by P1.
**
** The table being destroyed is in the main database file if P3==0. If
** P3==1 then the table to be destroyed is in the auxiliary database file
** that is used to store tables created using CREATE TEMPORARY TABLE.
**
** If AUTOVACUUM is enabled then it is possible that another root page
** might be moved into the newly deleted root page in order to keep all
** root pages contiguous at the beginning of the database. The former
** value of the root page that moved - its value before the move occurred -
** is stored in register P2. If no page movement was required (because the
** table being dropped was already the last one in the database) then a
** zero is stored in register P2. If AUTOVACUUM is disabled then a zero
** is stored in register P2.
**
** This opcode throws an error if there are any active reader VMs when
** it is invoked. This is done to avoid the difficulty associated with
** updating existing cursors when a root page is moved in an AUTOVACUUM
** database. This error is thrown even if the database is not an AUTOVACUUM
** db in order to avoid introducing an incompatibility between autovacuum
** and non-autovacuum modes.
**
** See also: Clear
*/
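/* Illustrative example (page numbers are hypothetical): in an AUTOVACUUM
** database, dropping the table rooted at page 12 while page 40 is the last
** root page moves page 40 into the freed slot and stores 40 in register P2;
** if no move was needed, or AUTOVACUUM is disabled, register P2 gets 0.
*/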
_128:
;
pOut = _out2Prerelease(tls, p, pOp)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Null)
if (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeRead > (*Tsqlite3)(unsafe.Pointer(db)).FnVDestroy+int32(1) {
rc = int32(SQLITE_LOCKED)
(*TVdbe)(unsafe.Pointer(p)).FerrorAction = uint8(OE_Abort)
goto abort_due_to_error
} else {
iDb2 = (*TOp)(unsafe.Pointer(pOp)).Fp3
*(*int32)(unsafe.Pointer(bp + 608)) = 0 /* Not needed. Only to silence a warning. */
rc = _sqlite3BtreeDropTable(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb2)*32))).FpBt, (*TOp)(unsafe.Pointer(pOp)).Fp1, bp+608)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(MEM_Int)
*(*Ti64)(unsafe.Pointer(pOut)) = int64(*(*int32)(unsafe.Pointer(bp + 608)))
if rc != 0 {
goto abort_due_to_error
}
if *(*int32)(unsafe.Pointer(bp + 608)) != 0 {
_sqlite3RootPageMoved(tls, db, iDb2, uint32(*(*int32)(unsafe.Pointer(bp + 608))), uint32((*TOp)(unsafe.Pointer(pOp)).Fp1))
/* All OP_Destroy operations occur on the same btree */
resetSchemaOnFault = uint8(iDb2 + int32(1))
}
}
goto _187
/* Opcode: Clear P1 P2 P3
**
** Delete all contents of the database table or index whose root page
** in the database file is given by P1. But, unlike Destroy, do not
** remove the table or index from the database file.
**
** The table being cleared is in the main database file if P2==0. If
** P2==1 then the table to be cleared is in the auxiliary database file
** that is used to store tables created using CREATE TEMPORARY TABLE.
**
** If the P3 value is non-zero, then the row change count is incremented
** by the number of rows in the table being cleared. If P3 is greater
** than zero, then the value stored in register P3 is also incremented
** by the number of rows in the table being cleared.
**
** See also: Destroy
*/
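/* Illustrative example (row count is hypothetical): clearing a table that
** holds 7 rows with P3==5 adds 7 to the change counter and also adds 7 to
** the value in register 5; with P3==0 neither count is touched.
*/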
_129:
;
*(*Ti64)(unsafe.Pointer(bp + 616)) = 0
rc = _sqlite3BtreeClearTable(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*32))).FpBt, int32(uint32((*TOp)(unsafe.Pointer(pOp)).Fp1)), bp+616)
if (*TOp)(unsafe.Pointer(pOp)).Fp3 != 0 {
*(*Ti64)(unsafe.Pointer(p + 56)) += *(*Ti64)(unsafe.Pointer(bp + 616))
if (*TOp)(unsafe.Pointer(pOp)).Fp3 > 0 {
*(*Ti64)(unsafe.Pointer(aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56)) += *(*Ti64)(unsafe.Pointer(bp + 616))
}
}
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: ResetSorter P1 * * * *
**
** Delete all contents from the ephemeral table or sorter
** that is open on cursor P1.
**
** This opcode only works for cursors used for sorting and
** opened with OP_OpenEphemeral or OP_SorterOpen.
*/
_130:
;
pC29 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if int32((*TVdbeCursor)(unsafe.Pointer(pC29)).FeCurType) == int32(CURTYPE_SORTER) {
_sqlite3VdbeSorterReset(tls, db, *(*uintptr)(unsafe.Pointer(pC29 + 48)))
} else {
rc = _sqlite3BtreeClearTableOfCursor(tls, *(*uintptr)(unsafe.Pointer(pC29 + 48)))
if rc != 0 {
goto abort_due_to_error
}
}
goto _187
/* Opcode: CreateBtree P1 P2 P3 * *
** Synopsis: r[P2]=root iDb=P1 flags=P3
**
** Allocate a new b-tree in the main database file if P1==0 or in the
** TEMP database file if P1==1 or in an attached database if
** P1>1. The P3 argument must be 1 (BTREE_INTKEY) for a rowid table;
** it must be 2 (BTREE_BLOBKEY) for an index or WITHOUT ROWID table.
** The root page number of the new b-tree is stored in register P2.
*/
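/* Illustrative example: P1==0 with P3==BTREE_INTKEY (1) allocates a new
** rowid-table b-tree in the main database, while P3==BTREE_BLOBKEY (2)
** would allocate an index or WITHOUT ROWID b-tree; either way the new root
** page number is written into register P2.
*/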
_131:
;
pOut = _out2Prerelease(tls, p, pOp)
*(*TPgno)(unsafe.Pointer(bp + 624)) = uint32(0)
pDb3 = (*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*32
rc = _sqlite3BtreeCreateTable(tls, (*TDb)(unsafe.Pointer(pDb3)).FpBt, bp+624, (*TOp)(unsafe.Pointer(pOp)).Fp3)
if rc != 0 {
goto abort_due_to_error
}
*(*Ti64)(unsafe.Pointer(pOut)) = int64(*(*TPgno)(unsafe.Pointer(bp + 624)))
goto _187
/* Opcode: SqlExec * * * P4 *
**
** Run the SQL statement or statements specified in the P4 string.
** Disable Auth and Trace callbacks while those statements are running if
** P1 is true.
*/
_132:
;
(*Tsqlite3)(unsafe.Pointer(db)).FnSqlExec++
*(*uintptr)(unsafe.Pointer(bp + 632)) = uintptr(0)
xAuth = (*Tsqlite3)(unsafe.Pointer(db)).FxAuth
mTrace = (*Tsqlite3)(unsafe.Pointer(db)).FmTrace
if (*TOp)(unsafe.Pointer(pOp)).Fp1 != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FxAuth = uintptr(0)
(*Tsqlite3)(unsafe.Pointer(db)).FmTrace = uint8(0)
}
rc = Xsqlite3_exec(tls, db, *(*uintptr)(unsafe.Pointer(pOp + 16)), uintptr(0), uintptr(0), bp+632)
(*Tsqlite3)(unsafe.Pointer(db)).FnSqlExec--
(*Tsqlite3)(unsafe.Pointer(db)).FxAuth = xAuth
(*Tsqlite3)(unsafe.Pointer(db)).FmTrace = mTrace
if *(*uintptr)(unsafe.Pointer(bp + 632)) != 0 || rc != 0 {
_sqlite3VdbeError(tls, p, __ccgo_ts+3797, libc.VaList(bp+944, *(*uintptr)(unsafe.Pointer(bp + 632))))
Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 632)))
if rc == int32(SQLITE_NOMEM) {
goto no_mem
}
goto abort_due_to_error
}
goto _187
/* Opcode: ParseSchema P1 * * P4 *
**
** Read and parse all entries from the schema table of database P1
** that match the WHERE clause P4. If P4 is a NULL pointer, then the
** entire schema for P1 is reparsed.
**
** This opcode invokes the parser to create a new virtual machine,
** then runs the new virtual machine. It is thus a re-entrant opcode.
*/
_133:
;
/* Any prepared statement that invokes this opcode will hold mutexes
** on every btree. This is a prerequisite for invoking
** sqlite3InitCallback().
*/
iDb3 = (*TOp)(unsafe.Pointer(pOp)).Fp1
if *(*uintptr)(unsafe.Pointer(pOp + 16)) == uintptr(0) {
_sqlite3SchemaClear(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb3)*32))).FpSchema)
*(*Tu32)(unsafe.Pointer(db + 44)) &= uint32(^libc.Int32FromInt32(DBFLAG_SchemaKnownOk))
rc = _sqlite3InitOne(tls, db, iDb3, p+168, uint32((*TOp)(unsafe.Pointer(pOp)).Fp5))
*(*Tu32)(unsafe.Pointer(db + 44)) |= uint32(DBFLAG_SchemaChange)
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 0, 0x3)
} else {
zSchema = __ccgo_ts + 6068
(*(*TInitData)(unsafe.Pointer(bp + 640))).Fdb = db
(*(*TInitData)(unsafe.Pointer(bp + 640))).FiDb = iDb3
(*(*TInitData)(unsafe.Pointer(bp + 640))).FpzErrMsg = p + 168
(*(*TInitData)(unsafe.Pointer(bp + 640))).FmInitFlags = uint32(0)
(*(*TInitData)(unsafe.Pointer(bp + 640))).FmxPage = _sqlite3BtreeLastPage(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb3)*32))).FpBt)
zSql = _sqlite3MPrintf(tls, db, __ccgo_ts+6082, libc.VaList(bp+944, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(iDb3)*32))).FzDbSName, zSchema, *(*uintptr)(unsafe.Pointer(pOp + 16))))
if zSql == uintptr(0) {
rc = int32(SQLITE_NOMEM)
} else {
(*Tsqlite3)(unsafe.Pointer(db)).Finit1.Fbusy = uint8(1)
(*(*TInitData)(unsafe.Pointer(bp + 640))).Frc = SQLITE_OK
(*(*TInitData)(unsafe.Pointer(bp + 640))).FnInitRow = uint32(0)
rc = Xsqlite3_exec(tls, db, zSql, __ccgo_fp(_sqlite3InitCallback), bp+640, uintptr(0))
if rc == SQLITE_OK {
rc = (*(*TInitData)(unsafe.Pointer(bp + 640))).Frc
}
if rc == SQLITE_OK && (*(*TInitData)(unsafe.Pointer(bp + 640))).FnInitRow == uint32(0) {
/* The OP_ParseSchema opcode with a non-NULL P4 argument should parse
** at least one SQL statement. Any less than that indicates that
** the sqlite_schema table is corrupt. */
rc = _sqlite3CorruptError(tls, int32(99678))
}
_sqlite3DbFreeNN(tls, db, zSql)
(*Tsqlite3)(unsafe.Pointer(db)).Finit1.Fbusy = uint8(0)
}
}
if rc != 0 {
_sqlite3ResetAllSchemasOfConnection(tls, db)
if rc == int32(SQLITE_NOMEM) {
goto no_mem
}
goto abort_due_to_error
}
goto _187
/* Opcode: LoadAnalysis P1 * * * *
**
** Read the sqlite_stat1 table for database P1 and load the content
** of that table into the internal index hash table. This will cause
** the analysis to be used when preparing all subsequent queries.
*/
_134:
;
rc = _sqlite3AnalysisLoad(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp1)
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: DropTable P1 * * P4 *
**
** Remove the internal (in-memory) data structures that describe
** the table named P4 in database P1. This is called after a table
** is dropped from disk (using the Destroy opcode) in order to keep
** the internal representation of the
** schema consistent with what is on disk.
*/
_135:
;
_sqlite3UnlinkAndDeleteTable(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp1, *(*uintptr)(unsafe.Pointer(pOp + 16)))
goto _187
/* Opcode: DropIndex P1 * * P4 *
**
** Remove the internal (in-memory) data structures that describe
** the index named P4 in database P1. This is called after an index
** is dropped from disk (using the Destroy opcode)
** in order to keep the internal representation of the
** schema consistent with what is on disk.
*/
_136:
;
_sqlite3UnlinkAndDeleteIndex(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp1, *(*uintptr)(unsafe.Pointer(pOp + 16)))
goto _187
/* Opcode: DropTrigger P1 * * P4 *
**
** Remove the internal (in-memory) data structures that describe
** the trigger named P4 in database P1. This is called after a trigger
** is dropped from disk (using the Destroy opcode) in order to keep
** the internal representation of the
** schema consistent with what is on disk.
*/
_137:
;
_sqlite3UnlinkAndDeleteTrigger(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp1, *(*uintptr)(unsafe.Pointer(pOp + 16)))
goto _187
/* Opcode: IntegrityCk P1 P2 P3 P4 P5
**
** Do an analysis of the currently open database. Store in
** register P1 the text of an error message describing any problems.
** If no problems are found, store a NULL in register P1.
**
** The register P3 contains one less than the maximum number of allowed errors.
** At most reg(P3) errors will be reported.
** In other words, the analysis stops as soon as reg(P3) errors are
** seen. Reg(P3) is updated with the number of errors remaining.
**
** The root page numbers of all tables in the database are integers
** stored in P4_INTARRAY argument.
**
** If P5 is not zero, the check is done on the auxiliary database
** file, not the main database file.
**
** This opcode is used to implement the integrity_check pragma.
*/
_138:
; /* Register keeping track of errors remaining */
nRoot = (*TOp)(unsafe.Pointer(pOp)).Fp2
aRoot = *(*uintptr)(unsafe.Pointer(pOp + 16))
pnErr = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
rc = _sqlite3BtreeIntegrityCheck(tls, db, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp5)*32))).FpBt, aRoot+1*4, nRoot, int32(*(*Ti64)(unsafe.Pointer(pnErr)))+int32(1), bp+680, bp+688)
_sqlite3VdbeMemSetNull(tls, pIn1)
if *(*int32)(unsafe.Pointer(bp + 680)) == 0 {
} else {
if rc != 0 {
Xsqlite3_free(tls, *(*uintptr)(unsafe.Pointer(bp + 688)))
goto abort_due_to_error
} else {
*(*Ti64)(unsafe.Pointer(pnErr)) -= int64(*(*int32)(unsafe.Pointer(bp + 680)) - int32(1))
_sqlite3VdbeMemSetStr(tls, pIn1, *(*uintptr)(unsafe.Pointer(bp + 688)), int64(-int32(1)), uint8(SQLITE_UTF8), __ccgo_fp(Xsqlite3_free))
}
}
_sqlite3VdbeChangeEncoding(tls, pIn1, int32(encoding))
goto check_for_interrupt
/* Opcode: RowSetAdd P1 P2 * * *
** Synopsis: rowset(P1)=r[P2]
**
** Insert the integer value held by register P2 into a RowSet object
** held in register P1.
**
** An assertion fails if P2 is not an integer.
*/
_139:
; /* in1, in2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pIn2 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Blob) == 0 {
if _sqlite3VdbeMemSetRowSet(tls, pIn1) != 0 {
goto no_mem
}
}
_sqlite3RowSetInsert(tls, (*TMem)(unsafe.Pointer(pIn1)).Fz, *(*Ti64)(unsafe.Pointer(pIn2)))
goto _187
/* Opcode: RowSetRead P1 P2 P3 * *
** Synopsis: r[P3]=rowset(P1)
**
** Extract the smallest value from the RowSet object in P1
** and put that value into register P3.
** Or, if RowSet object P1 is initially empty, leave P3
** unchanged and jump to instruction P2.
*/
_140:
;
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Blob) == 0 || _sqlite3RowSetNext(tls, (*TMem)(unsafe.Pointer(pIn1)).Fz, bp+696) == 0 {
/* The boolean index is empty */
_sqlite3VdbeMemSetNull(tls, pIn1)
goto jump_to_p2_and_check_for_interrupt
} else {
/* A value was pulled from the index */
_sqlite3VdbeMemSetInt64(tls, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56, *(*Ti64)(unsafe.Pointer(bp + 696)))
}
goto check_for_interrupt
/* Opcode: RowSetTest P1 P2 P3 P4
** Synopsis: if r[P3] in rowset(P1) goto P2
**
** Register P3 is assumed to hold a 64-bit integer value. If register P1
** contains a RowSet object and that RowSet object contains
** the value held in P3, jump to instruction P2. Otherwise, insert the
** integer in P3 into the RowSet and continue on to the
** next opcode.
**
** The RowSet object is optimized for the case where sets of integers
** are inserted in distinct phases, where each set contains no duplicates.
** Each set is identified by a unique P4 value. The first set
** must have P4==0, the final set must have P4==-1, and all other sets
** must have P4>0.
**
** This allows optimizations: (a) when P4==0 there is no need to test
** the RowSet object for P3, as it is guaranteed not to contain it,
** (b) when P4==-1 there is no need to insert the value, as it will
** never be tested for, and (c) when a value that is part of set X is
** inserted, there is no need to search to see if the same value was
** previously inserted as part of set X (only if it was previously
** inserted as part of some other set).
*/
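/* Illustrative example of the phased insertion protocol described above: the
** first batch of rowids is inserted with P4==0 and never tested, intermediate
** batches use P4==1, 2, ... and are both tested and inserted, and a final
** test-only pass uses P4==-1 and never inserts.
*/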
_141:
;
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pIn3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
iSet = (*TOp)(unsafe.Pointer(pOp)).Fp4.Fi
/* If there is anything other than a rowset object in memory cell P1,
** delete it now and initialize P1 with an empty rowset
*/
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Blob) == 0 {
if _sqlite3VdbeMemSetRowSet(tls, pIn1) != 0 {
goto no_mem
}
}
if iSet != 0 {
exists = _sqlite3RowSetTest(tls, (*TMem)(unsafe.Pointer(pIn1)).Fz, iSet, *(*Ti64)(unsafe.Pointer(pIn3)))
if exists != 0 {
goto jump_to_p2
}
}
if iSet >= 0 {
_sqlite3RowSetInsert(tls, (*TMem)(unsafe.Pointer(pIn1)).Fz, *(*Ti64)(unsafe.Pointer(pIn3)))
}
goto _187
/* Opcode: Program P1 P2 P3 P4 P5
**
** Execute the trigger program passed as P4 (type P4_SUBPROGRAM).
**
** P1 contains the address of the memory cell that contains the first memory
** cell in an array of values used as arguments to the sub-program. P2
** contains the address to jump to if the sub-program throws an IGNORE
** exception using the RAISE() function. Register P3 contains the address
** of a memory cell in this (the parent) VM that is used to allocate the
** memory required by the sub-vdbe at runtime.
**
** P4 is a pointer to the VM containing the trigger program.
**
** If P5 is non-zero, then recursive program invocation is enabled.
*/
_142:
; /* Token identifying trigger */
pProgram = *(*uintptr)(unsafe.Pointer(pOp + 16))
pRt = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
/* If the p5 flag is clear, then recursive invocation of triggers is
** disabled for backwards compatibility (p5 is set if this sub-program
** is really a trigger, not a foreign key action, and the flag set
** and cleared by the "PRAGMA recursive_triggers" command is clear).
**
** It is recursive invocation of triggers, at the SQL level, that is
** disabled. In some cases a single trigger may generate more than one
** SubProgram (if the trigger may be executed with more than one different
** ON CONFLICT algorithm). SubProgram structures associated with a
** single trigger all have the same value for the SubProgram.token
** variable. */
if (*TOp)(unsafe.Pointer(pOp)).Fp5 != 0 {
t1 = (*TSubProgram)(unsafe.Pointer(pProgram)).Ftoken
pFrame2 = (*TVdbe)(unsafe.Pointer(p)).FpFrame
for {
if !(pFrame2 != 0 && (*TVdbeFrame)(unsafe.Pointer(pFrame2)).Ftoken != t1) {
break
}
goto _276
_276:
;
pFrame2 = (*TVdbeFrame)(unsafe.Pointer(pFrame2)).FpParent
}
if pFrame2 != 0 {
goto _187
}
}
if (*TVdbe)(unsafe.Pointer(p)).FnFrame >= *(*int32)(unsafe.Pointer(db + 136 + 10*4)) {
rc = int32(SQLITE_ERROR)
_sqlite3VdbeError(tls, p, __ccgo_ts+6125, 0)
goto abort_due_to_error
}
/* Register pRt is used to store the memory required to save the state
** of the current program, and the memory required at runtime to execute
** the trigger program. If this trigger has been fired before, then pRt
** is already allocated. Otherwise, it must be initialized. */
if int32((*TMem)(unsafe.Pointer(pRt)).Fflags)&int32(MEM_Blob) == 0 {
/* SubProgram.nMem is set to the number of memory cells used by the
** program stored in SubProgram.aOp. As well as these, one memory
** cell is required for each cursor used by the program. Set local
** variable nMem (and later, VdbeFrame.nChildMem) to this value.
*/
nMem = (*TSubProgram)(unsafe.Pointer(pProgram)).FnMem + (*TSubProgram)(unsafe.Pointer(pProgram)).FnCsr
if (*TSubProgram)(unsafe.Pointer(pProgram)).FnCsr == 0 {
nMem++
}
nByte2 = int32((libc.Uint64FromInt64(112)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7)) + uint64(nMem)*uint64(56) + uint64((*TSubProgram)(unsafe.Pointer(pProgram)).FnCsr)*uint64(8) + uint64(((*TSubProgram)(unsafe.Pointer(pProgram)).FnOp+int32(7))/int32(8)))
pFrame2 = _sqlite3DbMallocZero(tls, db, uint64(nByte2))
if !(pFrame2 != 0) {
goto no_mem
}
_sqlite3VdbeMemRelease(tls, pRt)
(*TMem)(unsafe.Pointer(pRt)).Fflags = uint16(libc.Int32FromInt32(MEM_Blob) | libc.Int32FromInt32(MEM_Dyn))
(*TMem)(unsafe.Pointer(pRt)).Fz = pFrame2
(*TMem)(unsafe.Pointer(pRt)).Fn = nByte2
(*TMem)(unsafe.Pointer(pRt)).FxDel = __ccgo_fp(_sqlite3VdbeFrameMemDel)
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).Fv = p
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnChildMem = nMem
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnChildCsr = (*TSubProgram)(unsafe.Pointer(pProgram)).FnCsr
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).Fpc = int32((int64(pOp) - int64(aOp)) / 24)
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FaMem = (*TVdbe)(unsafe.Pointer(p)).FaMem
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnMem = (*TVdbe)(unsafe.Pointer(p)).FnMem
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FapCsr = (*TVdbe)(unsafe.Pointer(p)).FapCsr
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnCursor = (*TVdbe)(unsafe.Pointer(p)).FnCursor
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FaOp = (*TVdbe)(unsafe.Pointer(p)).FaOp
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnOp = (*TVdbe)(unsafe.Pointer(p)).FnOp
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).Ftoken = (*TSubProgram)(unsafe.Pointer(pProgram)).Ftoken
pEnd = pFrame2 + uintptr((libc.Uint64FromInt64(112)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7))) + uintptr((*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnChildMem)*56
pMem1 = pFrame2 + uintptr((libc.Uint64FromInt64(112)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7)))
for {
if !(pMem1 != pEnd) {
break
}
(*TMem)(unsafe.Pointer(pMem1)).Fflags = uint16(MEM_Undefined)
(*TMem)(unsafe.Pointer(pMem1)).Fdb = db
goto _277
_277:
;
pMem1 += 56
}
} else {
pFrame2 = (*TMem)(unsafe.Pointer(pRt)).Fz
}
(*TVdbe)(unsafe.Pointer(p)).FnFrame++
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FpParent = (*TVdbe)(unsafe.Pointer(p)).FpFrame
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FlastRowid = (*Tsqlite3)(unsafe.Pointer(db)).FlastRowid
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnChange = (*TVdbe)(unsafe.Pointer(p)).FnChange
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnDbChange = (*Tsqlite3)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).Fdb)).FnChange
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FpAuxData = (*TVdbe)(unsafe.Pointer(p)).FpAuxData
(*TVdbe)(unsafe.Pointer(p)).FpAuxData = uintptr(0)
(*TVdbe)(unsafe.Pointer(p)).FnChange = 0
(*TVdbe)(unsafe.Pointer(p)).FpFrame = pFrame2
v278 = pFrame2 + uintptr((libc.Uint64FromInt64(112)+libc.Uint64FromInt32(7))&uint64(^libc.Int32FromInt32(7)))
aMem = v278
(*TVdbe)(unsafe.Pointer(p)).FaMem = v278
(*TVdbe)(unsafe.Pointer(p)).FnMem = (*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnChildMem
(*TVdbe)(unsafe.Pointer(p)).FnCursor = int32(uint16((*TVdbeFrame)(unsafe.Pointer(pFrame2)).FnChildCsr))
(*TVdbe)(unsafe.Pointer(p)).FapCsr = aMem + uintptr((*TVdbe)(unsafe.Pointer(p)).FnMem)*56
(*TVdbeFrame)(unsafe.Pointer(pFrame2)).FaOnce = (*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TSubProgram)(unsafe.Pointer(pProgram)).FnCsr)*8
libc.Xmemset(tls, (*TVdbeFrame)(unsafe.Pointer(pFrame2)).FaOnce, 0, uint64(((*TSubProgram)(unsafe.Pointer(pProgram)).FnOp+int32(7))/int32(8)))
v279 = (*TSubProgram)(unsafe.Pointer(pProgram)).FaOp
aOp = v279
(*TVdbe)(unsafe.Pointer(p)).FaOp = v279
(*TVdbe)(unsafe.Pointer(p)).FnOp = (*TSubProgram)(unsafe.Pointer(pProgram)).FnOp
pOp = aOp + uintptr(-libc.Int32FromInt32(1))*24
goto check_for_interrupt
/* Opcode: Param P1 P2 * * *
**
** This opcode is only ever present in sub-programs called via the
** OP_Program instruction. Copy a value currently stored in a memory
** cell of the calling (parent) frame to cell P2 in the current frame's
** address space. This is used by trigger programs to access the new.*
** and old.* values.
**
** The address of the cell in the parent frame is determined by adding
** the value of the P1 argument to the value of the P1 argument to the
** calling OP_Program instruction.
*/
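/* Worked example (values are hypothetical): if the calling OP_Program has
** P1==3 and this OP_Param has P1==2, the value is copied from cell 3+2==5
** of the parent frame into cell P2 of the current frame.
*/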
_143:
;
pOut = _out2Prerelease(tls, p, pOp)
pFrame3 = (*TVdbe)(unsafe.Pointer(p)).FpFrame
pIn = (*TVdbeFrame)(unsafe.Pointer(pFrame3)).FaMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1+(*(*TOp)(unsafe.Pointer((*TVdbeFrame)(unsafe.Pointer(pFrame3)).FaOp + uintptr((*TVdbeFrame)(unsafe.Pointer(pFrame3)).Fpc)*24))).Fp1)*56
_sqlite3VdbeMemShallowCopy(tls, pOut, pIn, int32(MEM_Ephem))
goto _187
/* Opcode: FkCounter P1 P2 * * *
** Synopsis: fkctr[P1]+=P2
**
** Increment a "constraint counter" by P2 (P2 may be negative or positive).
** If P1 is non-zero, the database constraint counter is incremented
** (deferred foreign key constraints). Otherwise, if P1 is zero, the
** statement counter is incremented (immediate foreign key constraints).
*/
_144:
;
if (*Tsqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_DeferFKs) != 0 {
*(*Ti64)(unsafe.Pointer(db + 792)) += int64((*TOp)(unsafe.Pointer(pOp)).Fp2)
} else {
if (*TOp)(unsafe.Pointer(pOp)).Fp1 != 0 {
*(*Ti64)(unsafe.Pointer(db + 784)) += int64((*TOp)(unsafe.Pointer(pOp)).Fp2)
} else {
*(*Ti64)(unsafe.Pointer(p + 80)) += int64((*TOp)(unsafe.Pointer(pOp)).Fp2)
}
}
goto _187
/* Opcode: FkIfZero P1 P2 * * *
** Synopsis: if fkctr[P1]==0 goto P2
**
** This opcode tests if a foreign key constraint-counter is currently zero.
** If so, jump to instruction P2. Otherwise, fall through to the next
** instruction.
**
** If P1 is non-zero, then the jump is taken if the database constraint-counter
** is zero (the one that counts deferred constraint violations). If P1 is
** zero, the jump is taken if the statement constraint-counter is zero
** (immediate foreign key constraint violations).
*/
_145:
; /* jump */
if (*TOp)(unsafe.Pointer(pOp)).Fp1 != 0 {
if (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredCons == 0 && (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons == 0 {
goto jump_to_p2
}
} else {
if (*TVdbe)(unsafe.Pointer(p)).FnFkConstraint == 0 && (*Tsqlite3)(unsafe.Pointer(db)).FnDeferredImmCons == 0 {
goto jump_to_p2
}
}
goto _187
/* Opcode: MemMax P1 P2 * * *
** Synopsis: r[P1]=max(r[P1],r[P2])
**
** P1 is a register in the root frame of this VM (the root frame is
** different from the current frame if this instruction is being executed
** within a sub-program). Set the value of register P1 to the maximum of
** its current value and the value in register P2.
**
** This instruction throws an error if the memory cell is not initially
** an integer.
*/
_146:
;
if (*TVdbe)(unsafe.Pointer(p)).FpFrame != 0 {
pFrame4 = (*TVdbe)(unsafe.Pointer(p)).FpFrame
for {
if !((*TVdbeFrame)(unsafe.Pointer(pFrame4)).FpParent != 0) {
break
}
goto _280
_280:
;
pFrame4 = (*TVdbeFrame)(unsafe.Pointer(pFrame4)).FpParent
}
pIn1 = (*TVdbeFrame)(unsafe.Pointer(pFrame4)).FaMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
} else {
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
}
_sqlite3VdbeMemIntegerify(tls, pIn1)
pIn2 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
_sqlite3VdbeMemIntegerify(tls, pIn2)
if *(*Ti64)(unsafe.Pointer(pIn1)) < *(*Ti64)(unsafe.Pointer(pIn2)) {
*(*Ti64)(unsafe.Pointer(pIn1)) = *(*Ti64)(unsafe.Pointer(pIn2))
}
goto _187
/* Opcode: IfPos P1 P2 P3 * *
** Synopsis: if r[P1]>0 then r[P1]-=P3, goto P2
**
** Register P1 must contain an integer.
** If the value of register P1 is 1 or greater, subtract P3 from the
** value in P1 and jump to P2.
**
** If the initial value of register P1 is less than 1, then the
** value is unchanged and control passes through to the next instruction.
*/
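/* Worked example (values are hypothetical): with r[P1]==5 and P3==2 the
** register becomes 3 and the jump to P2 is taken; with r[P1]==0 or a
** negative value the register is left unchanged and execution falls through.
*/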
_147:
; /* jump, in1 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if *(*Ti64)(unsafe.Pointer(pIn1)) > 0 {
*(*Ti64)(unsafe.Pointer(pIn1)) -= int64((*TOp)(unsafe.Pointer(pOp)).Fp3)
goto jump_to_p2
}
goto _187
/* Opcode: OffsetLimit P1 P2 P3 * *
** Synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)
**
** This opcode performs a commonly used computation associated with
** LIMIT and OFFSET processing. r[P1] holds the limit counter. r[P3]
** holds the offset counter. The opcode computes the combined value
** of the LIMIT and OFFSET and stores that value in r[P2]. The r[P2]
** value computed is the total number of rows that will need to be
** visited in order to complete the query.
**
** If r[P3] is zero or negative, that means there is no OFFSET
** and r[P2] is set to be the value of the LIMIT, r[P1].
**
** If r[P1] is zero or negative, that means there is no LIMIT
** and r[P2] is set to -1.
**
** Otherwise, r[P2] is set to the sum of r[P1] and r[P3].
*/
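/* Worked example (values are hypothetical): r[P1]==10 (LIMIT 10) and
** r[P3]==5 (OFFSET 5) give r[P2]==15; r[P3]<=0 gives r[P2]==r[P1]; and
** r[P1]<=0 (no LIMIT) gives r[P2]==-1.
*/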
_148:
;
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pIn3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
pOut = _out2Prerelease(tls, p, pOp)
*(*Ti64)(unsafe.Pointer(bp + 704)) = *(*Ti64)(unsafe.Pointer(pIn1))
if v282 = *(*Ti64)(unsafe.Pointer(bp + 704)) <= 0; !v282 {
if *(*Ti64)(unsafe.Pointer(pIn3)) > 0 {
v281 = *(*Ti64)(unsafe.Pointer(pIn3))
} else {
v281 = 0
}
}
if v282 || _sqlite3AddInt64(tls, bp+704, v281) != 0 {
/* If the LIMIT is less than or equal to zero, loop forever. This
** is documented. But also, if the LIMIT+OFFSET exceeds 2^63 then
** also loop forever. This is undocumented. In fact, one could argue
** that the loop should terminate. But assuming 1 billion iterations
** per second (far exceeding the capabilities of any current hardware)
** it would take nearly 300 years to actually reach the limit. So
** looping forever is a reasonable approximation. */
*(*Ti64)(unsafe.Pointer(pOut)) = int64(-int32(1))
} else {
*(*Ti64)(unsafe.Pointer(pOut)) = *(*Ti64)(unsafe.Pointer(bp + 704))
}
goto _187
/* Opcode: IfNotZero P1 P2 * * *
** Synopsis: if r[P1]!=0 then r[P1]--, goto P2
**
** Register P1 must contain an integer. If the content of register P1 is
** initially greater than zero, then decrement the value in register P1.
** If it is non-zero (negative or positive) then also jump to P2.
** If register P1 is initially zero, leave it unchanged and fall through.
*/
_149:
; /* jump, in1 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if *(*Ti64)(unsafe.Pointer(pIn1)) != 0 {
if *(*Ti64)(unsafe.Pointer(pIn1)) > 0 {
*(*Ti64)(unsafe.Pointer(pIn1))--
}
goto jump_to_p2
}
goto _187
/* Opcode: DecrJumpZero P1 P2 * * *
** Synopsis: if (--r[P1])==0 goto P2
**
** Register P1 must hold an integer. Decrement the value in P1
** and jump to P2 if the new value is exactly zero.
*/
_150:
; /* jump, in1 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if *(*Ti64)(unsafe.Pointer(pIn1)) > int64(-libc.Int32FromInt32(1))-(libc.Int64FromUint32(0xffffffff)|libc.Int64FromInt32(0x7fffffff)<= 0) {
break
}
*(*uintptr)(unsafe.Pointer(pCtx1 + 48 + uintptr(i4)*8)) = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2+i4)*56
goto _283
_283:
;
i4--
}
}
(*TMem)(unsafe.Pointer(pMem2)).Fn++
if (*TOp)(unsafe.Pointer(pOp)).Fp1 != 0 {
(*(*func(*libc.TLS, uintptr, int32, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TFuncDef)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FpFunc)).FxInverse})))(tls, pCtx1, int32((*Tsqlite3_context)(unsafe.Pointer(pCtx1)).Fargc), pCtx1+48)
} else {
(*(*func(*libc.TLS, uintptr, int32, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TFuncDef)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FpFunc)).FxSFunc})))(tls, pCtx1, int32((*Tsqlite3_context)(unsafe.Pointer(pCtx1)).Fargc), pCtx1+48)
} /* IMP: R-24505-23230 */
if (*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FisError != 0 {
if (*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FisError > 0 {
_sqlite3VdbeError(tls, p, __ccgo_ts+3797, libc.VaList(bp+944, Xsqlite3_value_text(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FpOut)))
rc = (*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FisError
}
if (*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FskipFlag != 0 {
i4 = (*(*TOp)(unsafe.Pointer(pOp + uintptr(-libc.Int32FromInt32(1))*24))).Fp1
if i4 != 0 {
_sqlite3VdbeMemSetInt64(tls, aMem+uintptr(i4)*56, int64(1))
}
(*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FskipFlag = uint8(0)
}
_sqlite3VdbeMemRelease(tls, (*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FpOut)
(*TMem)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FpOut)).Fflags = uint16(MEM_Null)
(*Tsqlite3_context)(unsafe.Pointer(pCtx1)).FisError = 0
if rc != 0 {
goto abort_due_to_error
}
}
goto _187
/* Opcode: AggFinal P1 P2 * P4 *
** Synopsis: accum=r[P1] N=P2
**
** P1 is the memory location that is the accumulator for an aggregate
** or window function. Execute the finalizer function
** for an aggregate and store the result in P1.
**
** P2 is the number of arguments that the step function takes and
** P4 is a pointer to the FuncDef for this function. The P2
** argument is not used by this opcode. It is only there to disambiguate
** functions that can take varying numbers of arguments. The
** P4 argument is only needed for the case where
** the step function was not previously called.
*/
/* Opcode: AggValue * P2 P3 P4 *
** Synopsis: r[P3]=value N=P2
**
** Invoke the xValue() function and store the result in register P3.
**
** P2 is the number of arguments that the step function takes and
** P4 is a pointer to the FuncDef for this function. The P2
** argument is not used by this opcode. It is only there to disambiguate
** functions that can take varying numbers of arguments. The
** P4 argument is only needed for the case where
** the step function was not previously called.
*/
_155:
;
_154:
;
pMem3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
if (*TOp)(unsafe.Pointer(pOp)).Fp3 != 0 {
rc = _sqlite3VdbeMemAggValue(tls, pMem3, aMem+uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56, *(*uintptr)(unsafe.Pointer(pOp + 16)))
pMem3 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
} else {
rc = _sqlite3VdbeMemFinalize(tls, pMem3, *(*uintptr)(unsafe.Pointer(pOp + 16)))
}
if rc != 0 {
_sqlite3VdbeError(tls, p, __ccgo_ts+3797, libc.VaList(bp+944, Xsqlite3_value_text(tls, pMem3)))
goto abort_due_to_error
}
_sqlite3VdbeChangeEncoding(tls, pMem3, int32(encoding))
goto _187
/* Opcode: Checkpoint P1 P2 P3 * *
**
** Checkpoint database P1. This is a no-op if P1 is not currently in
** WAL mode. Parameter P2 is one of SQLITE_CHECKPOINT_PASSIVE, FULL,
** RESTART, or TRUNCATE. Write 1 or 0 into mem[P3] if the checkpoint returns
** SQLITE_BUSY or not, respectively. Write the number of pages in the
** WAL after the checkpoint into mem[P3+1] and the number of pages
** in the WAL that have been checkpointed after the checkpoint
** completes into mem[P3+2]. However on an error, mem[P3+1] and
** mem[P3+2] are initialized to -1.
*/
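/* Illustrative example (counts are hypothetical): a checkpoint that completes
** might leave mem[P3]==0, mem[P3+1]==120 (pages in the WAL) and
** mem[P3+2]==120 (pages checkpointed); if the checkpoint returns SQLITE_BUSY,
** mem[P3] is set to 1 instead.
*/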
_156:
; /* Write results here */
(*(*[3]int32)(unsafe.Pointer(bp + 712)))[0] = 0
v284 = -libc.Int32FromInt32(1)
(*(*[3]int32)(unsafe.Pointer(bp + 712)))[int32(2)] = v284
(*(*[3]int32)(unsafe.Pointer(bp + 712)))[int32(1)] = v284
rc = _sqlite3Checkpoint(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp1, (*TOp)(unsafe.Pointer(pOp)).Fp2, bp+712+1*4, bp+712+2*4)
if rc != 0 {
if rc != int32(SQLITE_BUSY) {
goto abort_due_to_error
}
rc = SQLITE_OK
(*(*[3]int32)(unsafe.Pointer(bp + 712)))[0] = int32(1)
}
i5 = 0
pMem4 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
for {
if !(i5 < int32(3)) {
break
}
_sqlite3VdbeMemSetInt64(tls, pMem4, int64((*(*[3]int32)(unsafe.Pointer(bp + 712)))[i5]))
goto _285
_285:
;
i5++
pMem4 += 56
}
goto _187
/* Opcode: JournalMode P1 P2 P3 * *
**
** Change the journal mode of database P1 to P3. P3 must be one of the
** PAGER_JOURNALMODE_XXX values. If changing between the various rollback
** modes (delete, truncate, persist, off and memory), this is a simple
** operation. No IO is required.
**
** If changing into or out of WAL mode the procedure is more complicated.
**
** Write a string containing the final journal-mode to register P2.
*/
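/* Illustrative example: a successful transition of database P1 into WAL mode
** leaves the string "wal" in register P2; if the transition is not permitted
** (for example, the file is in temporary storage or the VFS lacks
** shared-memory support) the previous mode's name is written instead.
*/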
_157:
; /* Name of database file for pPager */
pOut = _out2Prerelease(tls, p, pOp)
eNew = (*TOp)(unsafe.Pointer(pOp)).Fp3
pBt1 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*32))).FpBt
pPager = _sqlite3BtreePager(tls, pBt1)
eOld = _sqlite3PagerGetJournalMode(tls, pPager)
if eNew == -int32(1) {
eNew = eOld
}
if !(_sqlite3PagerOkToChangeJournalMode(tls, pPager) != 0) {
eNew = eOld
}
zFilename = _sqlite3PagerFilename(tls, pPager, int32(1))
/* Do not allow a transition to journal_mode=WAL for a database
** in temporary storage or if the VFS does not support shared memory
*/
if eNew == int32(PAGER_JOURNALMODE_WAL) && (_sqlite3Strlen30(tls, zFilename) == 0 || !(_sqlite3PagerWalSupported(tls, pPager) != 0)) {
eNew = eOld
}
if eNew != eOld && (eOld == int32(PAGER_JOURNALMODE_WAL) || eNew == int32(PAGER_JOURNALMODE_WAL)) {
if !((*Tsqlite3)(unsafe.Pointer(db)).FautoCommit != 0) || (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeRead > int32(1) {
rc = int32(SQLITE_ERROR)
if eNew == int32(PAGER_JOURNALMODE_WAL) {
v286 = __ccgo_ts + 6162
} else {
v286 = __ccgo_ts + 6167
}
_sqlite3VdbeError(tls, p, __ccgo_ts+6174, libc.VaList(bp+944, v286))
goto abort_due_to_error
} else {
if eOld == int32(PAGER_JOURNALMODE_WAL) {
/* If leaving WAL mode, close the log file. If successful, the call
** to PagerCloseWal() checkpoints and deletes the write-ahead-log
** file. An EXCLUSIVE lock may still be held on the database file
** after a successful return.
*/
rc = _sqlite3PagerCloseWal(tls, pPager, db)
if rc == SQLITE_OK {
_sqlite3PagerSetJournalMode(tls, pPager, eNew)
}
} else {
if eOld == int32(PAGER_JOURNALMODE_MEMORY) {
/* Cannot transition directly from MEMORY to WAL. Use mode OFF
** as an intermediate */
_sqlite3PagerSetJournalMode(tls, pPager, int32(PAGER_JOURNALMODE_OFF))
}
}
/* Open a transaction on the database file. Regardless of the journal
** mode, this transaction always uses a rollback journal.
*/
if rc == SQLITE_OK {
if eNew == int32(PAGER_JOURNALMODE_WAL) {
v287 = int32(2)
} else {
v287 = int32(1)
}
rc = _sqlite3BtreeSetVersion(tls, pBt1, v287)
}
}
}
if rc != 0 {
eNew = eOld
}
eNew = _sqlite3PagerSetJournalMode(tls, pPager, eNew)
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(libc.Int32FromInt32(MEM_Str) | libc.Int32FromInt32(MEM_Static) | libc.Int32FromInt32(MEM_Term))
(*TMem)(unsafe.Pointer(pOut)).Fz = _sqlite3JournalModename(tls, eNew)
(*TMem)(unsafe.Pointer(pOut)).Fn = _sqlite3Strlen30(tls, (*TMem)(unsafe.Pointer(pOut)).Fz)
(*TMem)(unsafe.Pointer(pOut)).Fenc = uint8(SQLITE_UTF8)
_sqlite3VdbeChangeEncoding(tls, pOut, int32(encoding))
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: Vacuum P1 P2 * * *
**
** Vacuum the entire database P1. P1 is 0 for "main", and 2 or more
** for an attached database. The "temp" database may not be vacuumed.
**
** If P2 is not zero, then it is a register holding a string which is
** the file into which the result of vacuum should be written. When
** P2 is zero, the vacuum overwrites the original database.
*/
_158:
;
if (*TOp)(unsafe.Pointer(pOp)).Fp2 != 0 {
v288 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
} else {
v288 = uintptr(0)
}
rc = _sqlite3RunVacuum(tls, p+168, db, (*TOp)(unsafe.Pointer(pOp)).Fp1, v288)
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: IncrVacuum P1 P2 * * *
**
** Perform a single step of the incremental vacuum procedure on
** the P1 database. If the vacuum has finished, jump to instruction
** P2. Otherwise, fall through to the next instruction.
*/
_159:
;
pBt2 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*32))).FpBt
rc = _sqlite3BtreeIncrVacuum(tls, pBt2)
if rc != 0 {
if rc != int32(SQLITE_DONE) {
goto abort_due_to_error
}
rc = SQLITE_OK
goto jump_to_p2
}
goto _187
/* Opcode: Expire P1 P2 * * *
**
** Cause precompiled statements to expire. When an expired statement
** is executed using sqlite3_step() it will either automatically
** reprepare itself (if it was originally created using sqlite3_prepare_v2())
** or it will fail with SQLITE_SCHEMA.
**
** If P1 is 0, then all SQL statements become expired. If P1 is non-zero,
** then only the currently executing statement is expired.
**
** If P2 is 0, then SQL statements are expired immediately. If P2 is 1,
** then running SQL statements are allowed to continue to run to completion.
** The P2==1 case occurs when a CREATE INDEX or similar schema change happens
** that might help the statement run faster but which does not affect the
** correctness of operation.
*/
_160:
;
if !((*TOp)(unsafe.Pointer(pOp)).Fp1 != 0) {
_sqlite3ExpirePreparedStatements(tls, db, (*TOp)(unsafe.Pointer(pOp)).Fp2)
} else {
libc.SetBitFieldPtr16Uint32(p+200, uint32((*TOp)(unsafe.Pointer(pOp)).Fp2+libc.Int32FromInt32(1)), 0, 0x3)
}
goto _187
/* Opcode: CursorLock P1 * * * *
**
** Lock the btree to which cursor P1 is pointing so that the btree cannot be
** written by another cursor.
*/
_161:
;
pC30 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
_sqlite3BtreeCursorPin(tls, *(*uintptr)(unsafe.Pointer(pC30 + 48)))
goto _187
/* Opcode: CursorUnlock P1 * * * *
**
** Unlock the btree to which cursor P1 is pointing so that it can be
** written by other cursors.
*/
_162:
;
pC31 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
_sqlite3BtreeCursorUnpin(tls, *(*uintptr)(unsafe.Pointer(pC31 + 48)))
goto _187
/* Opcode: TableLock P1 P2 P3 P4 *
** Synopsis: iDb=P1 root=P2 write=P3
**
** Obtain a lock on a particular table. This instruction is only used when
** the shared-cache feature is enabled.
**
** P1 is the index of the database in sqlite3.aDb[] of the database
** on which the lock is acquired. A readlock is obtained if P3==0 or
** a write lock if P3==1.
**
** P2 contains the root-page of the table to lock.
**
** P4 contains a pointer to the name of the table being locked. This is only
** used to generate an error message if the lock cannot be obtained.
*/
_163:
;
isWriteLock = uint8((*TOp)(unsafe.Pointer(pOp)).Fp3)
if isWriteLock != 0 || uint64(0) == (*Tsqlite3)(unsafe.Pointer(db)).Fflags&(uint64(libc.Int32FromInt32(0x00004))< 0 {
_sqlite3VdbeError(tls, p, __ccgo_ts+3797, libc.VaList(bp+944, Xsqlite3_value_text(tls, pDest2)))
rc = (*(*Tsqlite3_context)(unsafe.Pointer(bp + 800))).FisError
}
_sqlite3VdbeChangeEncoding(tls, pDest2, int32(encoding))
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: VNext P1 P2 * * *
**
** Advance virtual table P1 to the next row in its result set and
** jump to instruction P2. Or, if the virtual table has reached
** the end of its result set, then fall through to the next instruction.
*/
_172:
;
pCur6 = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FapCsr + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*8))
if (*TVdbeCursor)(unsafe.Pointer(pCur6)).FnullRow != 0 {
goto _187
}
pVtab5 = (*Tsqlite3_vtab_cursor)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pCur6 + 48)))).FpVtab
pModule5 = (*Tsqlite3_vtab)(unsafe.Pointer(pVtab5)).FpModule
/* Invoke the xNext() method of the module. There is no way for the
** underlying implementation to return an error if one occurs during
** xNext(). Instead, if an error occurs, true is returned (indicating that
** data is available) and the error code returned when xColumn or
** some other method is next invoked on the same virtual table cursor.
*/
rc = (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_module)(unsafe.Pointer(pModule5)).FxNext})))(tls, *(*uintptr)(unsafe.Pointer(pCur6 + 48)))
_sqlite3VtabImportErrmsg(tls, p, pVtab5)
if rc != 0 {
goto abort_due_to_error
}
res12 = (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_module)(unsafe.Pointer(pModule5)).FxEof})))(tls, *(*uintptr)(unsafe.Pointer(pCur6 + 48)))
if !(res12 != 0) {
/* If there is data, jump to P2 */
goto jump_to_p2_and_check_for_interrupt
}
goto check_for_interrupt
/* Opcode: VRename P1 * * P4 *
**
** P4 is a pointer to a virtual table object, an sqlite3_vtab structure.
** This opcode invokes the corresponding xRename method. The value
** in register P1 is passed as the zName argument to the xRename method.
*/
_173:
;
isLegacy = int32((*Tsqlite3)(unsafe.Pointer(db)).Fflags & libc.Uint64FromInt32(SQLITE_LegacyAlter))
*(*Tu64)(unsafe.Pointer(db + 48)) |= uint64(SQLITE_LegacyAlter)
pVtab6 = (*TVTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16)))).FpVtab
pName = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
rc = _sqlite3VdbeChangeEncoding(tls, pName, int32(SQLITE_UTF8))
if rc != 0 {
goto abort_due_to_error
}
rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_module)(unsafe.Pointer((*Tsqlite3_vtab)(unsafe.Pointer(pVtab6)).FpModule)).FxRename})))(tls, pVtab6, (*TMem)(unsafe.Pointer(pName)).Fz)
if isLegacy == 0 {
*(*Tu64)(unsafe.Pointer(db + 48)) &= ^libc.Uint64FromInt32(SQLITE_LegacyAlter)
}
_sqlite3VtabImportErrmsg(tls, p, pVtab6)
libc.SetBitFieldPtr16Uint32(p+200, libc.Uint32FromInt32(0), 0, 0x3)
if rc != 0 {
goto abort_due_to_error
}
goto _187
/* Opcode: VUpdate P1 P2 P3 P4 P5
** Synopsis: data=r[P3@P2]
**
** P4 is a pointer to a virtual table object, an sqlite3_vtab structure.
** This opcode invokes the corresponding xUpdate method. P2 values
** are contiguous memory cells starting at P3 to pass to the xUpdate
** invocation. The value in register (P3+P2-1) corresponds to the
** p2th element of the argv array passed to xUpdate.
**
** The xUpdate method will do a DELETE or an INSERT or both.
** The argv[0] element (which corresponds to memory cell P3)
** is the rowid of a row to delete. If argv[0] is NULL then no
** deletion occurs. The argv[1] element is the rowid of the new
** row. This can be NULL to have the virtual table select the new
** rowid for itself. The subsequent elements in the array are
** the values of columns in the new row.
**
** If P2==1 then no insert is performed. argv[0] is the rowid of
** a row to delete.
**
** P1 is a boolean flag. If it is set to true and the xUpdate call
** is successful, then the value returned by sqlite3_last_insert_rowid()
** is set to the value of the rowid for the row just inserted.
**
** P5 is the error actions (OE_Replace, OE_Fail, OE_Ignore, etc) to
** apply in the case of a constraint failure on an insert or update.
*/
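/* For example (illustrative values only): an UPDATE that rewrites the row
** with rowid 5 in a two-column table arrives here as P2==4 with
** argv[] == {5, 5, newValue0, newValue1}. An INSERT that lets the virtual
** table choose its own rowid passes argv[] == {NULL, NULL, value0, value1},
** and a pure DELETE of rowid 5 is P2==1 with argv[] == {5}.
*/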
_174:
;
*(*Tsqlite_int64)(unsafe.Pointer(bp + 928)) = 0
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
goto no_mem
}
pVtab7 = (*TVTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16)))).FpVtab
if pVtab7 == uintptr(0) || (*Tsqlite3_vtab)(unsafe.Pointer(pVtab7)).FpModule == uintptr(0) {
rc = int32(SQLITE_LOCKED)
goto abort_due_to_error
}
pModule6 = (*Tsqlite3_vtab)(unsafe.Pointer(pVtab7)).FpModule
nArg1 = (*TOp)(unsafe.Pointer(pOp)).Fp2
if (*Tsqlite3_module)(unsafe.Pointer(pModule6)).FxUpdate != 0 {
vtabOnConflict = (*Tsqlite3)(unsafe.Pointer(db)).FvtabOnConflict
apArg1 = (*TVdbe)(unsafe.Pointer(p)).FapArg
pX1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
i7 = 0
for {
if !(i7 < nArg1) {
break
}
*(*uintptr)(unsafe.Pointer(apArg1 + uintptr(i7)*8)) = pX1
pX1 += 56
goto _290
_290:
;
i7++
}
(*Tsqlite3)(unsafe.Pointer(db)).FvtabOnConflict = uint8((*TOp)(unsafe.Pointer(pOp)).Fp5)
rc = (*(*func(*libc.TLS, uintptr, int32, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3_module)(unsafe.Pointer(pModule6)).FxUpdate})))(tls, pVtab7, nArg1, apArg1, bp+928)
(*Tsqlite3)(unsafe.Pointer(db)).FvtabOnConflict = vtabOnConflict
_sqlite3VtabImportErrmsg(tls, p, pVtab7)
if rc == SQLITE_OK && (*TOp)(unsafe.Pointer(pOp)).Fp1 != 0 {
(*Tsqlite3)(unsafe.Pointer(db)).FlastRowid = *(*Tsqlite_int64)(unsafe.Pointer(bp + 928))
}
if rc&int32(0xff) == int32(SQLITE_CONSTRAINT) && (*TVTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pOp + 16)))).FbConstraint != 0 {
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5) == int32(OE_Ignore) {
rc = SQLITE_OK
} else {
if int32((*TOp)(unsafe.Pointer(pOp)).Fp5) == int32(OE_Replace) {
v291 = int32(OE_Abort)
} else {
v291 = int32((*TOp)(unsafe.Pointer(pOp)).Fp5)
}
(*TVdbe)(unsafe.Pointer(p)).FerrorAction = uint8(v291)
}
} else {
(*TVdbe)(unsafe.Pointer(p)).FnChange++
}
if rc != 0 {
goto abort_due_to_error
}
}
goto _187
/* Opcode: Pagecount P1 P2 * * *
**
** Write the current number of pages in database P1 to memory cell P2.
*/
_175:
; /* out2 */
pOut = _out2Prerelease(tls, p, pOp)
*(*Ti64)(unsafe.Pointer(pOut)) = int64(_sqlite3BtreeLastPage(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*32))).FpBt))
goto _187
/* Opcode: MaxPgcnt P1 P2 P3 * *
**
** Try to set the maximum page count for database P1 to the value in P3.
** Do not let the maximum page count fall below the current page count and
** do not change the maximum page count value if P3==0.
**
** Store the maximum page count after the change in register P2.
*/
_176:
;
pOut = _out2Prerelease(tls, p, pOp)
pBt3 = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*32))).FpBt
newMax = uint32(0)
if (*TOp)(unsafe.Pointer(pOp)).Fp3 != 0 {
newMax = _sqlite3BtreeLastPage(tls, pBt3)
if newMax < uint32((*TOp)(unsafe.Pointer(pOp)).Fp3) {
newMax = uint32((*TOp)(unsafe.Pointer(pOp)).Fp3)
}
}
*(*Ti64)(unsafe.Pointer(pOut)) = int64(_sqlite3BtreeMaxPageCount(tls, pBt3, newMax))
goto _187
/* Opcode: Function P1 P2 P3 P4 *
** Synopsis: r[P3]=func(r[P2@NP])
**
** Invoke a user function (P4 is a pointer to an sqlite3_context object that
** contains a pointer to the function to be run) with arguments taken
** from register P2 and successors. The number of arguments is in
** the sqlite3_context object that P4 points to.
** The result of the function is stored
** in register P3. Register P3 must not be one of the function inputs.
**
** P1 is a 32-bit bitmask indicating whether or not each argument to the
** function was determined to be constant at compile time. If the first
** argument was constant then bit 0 of P1 is set. This is used to determine
** whether meta data associated with a user function argument using the
** sqlite3_set_auxdata() API may be safely retained until the next
** invocation of this opcode.
**
** See also: AggStep, AggFinal, PureFunc
*/
/* Opcode: PureFunc P1 P2 P3 P4 *
** Synopsis: r[P3]=func(r[P2@NP])
**
** Invoke a user function (P4 is a pointer to an sqlite3_context object that
** contains a pointer to the function to be run) with arguments taken
** from register P2 and successors. The number of arguments is in
** the sqlite3_context object that P4 points to.
** The result of the function is stored
** in register P3. Register P3 must not be one of the function inputs.
**
** P1 is a 32-bit bitmask indicating whether or not each argument to the
** function was determined to be constant at compile time. If the first
** argument was constant then bit 0 of P1 is set. This is used to determine
** whether meta data associated with a user function argument using the
** sqlite3_set_auxdata() API may be safely retained until the next
** invocation of this opcode.
**
** This opcode works exactly like OP_Function. The only difference is in
** its name. This opcode is used in places where the function must be
** deterministic. Some built-in date/time functions can be
** either deterministic or non-deterministic, depending on their arguments.
** When those functions are used in a non-deterministic way, they will check
** to see if they were called using OP_PureFunc instead of OP_Function, and
** if they were, they throw an error.
**
** See also: AggStep, AggFinal, Function
*/
_178:
; /* group */
_177:
;
pCtx2 = *(*uintptr)(unsafe.Pointer(pOp + 16))
/* If this function is inside of a trigger, the register array in aMem[]
** might change from one evaluation to the next. The next block of code
** checks to see if the register array has changed, and if so it
** reinitializes the relevant parts of the sqlite3_context object */
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp3)*56
if (*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FpOut != pOut {
(*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FpVdbe = p
(*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FpOut = pOut
(*Tsqlite3_context)(unsafe.Pointer(pCtx2)).Fenc = encoding
i8 = int32((*Tsqlite3_context)(unsafe.Pointer(pCtx2)).Fargc) - int32(1)
for {
if !(i8 >= 0) {
break
}
*(*uintptr)(unsafe.Pointer(pCtx2 + 48 + uintptr(i8)*8)) = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2+i8)*56
goto _292
_292:
;
i8--
}
}
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pOut)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Null))
(*(*func(*libc.TLS, uintptr, int32, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TFuncDef)(unsafe.Pointer((*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FpFunc)).FxSFunc})))(tls, pCtx2, int32((*Tsqlite3_context)(unsafe.Pointer(pCtx2)).Fargc), pCtx2+48) /* IMP: R-24505-23230 */
/* If the function returned an error, throw an exception */
if (*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FisError != 0 {
if (*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FisError > 0 {
_sqlite3VdbeError(tls, p, __ccgo_ts+3797, libc.VaList(bp+944, Xsqlite3_value_text(tls, pOut)))
rc = (*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FisError
}
_sqlite3VdbeDeleteAuxData(tls, db, p+296, (*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FiOp, (*TOp)(unsafe.Pointer(pOp)).Fp1)
(*Tsqlite3_context)(unsafe.Pointer(pCtx2)).FisError = 0
if rc != 0 {
goto abort_due_to_error
}
}
goto _187
/* Opcode: ClrSubtype P1 * * * *
** Synopsis: r[P1].subtype = 0
**
** Clear the subtype from register P1.
*/
_179:
; /* in1 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
p293 = pIn1 + 20
*(*Tu16)(unsafe.Pointer(p293)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p293))) & ^libc.Int32FromInt32(MEM_Subtype))
goto _187
/* Opcode: GetSubtype P1 P2 * * *
** Synopsis: r[P2] = r[P1].subtype
**
** Extract the subtype value from register P1 and write that subtype
** into register P2. If P1 has no subtype, then P1 gets a NULL.
*/
_180:
; /* in1 out2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Subtype) != 0 {
_sqlite3VdbeMemSetInt64(tls, pOut, int64((*TMem)(unsafe.Pointer(pIn1)).FeSubtype))
} else {
_sqlite3VdbeMemSetNull(tls, pOut)
}
goto _187
/* Opcode: SetSubtype P1 P2 * * *
** Synopsis: r[P2].subtype = r[P1]
**
** Set the subtype value of register P2 to the integer from register P1.
** If P1 is NULL, clear the subtype from P2.
*/
_181:
; /* in1 out2 */
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
pOut = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp2)*56
if int32((*TMem)(unsafe.Pointer(pIn1)).Fflags)&int32(MEM_Null) != 0 {
p294 = pOut + 20
*(*Tu16)(unsafe.Pointer(p294)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p294))) & ^libc.Int32FromInt32(MEM_Subtype))
} else {
p295 = pOut + 20
*(*Tu16)(unsafe.Pointer(p295)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p295))) | libc.Int32FromInt32(MEM_Subtype))
(*TMem)(unsafe.Pointer(pOut)).FeSubtype = uint8(*(*Ti64)(unsafe.Pointer(pIn1)) & libc.Int64FromInt32(0xff))
}
goto _187
/* Opcode: FilterAdd P1 * P3 P4 *
** Synopsis: filter(P1) += key(P3@P4)
**
** Compute a hash on the P4 registers starting with r[P3] and
** add that hash to the bloom filter contained in r[P1].
*/
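/* For example (illustrative numbers only): with an 8-byte (64-bit) filter in
** r[P1], a computed hash of 75 is reduced to 75%64 == 11, so FilterAdd sets
** bit (11&7) == 3 of byte 11/8 == 1, and a later Filter on the same key
** tests that same bit. The hash itself comes from filterHash().
*/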
_182:
;
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
h = _filterHash(tls, aMem, pOp)
h %= uint64((*TMem)(unsafe.Pointer(pIn1)).Fn * libc.Int32FromInt32(8))
p296 = (*TMem)(unsafe.Pointer(pIn1)).Fz + uintptr(h/uint64(8))
*(*int8)(unsafe.Pointer(p296)) = int8(int32(*(*int8)(unsafe.Pointer(p296))) | libc.Int32FromInt32(1)<<(h&libc.Uint64FromInt32(7)))
goto _187
/* Opcode: Filter P1 P2 P3 P4 *
** Synopsis: if key(P3@P4) not in filter(P1) goto P2
**
** Compute a hash on the key contained in the P4 registers starting
** with r[P3]. Check to see if that hash is found in the
** bloom filter hosted by register P1. If it is not present then
** maybe jump to P2. Otherwise fall through.
**
** False negatives are harmless. It is always safe to fall through,
** even if the value is in the bloom filter. A false negative causes
** more CPU cycles to be used, but it should still yield the correct
** answer. However, an incorrect answer may well arise from a
** false positive - if the jump is taken when it should fall through.
*/
_183:
;
pIn1 = aMem + uintptr((*TOp)(unsafe.Pointer(pOp)).Fp1)*56
h1 = _filterHash(tls, aMem, pOp)
h1 %= uint64((*TMem)(unsafe.Pointer(pIn1)).Fn * libc.Int32FromInt32(8))
if int32(*(*int8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pIn1)).Fz + uintptr(h1/uint64(8)))))&(int32(1)<<(h1&uint64(7))) == 0 {
*(*Tu32)(unsafe.Pointer(p + 212 + 8*4))++
goto jump_to_p2
} else {
*(*Tu32)(unsafe.Pointer(p + 212 + 7*4))++
}
goto _187
/* Opcode: Trace P1 P2 * P4 *
**
** Write P4 on the statement trace output if statement tracing is
** enabled.
**
** Operand P1 must be 0x7fffffff and P2 must be positive.
*/
/* Opcode: Init P1 P2 P3 P4 *
** Synopsis: Start at P2
**
** Programs contain a single instance of this opcode as the very first
** opcode.
**
** If tracing is enabled (by the sqlite3_trace() interface), then
** the UTF-8 string contained in P4 is emitted on the trace callback.
** Or if P4 is blank, use the string returned by sqlite3_sql().
**
** If P2 is not zero, jump to instruction P2.
**
** Increment the value of P1 so that OP_Once opcodes will jump the
** first time they are evaluated for this run.
**
** If P3 is not zero, then it is an address to jump to if an SQLITE_CORRUPT
** error is encountered.
*/
_185:
;
_184:
;
/* If the P4 argument is not NULL, then it must be an SQL comment string.
** The "--" string is broken up to prevent false-positives with srcck1.c.
**
** This assert() provides evidence for:
** EVIDENCE-OF: R-50676-09860 The callback can compute the same text that
** would have been returned by the legacy sqlite3_trace() interface by
** using the X argument when X begins with "--" and invoking
** sqlite3_expanded_sql(P) otherwise.
*/
/* OP_Init is always instruction 0 */
if v299 = int32((*Tsqlite3)(unsafe.Pointer(db)).FmTrace)&(libc.Int32FromInt32(SQLITE_TRACE_STMT)|libc.Int32FromInt32(SQLITE_TRACE_LEGACY)) != 0 && int32((*TVdbe)(unsafe.Pointer(p)).FminWriteFileFormat) != int32(254); v299 {
if *(*uintptr)(unsafe.Pointer(pOp + 16)) != 0 {
v298 = *(*uintptr)(unsafe.Pointer(pOp + 16))
} else {
v298 = (*TVdbe)(unsafe.Pointer(p)).FzSql
}
v297 = v298
zTrace = v297
}
if v299 && v297 != uintptr(0) {
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmTrace)&int32(SQLITE_TRACE_LEGACY) != 0 {
z2 = _sqlite3VdbeExpandSql(tls, p, zTrace)
(*(*func(*libc.TLS, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).Ftrace.FxLegacy})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpTraceArg, z2)
Xsqlite3_free(tls, z2)
} else {
if (*Tsqlite3)(unsafe.Pointer(db)).FnVdbeExec > int32(1) {
z3 = _sqlite3MPrintf(tls, db, __ccgo_ts+6265, libc.VaList(bp+944, zTrace))
(*(*func(*libc.TLS, Tu32, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{*(*uintptr)(unsafe.Pointer(&(*Tsqlite3)(unsafe.Pointer(db)).Ftrace))})))(tls, uint32(SQLITE_TRACE_STMT), (*Tsqlite3)(unsafe.Pointer(db)).FpTraceArg, p, z3)
_sqlite3DbFree(tls, db, z3)
} else {
(*(*func(*libc.TLS, Tu32, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{*(*uintptr)(unsafe.Pointer(&(*Tsqlite3)(unsafe.Pointer(db)).Ftrace))})))(tls, uint32(SQLITE_TRACE_STMT), (*Tsqlite3)(unsafe.Pointer(db)).FpTraceArg, p, zTrace)
}
}
}
if (*TOp)(unsafe.Pointer(pOp)).Fp1 >= _sqlite3Config.FiOnceResetThreshold {
if int32((*TOp)(unsafe.Pointer(pOp)).Fopcode) == int32(OP_Trace) {
goto _187
}
i9 = int32(1)
for {
if !(i9 < (*TVdbe)(unsafe.Pointer(p)).FnOp) {
break
}
if int32((*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(i9)*24))).Fopcode) == int32(OP_Once) {
(*(*TOp)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(p)).FaOp + uintptr(i9)*24))).Fp1 = 0
}
goto _300
_300:
;
i9++
}
(*TOp)(unsafe.Pointer(pOp)).Fp1 = 0
}
(*TOp)(unsafe.Pointer(pOp)).Fp1++
*(*Tu32)(unsafe.Pointer(p + 212 + 6*4))++
goto jump_to_p2
/* Opcode: Noop * * * * *
**
** Do nothing. This instruction is often useful as a jump
** destination.
*/
/*
** The magic Explain opcodes are only inserted when explain==2 (which
** is to say when the EXPLAIN QUERY PLAN syntax is used).
** This opcode records information from the optimizer. It is
** the same as a no-op. This opcode never appears in a real VM program.
*/
_186:
; /* This is really OP_Noop, OP_Explain */
goto _187
/*****************************************************************************
** The cases of the switch statement above this line should all be indented
** by 6 spaces. But the left-most 6 spaces have been removed to improve the
** readability. From this point on down, the normal indentation rules are
** restored.
*****************************************************************************/
_187:
;
/* The following code adds nothing to the actual functionality
** of the program. It is only here for testing and debugging.
** On the other hand, it does burn CPU cycles every time through
** the evaluator loop. So we can leave it out when NDEBUG is defined.
*/
goto _1
_1:
;
pOp += 24
} /* The end of the for(;;) loop the loops through opcodes */
/* If we reach this point, it means that execution is finished with
** an error of some kind.
*/
goto abort_due_to_error
abort_due_to_error:
;
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
rc = int32(SQLITE_NOMEM)
} else {
if rc == libc.Int32FromInt32(SQLITE_IOERR)|libc.Int32FromInt32(33)< 0 {
_sqlite3ResetOneSchema(tls, db, int32(resetSchemaOnFault)-int32(1))
}
/* This is the only way out of this procedure. We have to
** release the mutexes on btrees that were acquired at the
** top. */
goto vdbe_return
vdbe_return:
;
for nVmStep >= nProgressLimit && (*Tsqlite3)(unsafe.Pointer(db)).FxProgress != uintptr(0) {
nProgressLimit += uint64((*Tsqlite3)(unsafe.Pointer(db)).FnProgressOps)
if (*(*func(*libc.TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*Tsqlite3)(unsafe.Pointer(db)).FxProgress})))(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpProgressArg) != 0 {
nProgressLimit = libc.Uint64FromUint32(0xffffffff) | libc.Uint64FromUint32(0xffffffff)< int32(4) {
(*TVdbe)(unsafe.Pointer(v)).Fpc = int32(4)
rc = _sqlite3VdbeExec(tls, v)
} else {
rc = Xsqlite3_step(tls, (*TIncrblob)(unsafe.Pointer(p)).FpStmt)
}
if rc == int32(SQLITE_ROW) {
pC = *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).FapCsr))
if int32((*TVdbeCursor)(unsafe.Pointer(pC)).FnHdrParsed) > int32((*TIncrblob)(unsafe.Pointer(p)).FiCol) {
v1 = *(*Tu32)(unsafe.Pointer(pC + 120 + uintptr((*TIncrblob)(unsafe.Pointer(p)).FiCol)*4))
} else {
v1 = uint32(0)
}
type1 = v1
if type1 < uint32(12) {
if type1 == uint32(0) {
v2 = __ccgo_ts + 1636
} else {
if type1 == uint32(7) {
v3 = __ccgo_ts + 6303
} else {
v3 = __ccgo_ts + 6308
}
v2 = v3
}
zErr = _sqlite3MPrintf(tls, (*TIncrblob)(unsafe.Pointer(p)).Fdb, __ccgo_ts+6316, libc.VaList(bp+8, v2))
rc = int32(SQLITE_ERROR)
Xsqlite3_finalize(tls, (*TIncrblob)(unsafe.Pointer(p)).FpStmt)
(*TIncrblob)(unsafe.Pointer(p)).FpStmt = uintptr(0)
} else {
(*TIncrblob)(unsafe.Pointer(p)).FiOffset = int32(*(*Tu32)(unsafe.Pointer(pC + 120 + uintptr(int32((*TIncrblob)(unsafe.Pointer(p)).FiCol)+int32((*TVdbeCursor)(unsafe.Pointer(pC)).FnField))*4)))
(*TIncrblob)(unsafe.Pointer(p)).FnByte = int32(_sqlite3VdbeSerialTypeLen(tls, type1))
(*TIncrblob)(unsafe.Pointer(p)).FpCsr = *(*uintptr)(unsafe.Pointer(pC + 48))
_sqlite3BtreeIncrblobCursor(tls, (*TIncrblob)(unsafe.Pointer(p)).FpCsr)
}
}
if rc == int32(SQLITE_ROW) {
rc = SQLITE_OK
} else {
if (*TIncrblob)(unsafe.Pointer(p)).FpStmt != 0 {
rc = Xsqlite3_finalize(tls, (*TIncrblob)(unsafe.Pointer(p)).FpStmt)
(*TIncrblob)(unsafe.Pointer(p)).FpStmt = uintptr(0)
if rc == SQLITE_OK {
zErr = _sqlite3MPrintf(tls, (*TIncrblob)(unsafe.Pointer(p)).Fdb, __ccgo_ts+6345, libc.VaList(bp+8, iRow))
rc = int32(SQLITE_ERROR)
} else {
zErr = _sqlite3MPrintf(tls, (*TIncrblob)(unsafe.Pointer(p)).Fdb, __ccgo_ts+3797, libc.VaList(bp+8, Xsqlite3_errmsg(tls, (*TIncrblob)(unsafe.Pointer(p)).Fdb)))
}
}
}
*(*uintptr)(unsafe.Pointer(pzErr)) = zErr
return rc
}
// C documentation
//
// /*
// ** Open a blob handle.
// */
func Xsqlite3_blob_open(tls *libc.TLS, db uintptr, zDb uintptr, zTable uintptr, zColumn uintptr, iRow Tsqlite_int64, wrFlag int32, ppBlob uintptr) (r int32) {
bp := tls.Alloc(448)
defer tls.Free(448)
var aOp, pBlob, pFKey, pIdx, pTab, v, zFault, v7 uintptr
var iCol, iDb, j, j1, nAttempt, rc, v6 int32
var _ /* sParse at bp+8 */ TParse
var _ /* zErr at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = aOp, iCol, iDb, j, j1, nAttempt, pBlob, pFKey, pIdx, pTab, rc, v, zFault, v6, v7
nAttempt = 0 /* Index of zColumn in row-record */
rc = SQLITE_OK
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
pBlob = uintptr(0)
*(*uintptr)(unsafe.Pointer(ppBlob)) = uintptr(0)
wrFlag = libc.BoolInt32(!!(wrFlag != 0)) /* wrFlag = (wrFlag ? 1 : 0); */
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
pBlob = _sqlite3DbMallocZero(tls, db, uint64(56))
for int32(1) != 0 {
_sqlite3ParseObjectInit(tls, bp+8, db)
if !(pBlob != 0) {
goto blob_open_out
}
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
_sqlite3BtreeEnterAll(tls, db)
pTab = _sqlite3LocateTable(tls, bp+8, uint32(0), zTable, zDb)
if pTab != 0 && int32((*TTable)(unsafe.Pointer(pTab)).FeTabType) == int32(TABTYP_VTAB) {
pTab = uintptr(0)
_sqlite3ErrorMsg(tls, bp+8, __ccgo_ts+6365, libc.VaList(bp+440, zTable))
}
if pTab != 0 && !((*TTable)(unsafe.Pointer(pTab)).FtabFlags&libc.Uint32FromInt32(TF_WithoutRowid) == libc.Uint32FromInt32(0)) {
pTab = uintptr(0)
_sqlite3ErrorMsg(tls, bp+8, __ccgo_ts+6395, libc.VaList(bp+440, zTable))
}
if pTab != 0 && int32((*TTable)(unsafe.Pointer(pTab)).FeTabType) == int32(TABTYP_VIEW) {
pTab = uintptr(0)
_sqlite3ErrorMsg(tls, bp+8, __ccgo_ts+6431, libc.VaList(bp+440, zTable))
}
if !(pTab != 0) {
if (*(*TParse)(unsafe.Pointer(bp + 8))).FzErrMsg != 0 {
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = (*(*TParse)(unsafe.Pointer(bp + 8))).FzErrMsg
(*(*TParse)(unsafe.Pointer(bp + 8))).FzErrMsg = uintptr(0)
}
rc = int32(SQLITE_ERROR)
_sqlite3BtreeLeaveAll(tls, db)
goto blob_open_out
}
(*TIncrblob)(unsafe.Pointer(pBlob)).FpTab = pTab
(*TIncrblob)(unsafe.Pointer(pBlob)).FzDb = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(_sqlite3SchemaToIndex(tls, db, (*TTable)(unsafe.Pointer(pTab)).FpSchema))*32))).FzDbSName
/* Now search pTab for the exact column. */
iCol = 0
for {
if !(iCol < int32((*TTable)(unsafe.Pointer(pTab)).FnCol)) {
break
}
if _sqlite3StrICmp(tls, (*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer(pTab)).FaCol + uintptr(iCol)*16))).FzCnName, zColumn) == 0 {
break
}
goto _1
_1:
;
iCol++
}
if iCol == int32((*TTable)(unsafe.Pointer(pTab)).FnCol) {
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = _sqlite3MPrintf(tls, db, __ccgo_ts+6452, libc.VaList(bp+440, zColumn))
rc = int32(SQLITE_ERROR)
_sqlite3BtreeLeaveAll(tls, db)
goto blob_open_out
}
/* If the value is being opened for writing, check that the
** column is not indexed, and that it is not part of a foreign key.
*/
if wrFlag != 0 {
zFault = uintptr(0)
if (*Tsqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_ForeignKeys) != 0 {
pFKey = (*(*struct {
FaddColOffset int32
FpFKey uintptr
FpDfltList uintptr
})(unsafe.Pointer(pTab + 64))).FpFKey
for {
if !(pFKey != 0) {
break
}
j = 0
for {
if !(j < (*TFKey)(unsafe.Pointer(pFKey)).FnCol) {
break
}
if (*(*TsColMap)(unsafe.Pointer(pFKey + 64 + uintptr(j)*16))).FiFrom == iCol {
zFault = __ccgo_ts + 6473
}
goto _3
_3:
;
j++
}
goto _2
_2:
;
pFKey = (*TFKey)(unsafe.Pointer(pFKey)).FpNextFrom
}
}
pIdx = (*TTable)(unsafe.Pointer(pTab)).FpIndex
for {
if !(pIdx != 0) {
break
}
j1 = 0
for {
if !(j1 < int32((*TIndex)(unsafe.Pointer(pIdx)).FnKeyCol)) {
break
}
/* FIXME: Be smarter about indexes that use expressions */
if int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(j1)*2))) == iCol || int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(j1)*2))) == -int32(2) {
zFault = __ccgo_ts + 6485
}
goto _5
_5:
;
j1++
}
goto _4
_4:
;
pIdx = (*TIndex)(unsafe.Pointer(pIdx)).FpNext
}
if zFault != 0 {
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = _sqlite3MPrintf(tls, db, __ccgo_ts+6493, libc.VaList(bp+440, zFault))
rc = int32(SQLITE_ERROR)
_sqlite3BtreeLeaveAll(tls, db)
goto blob_open_out
}
}
(*TIncrblob)(unsafe.Pointer(pBlob)).FpStmt = _sqlite3VdbeCreate(tls, bp+8)
if (*TIncrblob)(unsafe.Pointer(pBlob)).FpStmt != 0 {
v = (*TIncrblob)(unsafe.Pointer(pBlob)).FpStmt
iDb = _sqlite3SchemaToIndex(tls, db, (*TTable)(unsafe.Pointer(pTab)).FpSchema)
_sqlite3VdbeAddOp4Int(tls, v, int32(OP_Transaction), iDb, wrFlag, (*TSchema)(unsafe.Pointer((*TTable)(unsafe.Pointer(pTab)).FpSchema)).Fschema_cookie, (*TSchema)(unsafe.Pointer((*TTable)(unsafe.Pointer(pTab)).FpSchema)).FiGeneration)
_sqlite3VdbeChangeP5(tls, v, uint16(1))
aOp = _sqlite3VdbeAddOpList(tls, v, int32(libc.Uint64FromInt64(24)/libc.Uint64FromInt64(4)), uintptr(unsafe.Pointer(&_openBlob)), _iLn)
/* Make sure a mutex is held on the table to be accessed */
_sqlite3VdbeUsesBtree(tls, v, iDb)
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed) == 0 {
/* Configure the OP_TableLock instruction */
(*(*TVdbeOp)(unsafe.Pointer(aOp))).Fp1 = iDb
(*(*TVdbeOp)(unsafe.Pointer(aOp))).Fp2 = int32((*TTable)(unsafe.Pointer(pTab)).Ftnum)
(*(*TVdbeOp)(unsafe.Pointer(aOp))).Fp3 = wrFlag
_sqlite3VdbeChangeP4(tls, v, int32(2), (*TTable)(unsafe.Pointer(pTab)).FzName, P4_TRANSIENT)
}
if int32((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed) == 0 {
/* Remove either the OP_OpenWrite or OpenRead. Set the P2
** parameter of the other to pTab->tnum. */
if wrFlag != 0 {
(*(*TVdbeOp)(unsafe.Pointer(aOp + 1*24))).Fopcode = uint8(OP_OpenWrite)
}
(*(*TVdbeOp)(unsafe.Pointer(aOp + 1*24))).Fp2 = int32((*TTable)(unsafe.Pointer(pTab)).Ftnum)
(*(*TVdbeOp)(unsafe.Pointer(aOp + 1*24))).Fp3 = iDb
/* Configure the number of columns. Configure the cursor to
** think that the table has one more column than it really
** does. An OP_Column to retrieve this imaginary column will
** always return an SQL NULL. This is useful because it means
** we can invoke OP_Column to fill in the vdbe cursor's type
** and offset cache without causing any IO.
*/
(*(*TVdbeOp)(unsafe.Pointer(aOp + 1*24))).Fp4type = int8(-libc.Int32FromInt32(3))
*(*int32)(unsafe.Pointer(aOp + 1*24 + 16)) = int32((*TTable)(unsafe.Pointer(pTab)).FnCol) + int32(1)
(*(*TVdbeOp)(unsafe.Pointer(aOp + 3*24))).Fp2 = int32((*TTable)(unsafe.Pointer(pTab)).FnCol)
(*(*TParse)(unsafe.Pointer(bp + 8))).FnVar = 0
(*(*TParse)(unsafe.Pointer(bp + 8))).FnMem = int32(1)
(*(*TParse)(unsafe.Pointer(bp + 8))).FnTab = int32(1)
_sqlite3VdbeMakeReady(tls, v, bp+8)
}
}
(*TIncrblob)(unsafe.Pointer(pBlob)).FiCol = uint16(iCol)
(*TIncrblob)(unsafe.Pointer(pBlob)).Fdb = db
_sqlite3BtreeLeaveAll(tls, db)
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
goto blob_open_out
}
rc = _blobSeekToRow(tls, pBlob, iRow, bp)
nAttempt++
v6 = nAttempt
if v6 >= int32(SQLITE_MAX_SCHEMA_RETRY) || rc != int32(SQLITE_SCHEMA) {
break
}
_sqlite3ParseObjectReset(tls, bp+8)
}
goto blob_open_out
blob_open_out:
;
if rc == SQLITE_OK && int32((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed) == 0 {
*(*uintptr)(unsafe.Pointer(ppBlob)) = pBlob
} else {
if pBlob != 0 && (*TIncrblob)(unsafe.Pointer(pBlob)).FpStmt != 0 {
_sqlite3VdbeFinalize(tls, (*TIncrblob)(unsafe.Pointer(pBlob)).FpStmt)
}
_sqlite3DbFree(tls, db, pBlob)
}
if *(*uintptr)(unsafe.Pointer(bp)) != 0 {
v7 = __ccgo_ts + 3797
} else {
v7 = libc.UintptrFromInt32(0)
}
_sqlite3ErrorWithMsg(tls, db, rc, v7, libc.VaList(bp+440, *(*uintptr)(unsafe.Pointer(bp))))
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
_sqlite3ParseObjectReset(tls, bp+8)
rc = _sqlite3ApiExit(tls, db, rc)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
return rc
}
/* This VDBE program seeks a btree cursor to the identified
** db/table/row entry. The reason for using a vdbe program instead
** of writing code to use the b-tree layer directly is that the
** vdbe program will take advantage of the various transaction,
** locking and error handling infrastructure built into the vdbe.
**
** After seeking the cursor, the vdbe executes an OP_ResultRow.
** Code external to the Vdbe then "borrows" the b-tree cursor and
** uses it to implement the blob_read(), blob_write() and
** blob_bytes() functions.
**
** The sqlite3_blob_close() function finalizes the vdbe program,
** which closes the b-tree cursor and (possibly) commits the
** transaction.
*/
var _iLn int32
var _openBlob = [6]TVdbeOpList{
0: {
Fopcode: uint8(OP_TableLock),
},
1: {
Fopcode: uint8(OP_OpenRead),
},
2: {
Fopcode: uint8(OP_NotExists),
Fp2: int8(5),
Fp3: int8(1),
},
3: {
Fopcode: uint8(OP_Column),
Fp3: int8(1),
},
4: {
Fopcode: uint8(OP_ResultRow),
Fp1: int8(1),
},
5: {
Fopcode: uint8(OP_Halt),
},
}
// C documentation
//
// /*
// ** Close a blob handle that was previously created using
// ** sqlite3_blob_open().
// */
func Xsqlite3_blob_close(tls *libc.TLS, pBlob uintptr) (r int32) {
var db, p, pStmt uintptr
var rc int32
_, _, _, _ = db, p, pStmt, rc
p = pBlob
if p != 0 {
pStmt = (*TIncrblob)(unsafe.Pointer(p)).FpStmt
db = (*TIncrblob)(unsafe.Pointer(p)).Fdb
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
_sqlite3DbFree(tls, db, p)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
rc = Xsqlite3_finalize(tls, pStmt)
} else {
rc = SQLITE_OK
}
return rc
}
// C documentation
//
// /*
// ** Perform a read or write operation on a blob
// */
func _blobReadWrite(tls *libc.TLS, pBlob uintptr, z uintptr, n int32, iOffset int32, xCall uintptr) (r int32) {
var db, p, v uintptr
var iKey Tsqlite3_int64
var rc int32
_, _, _, _, _ = db, iKey, p, rc, v
p = pBlob
if p == uintptr(0) {
return _sqlite3MisuseError(tls, int32(102190))
}
db = (*TIncrblob)(unsafe.Pointer(p)).Fdb
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
v = (*TIncrblob)(unsafe.Pointer(p)).FpStmt
if n < 0 || iOffset < 0 || int64(iOffset)+int64(n) > int64((*TIncrblob)(unsafe.Pointer(p)).FnByte) {
/* Request is out of range. Return a transient error. */
rc = int32(SQLITE_ERROR)
} else {
if v == uintptr(0) {
/* If there is no statement handle, then the blob-handle has
** already been invalidated. Return SQLITE_ABORT in this case.
*/
rc = int32(SQLITE_ABORT)
} else {
/* Call either BtreeData() or BtreePutData(). If SQLITE_ABORT is
** returned, clean-up the statement handle.
*/
_sqlite3BtreeEnterCursor(tls, (*TIncrblob)(unsafe.Pointer(p)).FpCsr)
if xCall == __ccgo_fp(_sqlite3BtreePutData) && (*Tsqlite3)(unsafe.Pointer(db)).FxPreUpdateCallback != 0 {
iKey = _sqlite3BtreeIntegerKey(tls, (*TIncrblob)(unsafe.Pointer(p)).FpCsr)
_sqlite3VdbePreUpdateHook(tls, v, *(*uintptr)(unsafe.Pointer((*TVdbe)(unsafe.Pointer(v)).FapCsr)), int32(SQLITE_DELETE), (*TIncrblob)(unsafe.Pointer(p)).FzDb, (*TIncrblob)(unsafe.Pointer(p)).FpTab, iKey, -int32(1), int32((*TIncrblob)(unsafe.Pointer(p)).FiCol))
}
rc = (*(*func(*libc.TLS, uintptr, Tu32, Tu32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{xCall})))(tls, (*TIncrblob)(unsafe.Pointer(p)).FpCsr, uint32(iOffset+(*TIncrblob)(unsafe.Pointer(p)).FiOffset), uint32(n), z)
_sqlite3BtreeLeaveCursor(tls, (*TIncrblob)(unsafe.Pointer(p)).FpCsr)
if rc == int32(SQLITE_ABORT) {
_sqlite3VdbeFinalize(tls, v)
(*TIncrblob)(unsafe.Pointer(p)).FpStmt = uintptr(0)
} else {
(*TVdbe)(unsafe.Pointer(v)).Frc = rc
}
}
}
_sqlite3Error(tls, db, rc)
rc = _sqlite3ApiExit(tls, db, rc)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
return rc
}
// C documentation
//
// /*
// ** Read data from a blob handle.
// */
func Xsqlite3_blob_read(tls *libc.TLS, pBlob uintptr, z uintptr, n int32, iOffset int32) (r int32) {
return _blobReadWrite(tls, pBlob, z, n, iOffset, __ccgo_fp(_sqlite3BtreePayloadChecked))
}
// C documentation
//
// /*
// ** Write data to a blob handle.
// */
func Xsqlite3_blob_write(tls *libc.TLS, pBlob uintptr, z uintptr, n int32, iOffset int32) (r int32) {
return _blobReadWrite(tls, pBlob, z, n, iOffset, __ccgo_fp(_sqlite3BtreePutData))
}
// C documentation
//
// /*
// ** Query a blob handle for the size of the data.
// **
// ** The Incrblob.nByte field is fixed for the lifetime of the Incrblob
// ** so no mutex is required for access.
// */
func Xsqlite3_blob_bytes(tls *libc.TLS, pBlob uintptr) (r int32) {
var p uintptr
var v1 int32
_, _ = p, v1
p = pBlob
if p != 0 && (*TIncrblob)(unsafe.Pointer(p)).FpStmt != 0 {
v1 = (*TIncrblob)(unsafe.Pointer(p)).FnByte
} else {
v1 = 0
}
return v1
}
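// exampleReadWholeBlob is an illustrative sketch only (it is not part of the
// generated API) showing how the incremental-blob wrappers above fit
// together: open a read-only handle on zDb.zTable.zColumn for row iRow,
// query its size, copy the value into a Go slice, and close the handle.
// It assumes db is an open connection and that zDb, zTable and zColumn are
// NUL-terminated strings already allocated in libc-managed memory.
func exampleReadWholeBlob(tls *libc.TLS, db, zDb, zTable, zColumn uintptr, iRow Tsqlite_int64) ([]byte, int32) {
	bp := tls.Alloc(8) // out-parameter slot for the sqlite3_blob* handle
	defer tls.Free(8)
	if rc := Xsqlite3_blob_open(tls, db, zDb, zTable, zColumn, iRow, 0, bp); rc != SQLITE_OK {
		return nil, rc
	}
	pBlob := *(*uintptr)(unsafe.Pointer(bp))
	n := Xsqlite3_blob_bytes(tls, pBlob)
	var buf []byte
	var rc int32 = SQLITE_OK
	if n > 0 {
		z := tls.Alloc(int(n)) // scratch buffer in libc-managed memory
		rc = Xsqlite3_blob_read(tls, pBlob, z, n, 0)
		if rc == SQLITE_OK {
			buf = make([]byte, n)
			copy(buf, unsafe.Slice((*byte)(unsafe.Pointer(z)), n))
		}
		tls.Free(int(n))
	}
	if rc2 := Xsqlite3_blob_close(tls, pBlob); rc == SQLITE_OK {
		rc = rc2
	}
	if rc != SQLITE_OK {
		return nil, rc
	}
	return buf, SQLITE_OK
}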
// C documentation
//
// /*
// ** Move an existing blob handle to point to a different row of the same
// ** database table.
// **
// ** If an error occurs, or if the specified row does not exist or does not
// ** contain a blob or text value, then an error code is returned and the
// ** database handle error code and message set. If this happens, then all
// ** subsequent calls to sqlite3_blob_xxx() functions (except blob_close())
// ** immediately return SQLITE_ABORT.
// */
func Xsqlite3_blob_reopen(tls *libc.TLS, pBlob uintptr, iRow Tsqlite3_int64) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var db, p, v1 uintptr
var rc int32
var _ /* zErr at bp+0 */ uintptr
_, _, _, _ = db, p, rc, v1
p = pBlob
if p == uintptr(0) {
return _sqlite3MisuseError(tls, int32(102290))
}
db = (*TIncrblob)(unsafe.Pointer(p)).Fdb
Xsqlite3_mutex_enter(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
if (*TIncrblob)(unsafe.Pointer(p)).FpStmt == uintptr(0) {
/* If there is no statement handle, then the blob-handle has
** already been invalidated. Return SQLITE_ABORT in this case.
*/
rc = int32(SQLITE_ABORT)
} else {
(*TVdbe)(unsafe.Pointer((*TIncrblob)(unsafe.Pointer(p)).FpStmt)).Frc = SQLITE_OK
rc = _blobSeekToRow(tls, p, iRow, bp)
if rc != SQLITE_OK {
if *(*uintptr)(unsafe.Pointer(bp)) != 0 {
v1 = __ccgo_ts + 3797
} else {
v1 = libc.UintptrFromInt32(0)
}
_sqlite3ErrorWithMsg(tls, db, rc, v1, libc.VaList(bp+16, *(*uintptr)(unsafe.Pointer(bp))))
_sqlite3DbFree(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
}
}
rc = _sqlite3ApiExit(tls, db, rc)
Xsqlite3_mutex_leave(tls, (*Tsqlite3)(unsafe.Pointer(db)).Fmutex)
return rc
}
/************** End of vdbeblob.c ********************************************/
/************** Begin file vdbesort.c ****************************************/
/*
** 2011-07-09
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains code for the VdbeSorter object, used in concert with
** a VdbeCursor to sort large numbers of keys for CREATE INDEX statements
** or by SELECT statements with ORDER BY clauses that cannot be satisfied
** using indexes and without LIMIT clauses.
**
** The VdbeSorter object implements a multi-threaded external merge sort
** algorithm that is efficient even if the number of elements being sorted
** exceeds the available memory.
**
** Here is the (internal, non-API) interface between this module and the
** rest of the SQLite system:
**
** sqlite3VdbeSorterInit() Create a new VdbeSorter object.
**
** sqlite3VdbeSorterWrite() Add a single new row to the VdbeSorter
** object. The row is a binary blob in the
** OP_MakeRecord format that contains both
** the ORDER BY key columns and result columns
** in the case of a SELECT w/ ORDER BY, or
** the complete record for an index entry
** in the case of a CREATE INDEX.
**
** sqlite3VdbeSorterRewind() Sort all content previously added.
** Position the read cursor on the
** first sorted element.
**
** sqlite3VdbeSorterNext() Advance the read cursor to the next sorted
** element.
**
** sqlite3VdbeSorterRowkey() Return the complete binary blob for the
** row currently under the read cursor.
**
** sqlite3VdbeSorterCompare() Compare the binary blob for the row
** currently under the read cursor against
** another binary blob X and report if
** X is strictly less than the read cursor.
** Used to enforce uniqueness in a
** CREATE UNIQUE INDEX statement.
**
** sqlite3VdbeSorterClose() Close the VdbeSorter object and reclaim
** all resources.
**
** sqlite3VdbeSorterReset() Refurbish the VdbeSorter for reuse. This
** is like Close() followed by Init() only
** much faster.
**
** The interfaces above must be called in a particular order. Write() can
** only occur in between Init()/Reset() and Rewind(). Next(), Rowkey(), and
** Compare() can only occur in between Rewind() and Close()/Reset(). i.e.
**
** Init()
** for each record: Write()
** Rewind()
** Rowkey()/Compare()
** Next()
** Close()
**
** Algorithm:
**
** Records passed to the sorter via calls to Write() are initially held
** unsorted in main memory. Assuming the amount of memory used never exceeds
** a threshold, when Rewind() is called the set of records is sorted using
** an in-memory merge sort. In this case, no temporary files are required
** and subsequent calls to Rowkey(), Next() and Compare() read records
** directly from main memory.
**
** If the amount of space used to store records in main memory exceeds the
** threshold, then the set of records currently in memory are sorted and
** written to a temporary file in "Packed Memory Array" (PMA) format.
** A PMA created at this point is known as a "level-0 PMA". Higher levels
** of PMAs may be created by merging existing PMAs together - for example
** merging two or more level-0 PMAs together creates a level-1 PMA.
**
** The threshold for the amount of main memory to use before flushing
** records to a PMA is roughly the same as the limit configured for the
** page-cache of the main database. Specifically, the threshold is set to
** the value returned by "PRAGMA main.page_size" multiplied by
** that returned by "PRAGMA main.cache_size", in bytes.
**
** If the sorter is running in single-threaded mode, then all PMAs generated
** are appended to a single temporary file. Or, if the sorter is running in
** multi-threaded mode then up to (N+1) temporary files may be opened, where
** N is the configured number of worker threads. In this case, instead of
** sorting the records and writing the PMA to a temporary file itself, the
** calling thread usually launches a worker thread to do so. Except, if
** there are already N worker threads running, the main thread does the work
** itself.
**
** The sorter is running in multi-threaded mode if (a) the library was built
** with pre-processor symbol SQLITE_MAX_WORKER_THREADS set to a value greater
** than zero, and (b) worker threads have been enabled at runtime by calling
** "PRAGMA threads=N" with some value of N greater than 0.
**
** When Rewind() is called, any data remaining in memory is flushed to a
** final PMA. So at this point the data is stored in some number of sorted
** PMAs within temporary files on disk.
**
** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the
** sorter is running in single-threaded mode, then these PMAs are merged
** incrementally as keys are retrieved from the sorter by the VDBE. The
** MergeEngine object, described in further detail below, performs this
** merge.
**
** Or, if running in multi-threaded mode, then a background thread is
** launched to merge the existing PMAs. Once the background thread has
** merged T bytes of data into a single sorted PMA, the main thread
** begins reading keys from that PMA while the background thread proceeds
** with merging the next T bytes of data. And so on.
**
** Parameter T is set to half the value of the memory threshold used
** by Write() above to determine when to create a new PMA.
**
** If there are more than SORTER_MAX_MERGE_COUNT PMAs in total when
** Rewind() is called, then a hierarchy of incremental-merges is used.
** First, T bytes of data from the first SORTER_MAX_MERGE_COUNT PMAs on
** disk are merged together. Then T bytes of data from the second set, and
** so on, such that no operation ever merges more than SORTER_MAX_MERGE_COUNT
** PMAs at a time. This is done to improve locality.
**
** If running in multi-threaded mode and there are more than
** SORTER_MAX_MERGE_COUNT PMAs on disk when Rewind() is called, then more
** than one background thread may be created. Specifically, there may be
** one background thread for each temporary file on disk, and one background
** thread to merge the output of each of the others to a single PMA for
** the main thread to read from.
*/
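// The helpers below are an illustrative, single-threaded sketch of the
// algorithm described above, using plain strings in place of records and
// in-memory slices in place of level-0 PMAs and temporary files. They show
// only the shape of the procedure (buffer, sort and flush a run whenever the
// threshold is exceeded, then merge the runs); they are not part of the
// generated sorter.
func exampleExternalSort(keys []string, threshold int) []string {
	var runs [][]string // each entry stands in for one sorted level-0 PMA
	var buf []string
	flush := func() {
		if len(buf) == 0 {
			return
		}
		run := append([]string(nil), buf...)
		exampleInsertionSort(run)
		runs = append(runs, run)
		buf = buf[:0]
	}
	for _, k := range keys {
		buf = append(buf, k)
		if threshold > 0 && len(buf) >= threshold {
			flush() // corresponds to writing a level-0 PMA
		}
	}
	flush() // corresponds to the final flush performed by Rewind()
	for len(runs) > 1 {
		merged := exampleMergeRuns(runs[0], runs[1])
		runs = append(runs[2:], merged)
	}
	if len(runs) == 0 {
		return nil
	}
	return runs[0]
}

func exampleInsertionSort(a []string) {
	for i := 1; i < len(a); i++ {
		for j := i; j > 0 && a[j] < a[j-1]; j-- {
			a[j], a[j-1] = a[j-1], a[j]
		}
	}
}

// exampleMergeRuns merges two sorted runs into one, the way higher-level PMAs
// are built from lower-level ones.
func exampleMergeRuns(a, b []string) []string {
	out := make([]string, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		if a[0] <= b[0] {
			out, a = append(out, a[0]), a[1:]
		} else {
			out, b = append(out, b[0]), b[1:]
		}
	}
	out = append(out, a...)
	return append(out, b...)
}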
/* #include "sqliteInt.h" */
/* #include "vdbeInt.h" */
/*
** If SQLITE_DEBUG_SORTER_THREADS is defined, this module outputs various
** messages to stderr that may be helpful in understanding the performance
** characteristics of the sorter in multi-threaded mode.
*/
/*
** Hard-coded maximum amount of data to accumulate in memory before flushing
** to a level 0 PMA. The purpose of this limit is to prevent various integer
** overflows. 512MiB.
*/
// C documentation
//
// /*
// ** Private objects used by the sorter
// */
type TMergeEngine = struct {
FnTree int32
FpTask uintptr
FaTree uintptr
FaReadr uintptr
}
type MergeEngine = TMergeEngine
/* Merge PMAs together */
type TPmaReader = struct {
FiReadOff Ti64
FiEof Ti64
FnAlloc int32
FnKey int32
FpFd uintptr
FaAlloc uintptr
FaKey uintptr
FaBuffer uintptr
FnBuffer int32
FaMap uintptr
FpIncr uintptr
}
type PmaReader = TPmaReader
/* Incrementally read one PMA */
type TPmaWriter = struct {
FeFWErr int32
FaBuffer uintptr
FnBuffer int32
FiBufStart int32
FiBufEnd int32
FiWriteOff Ti64
FpFd uintptr
}
type PmaWriter = TPmaWriter
/* Incrementally write one PMA */
type TSorterRecord = struct {
FnVal int32
Fu struct {
FiNext [0]int32
FpNext uintptr
}
}
type SorterRecord = TSorterRecord
/* A record being sorted */
type TSortSubtask = struct {
FpThread uintptr
FbDone int32
FnPMA int32
FpSorter uintptr
FpUnpacked uintptr
Flist TSorterList
FxCompare TSorterCompare
Ffile TSorterFile
Ffile2 TSorterFile
}
type SortSubtask = TSortSubtask
/* A sub-task in the sort process */
type TSorterFile = struct {
FpFd uintptr
FiEof Ti64
}
type SorterFile = TSorterFile
/* Temporary file object wrapper */
type TSorterList = struct {
FpList uintptr
FaMemory uintptr
FszPMA Ti64
}
type SorterList = TSorterList
/* In-memory list of records */
type TIncrMerger = struct {
FpTask uintptr
FpMerger uintptr
FiStartOff Ti64
FmxSz int32
FbEof int32
FbUseThread int32
FaFile [2]TSorterFile
}
type IncrMerger = TIncrMerger
/* Read & merge multiple PMAs */
/*
** A container for a temp file handle and the current amount of data
** stored in the file.
*/
type TSorterFile1 = struct {
FpFd uintptr
FiEof Ti64
}
type SorterFile1 = TSorterFile1
/*
** An in-memory list of objects to be sorted.
**
** If aMemory==0 then each object is allocated separately and the objects
** are connected using SorterRecord.u.pNext. If aMemory!=0 then all objects
** are stored in the aMemory[] bulk memory, one right after the other, and
** are connected using SorterRecord.u.iNext.
*/
type TSorterList1 = struct {
FpList uintptr
FaMemory uintptr
FszPMA Ti64
}
type SorterList1 = TSorterList1
/*
** The MergeEngine object is used to combine two or more smaller PMAs into
** one big PMA using a merge operation. Separate PMAs all need to be
** combined into one big PMA in order to be able to step through the sorted
** records in order.
**
** The aReadr[] array contains a PmaReader object for each of the PMAs being
** merged. An aReadr[] object either points to a valid key or else is at EOF.
** ("EOF" means "End Of File". When aReadr[] is at EOF there is no more data.)
** For the purposes of the paragraphs below, we assume that the array is
** actually N elements in size, where N is the smallest power of 2 greater
** to or equal to the number of PMAs being merged. The extra aReadr[] elements
** are treated as if they are empty (always at EOF).
**
** The aTree[] array is also N elements in size. The value of N is stored in
** the MergeEngine.nTree variable.
**
** The final (N/2) elements of aTree[] contain the results of comparing
** pairs of PMA keys together. Element i contains the result of
** comparing aReadr[2*i-N] and aReadr[2*i-N+1]. Whichever key is smaller, the
** aTree element is set to the index of it.
**
** For the purposes of this comparison, EOF is considered greater than any
** other key value. If the keys are equal (only possible with two EOF
** values), it doesn't matter which index is stored.
**
** The (N/4) elements of aTree[] that precede the final (N/2) described
** above contain the index of the smallest of each block of 4 PmaReaders.
** And so on. So that aTree[1] contains the index of the PmaReader that
** currently points to the smallest key value. aTree[0] is unused.
**
** Example:
**
** aReadr[0] -> Banana
** aReadr[1] -> Feijoa
** aReadr[2] -> Elderberry
** aReadr[3] -> Currant
** aReadr[4] -> Grapefruit
** aReadr[5] -> Apple
** aReadr[6] -> Durian
** aReadr[7] -> EOF
**
** aTree[] = { X, 5 0, 5 0, 3, 5, 6 }
**
** The current element is "Apple" (the value of the key indicated by
** PmaReader 5). When the Next() operation is invoked, PmaReader 5 will
** be advanced to the next key in its segment. Say the next key is
** "Eggplant":
**
** aReadr[5] -> Eggplant
**
** The contents of aTree[] are updated first by comparing the new PmaReader
** 5 key to the current key of PmaReader 4 (still "Grapefruit"). The PmaReader
** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree.
** The value of PmaReader 6 - "Durian" - is now smaller than that of PmaReader
** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (Banana<Durian),
** so aTree[1] is set to 0.
**
** A single-threaded IncrMerger instead works within a reserved region of
** pTask->file2. And instead of using a
** background thread to prepare data for the PmaReader, with a single
** threaded IncrMerger the allocated part of pTask->file2 is "refilled" with
** keys from pMerger by the calling thread whenever the PmaReader runs out
** of data.
*/
type TIncrMerger1 = struct {
FpTask uintptr
FpMerger uintptr
FiStartOff Ti64
FmxSz int32
FbEof int32
FbUseThread int32
FaFile [2]TSorterFile
}
type IncrMerger1 = TIncrMerger1
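// exampleMerger is an illustrative, in-memory sketch of the aTree[]
// tournament described in the MergeEngine comment above; it is not part of
// the generated sorter. Sorted string slices stand in for PmaReaders, and an
// exhausted slice plays the role of a reader at EOF, losing every comparison.
type exampleMerger struct {
	runs  [][]string // runs[i] is sorted ascending and is consumed from the front
	aTree []int      // aTree[1] holds the index of the run with the smallest key
	n     int        // number of tree slots: a power of two >= len(runs)
}

// key returns the current key of run i, or ok==false if that run is at EOF.
func (m *exampleMerger) key(i int) (k string, ok bool) {
	if i >= len(m.runs) || len(m.runs[i]) == 0 {
		return "", false
	}
	return m.runs[i][0], true
}

// smaller returns whichever of runs a and b currently holds the smaller key,
// treating EOF as greater than any key (ties keep a, which is harmless).
func (m *exampleMerger) smaller(a, b int) int {
	ka, okA := m.key(a)
	kb, okB := m.key(b)
	if !okA {
		return b
	}
	if !okB || ka <= kb {
		return a
	}
	return b
}

// recompute rebuilds tree node i from its two children: the bottom level
// compares runs directly, the levels above compare the winners below them.
func (m *exampleMerger) recompute(i int) {
	if i >= m.n/2 {
		m.aTree[i] = m.smaller(2*i-m.n, 2*i-m.n+1)
	} else {
		m.aTree[i] = m.smaller(m.aTree[2*i], m.aTree[2*i+1])
	}
}

func newExampleMerger(runs [][]string) *exampleMerger {
	n := 2
	for n < len(runs) {
		n *= 2
	}
	m := &exampleMerger{runs: runs, aTree: make([]int, n), n: n}
	for i := n - 1; i > 0; i-- {
		m.recompute(i)
	}
	return m
}

// next pops the overall smallest key and repairs only the comparisons on the
// path from the winning run back up to aTree[1], as described above.
func (m *exampleMerger) next() (string, bool) {
	iWin := m.aTree[1]
	k, ok := m.key(iWin)
	if !ok {
		return "", false // every run is at EOF
	}
	m.runs[iWin] = m.runs[iWin][1:]
	for i := (iWin + m.n) / 2; i > 0; i /= 2 {
		m.recompute(i)
	}
	return k, true
}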
/*
** An instance of this object is used for writing a PMA.
**
** The PMA is written one record at a time. Each record is of an arbitrary
** size. But I/O is more efficient if it occurs in page-sized blocks where
** each block is aligned on a page boundary. This object caches writes to
** the PMA so that aligned, page-size blocks are written.
*/
type TPmaWriter1 = struct {
FeFWErr int32
FaBuffer uintptr
FnBuffer int32
FiBufStart int32
FiBufEnd int32
FiWriteOff Ti64
FpFd uintptr
}
type PmaWriter1 = TPmaWriter1
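// examplePmaBuffer is an illustrative sketch of the buffering strategy
// described above, not the generated PmaWriter itself: incoming writes are
// staged in a single page-sized buffer (page must be non-empty) and handed to
// a flush callback only as whole, page-aligned blocks.
type examplePmaBuffer struct {
	page  []byte                   // one page worth of staged data
	used  int                      // number of bytes currently staged
	flush func(block []byte) error // stands in for writing one page to the temp file
}

// write stages p, emitting a full page via flush each time the buffer fills.
func (w *examplePmaBuffer) write(p []byte) error {
	for len(p) > 0 {
		n := copy(w.page[w.used:], p)
		w.used += n
		p = p[n:]
		if w.used == len(w.page) {
			if err := w.flush(w.page); err != nil {
				return err
			}
			w.used = 0
		}
	}
	return nil
}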
/*
** This object is the header on a single record while that record is being
** held in memory and prior to being written out as part of a PMA.
**
** How the linked list is connected depends on how memory is being managed
** by this module. If using a separate allocation for each in-memory record
** (VdbeSorter.list.aMemory==0), then the list is always connected using the
** SorterRecord.u.pNext pointers.
**
** Or, if using the single large allocation method (VdbeSorter.list.aMemory!=0),
** then while records are being accumulated the list is linked using the
** SorterRecord.u.iNext offset. This is because the aMemory[] array may
** be sqlite3Realloc()ed while records are being accumulated. Once the VM
** has finished passing records to the sorter, or when the in-memory buffer
** is full, the list is sorted. As part of the sorting process, it is
** converted to use the SorterRecord.u.pNext pointers. See function
** vdbeSorterSort() for details.
*/
type TSorterRecord1 = struct {
FnVal int32
Fu struct {
FiNext [0]int32
FpNext uintptr
}
}
type SorterRecord1 = TSorterRecord1
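// The two declarations below are an illustrative sketch, not part of the
// generated sorter: they show why a list held inside one growable allocation
// has to be linked with offsets (SorterRecord.u.iNext) rather than pointers.
// Growing the slice may relocate the whole block, which would leave pointers
// into the old block dangling, while offsets remain valid after a move.
type exampleBulkRecord struct {
	val   int // stands in for the record payload
	iNext int // index of the next record in the bulk slice, or -1 at the end
}

// examplePushBulkRecord prepends a record to the offset-linked list stored in
// bulk and returns the (possibly relocated) slice plus the new head offset.
func examplePushBulkRecord(bulk []exampleBulkRecord, head int, val int) ([]exampleBulkRecord, int) {
	bulk = append(bulk, exampleBulkRecord{val: val, iNext: head})
	return bulk, len(bulk) - 1
}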
// C documentation
//
// /*
// ** Free all memory belonging to the PmaReader object passed as the
// ** argument. All structure fields are set to zero before returning.
// */
func _vdbePmaReaderClear(tls *libc.TLS, pReadr uintptr) {
Xsqlite3_free(tls, (*TPmaReader)(unsafe.Pointer(pReadr)).FaAlloc)
Xsqlite3_free(tls, (*TPmaReader)(unsafe.Pointer(pReadr)).FaBuffer)
if (*TPmaReader)(unsafe.Pointer(pReadr)).FaMap != 0 {
_sqlite3OsUnfetch(tls, (*TPmaReader)(unsafe.Pointer(pReadr)).FpFd, 0, (*TPmaReader)(unsafe.Pointer(pReadr)).FaMap)
}
_vdbeIncrFree(tls, (*TPmaReader)(unsafe.Pointer(pReadr)).FpIncr)
libc.Xmemset(tls, pReadr, 0, uint64(80))
}
// C documentation
//
// /*
// ** Read the next nByte bytes of data from the PMA p.
// ** If successful, set *ppOut to point to a buffer containing the data
// ** and return SQLITE_OK. Otherwise, if an error occurs, return an SQLite
// ** error code.
// **
// ** The buffer returned in *ppOut is only valid until the
// ** next call to this function.
// */
func _vdbePmaReadBlob(tls *libc.TLS, p uintptr, nByte int32, ppOut uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var aNew uintptr
var iBuf, nAvail, nCopy, nRead, nRem, rc, rc1 int32
var nNew Tsqlite3_int64
var v1 int64
var _ /* aNext at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _ = aNew, iBuf, nAvail, nCopy, nNew, nRead, nRem, rc, rc1, v1 /* Bytes of data available in buffer */
if (*TPmaReader)(unsafe.Pointer(p)).FaMap != 0 {
*(*uintptr)(unsafe.Pointer(ppOut)) = (*TPmaReader)(unsafe.Pointer(p)).FaMap + uintptr((*TPmaReader)(unsafe.Pointer(p)).FiReadOff)
*(*Ti64)(unsafe.Pointer(p)) += int64(nByte)
return SQLITE_OK
}
/* If there is no more data to be read from the buffer, read the next
** p->nBuffer bytes of data from the file into it. Or, if there are less
** than p->nBuffer bytes remaining in the PMA, read all remaining data. */
iBuf = int32((*TPmaReader)(unsafe.Pointer(p)).FiReadOff % int64((*TPmaReader)(unsafe.Pointer(p)).FnBuffer))
if iBuf == 0 { /* sqlite3OsRead() return code */
/* Determine how many bytes of data to read. */
if (*TPmaReader)(unsafe.Pointer(p)).FiEof-(*TPmaReader)(unsafe.Pointer(p)).FiReadOff > int64((*TPmaReader)(unsafe.Pointer(p)).FnBuffer) {
nRead = (*TPmaReader)(unsafe.Pointer(p)).FnBuffer
} else {
nRead = int32((*TPmaReader)(unsafe.Pointer(p)).FiEof - (*TPmaReader)(unsafe.Pointer(p)).FiReadOff)
}
/* Read data from the file. Return early if an error occurs. */
rc = _sqlite3OsRead(tls, (*TPmaReader)(unsafe.Pointer(p)).FpFd, (*TPmaReader)(unsafe.Pointer(p)).FaBuffer, nRead, (*TPmaReader)(unsafe.Pointer(p)).FiReadOff)
if rc != SQLITE_OK {
return rc
}
}
nAvail = (*TPmaReader)(unsafe.Pointer(p)).FnBuffer - iBuf
if nByte <= nAvail {
/* The requested data is available in the in-memory buffer. In this
** case there is no need to make a copy of the data, just return a
** pointer into the buffer to the caller. */
*(*uintptr)(unsafe.Pointer(ppOut)) = (*TPmaReader)(unsafe.Pointer(p)).FaBuffer + uintptr(iBuf)
*(*Ti64)(unsafe.Pointer(p)) += int64(nByte)
} else { /* Bytes remaining to copy */
/* Extend the p->aAlloc[] allocation if required. */
if (*TPmaReader)(unsafe.Pointer(p)).FnAlloc < nByte {
if int64(libc.Int32FromInt32(128)) > int64(2)*int64((*TPmaReader)(unsafe.Pointer(p)).FnAlloc) {
v1 = int64(libc.Int32FromInt32(128))
} else {
v1 = int64(2) * int64((*TPmaReader)(unsafe.Pointer(p)).FnAlloc)
}
nNew = v1
for int64(nByte) > nNew {
nNew = nNew * int64(2)
}
aNew = _sqlite3Realloc(tls, (*TPmaReader)(unsafe.Pointer(p)).FaAlloc, uint64(nNew))
if !(aNew != 0) {
return int32(SQLITE_NOMEM)
}
(*TPmaReader)(unsafe.Pointer(p)).FnAlloc = int32(nNew)
(*TPmaReader)(unsafe.Pointer(p)).FaAlloc = aNew
}
/* Copy as much data as is available in the buffer into the start of
** p->aAlloc[]. */
libc.Xmemcpy(tls, (*TPmaReader)(unsafe.Pointer(p)).FaAlloc, (*TPmaReader)(unsafe.Pointer(p)).FaBuffer+uintptr(iBuf), uint64(nAvail))
*(*Ti64)(unsafe.Pointer(p)) += int64(nAvail)
nRem = nByte - nAvail
/* The following loop copies up to p->nBuffer bytes per iteration into
** the p->aAlloc[] buffer. */
for nRem > 0 { /* Pointer to buffer to copy data from */
nCopy = nRem
if nRem > (*TPmaReader)(unsafe.Pointer(p)).FnBuffer {
nCopy = (*TPmaReader)(unsafe.Pointer(p)).FnBuffer
}
rc1 = _vdbePmaReadBlob(tls, p, nCopy, bp)
if rc1 != SQLITE_OK {
return rc1
}
libc.Xmemcpy(tls, (*TPmaReader)(unsafe.Pointer(p)).FaAlloc+uintptr(nByte-nRem), *(*uintptr)(unsafe.Pointer(bp)), uint64(nCopy))
nRem -= nCopy
}
*(*uintptr)(unsafe.Pointer(ppOut)) = (*TPmaReader)(unsafe.Pointer(p)).FaAlloc
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Read a varint from the stream of data accessed by p. Set *pnOut to
// ** the value read.
// */
func _vdbePmaReadVarint(tls *libc.TLS, p uintptr, pnOut uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var i, iBuf, rc, v1 int32
var _ /* a at bp+16 */ uintptr
var _ /* aVarint at bp+0 */ [16]Tu8
_, _, _, _ = i, iBuf, rc, v1
if (*TPmaReader)(unsafe.Pointer(p)).FaMap != 0 {
*(*Ti64)(unsafe.Pointer(p)) += int64(_sqlite3GetVarint(tls, (*TPmaReader)(unsafe.Pointer(p)).FaMap+uintptr((*TPmaReader)(unsafe.Pointer(p)).FiReadOff), pnOut))
} else {
iBuf = int32((*TPmaReader)(unsafe.Pointer(p)).FiReadOff % int64((*TPmaReader)(unsafe.Pointer(p)).FnBuffer))
if iBuf != 0 && (*TPmaReader)(unsafe.Pointer(p)).FnBuffer-iBuf >= int32(9) {
*(*Ti64)(unsafe.Pointer(p)) += int64(_sqlite3GetVarint(tls, (*TPmaReader)(unsafe.Pointer(p)).FaBuffer+uintptr(iBuf), pnOut))
} else {
i = 0
for cond := true; cond; cond = int32(*(*Tu8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 16)))))&int32(0x80) != 0 {
rc = _vdbePmaReadBlob(tls, p, int32(1), bp+16)
if rc != 0 {
return rc
}
v1 = i
i++
(*(*[16]Tu8)(unsafe.Pointer(bp)))[v1&int32(0xf)] = *(*Tu8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp + 16))))
}
_sqlite3GetVarint(tls, bp, pnOut)
}
}
return SQLITE_OK
}
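// A minimal standalone sketch of the varint format consumed above, assuming
// SQLite's big-endian base-128 encoding of up to nine bytes: the high bit of
// each of the first eight bytes signals that more bytes follow, and a ninth
// byte, if present, contributes all eight bits. The example* names are
// illustrative helpers, not part of the sqlite3 API; examplePutVarint expects
// p to have room for up to nine bytes.
func examplePutVarint(p []byte, v uint64) int {
	if v>>56 != 0 {
		// Values needing more than 56 bits use the full nine-byte form.
		p[8] = byte(v)
		v >>= 8
		for i := 7; i >= 0; i-- {
			p[i] = byte(v&0x7f) | 0x80
			v >>= 7
		}
		return 9
	}
	var buf [9]byte
	n := 0
	for {
		buf[n] = byte(v&0x7f) | 0x80
		n++
		v >>= 7
		if v == 0 {
			break
		}
	}
	buf[0] &= 0x7f // the final byte emitted has its continuation bit clear
	for i := 0; i < n; i++ {
		p[i] = buf[n-1-i] // emit the most-significant group first
	}
	return n
}

// exampleGetVarint is the matching decoder; it returns the decoded value and
// the number of bytes consumed.
func exampleGetVarint(p []byte) (uint64, int) {
	var v uint64
	for n := 0; n < 8; n++ {
		v = v<<7 | uint64(p[n]&0x7f)
		if p[n]&0x80 == 0 {
			return v, n + 1
		}
	}
	return v<<8 | uint64(p[8]), 9
}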
// C documentation
//
// /*
// ** Attempt to memory map file pFile. If successful, set *pp to point to the
// ** new mapping and return SQLITE_OK. If the mapping is not attempted
// ** (because the file is too large or the VFS layer is configured not to use
// ** mmap), return SQLITE_OK and set *pp to NULL.
// **
// ** Or, if an error occurs, return an SQLite error code. The final value of
// ** *pp is undefined in this case.
// */
func _vdbeSorterMapFile(tls *libc.TLS, pTask uintptr, pFile uintptr, pp uintptr) (r int32) {
var pFd uintptr
var rc int32
_, _ = pFd, rc
rc = SQLITE_OK
if (*TSorterFile)(unsafe.Pointer(pFile)).FiEof <= int64((*Tsqlite3)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).Fdb)).FnMaxSorterMmap) {
pFd = (*TSorterFile)(unsafe.Pointer(pFile)).FpFd
if (*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(pFd)).FpMethods)).FiVersion >= int32(3) {
rc = _sqlite3OsFetch(tls, pFd, 0, int32((*TSorterFile)(unsafe.Pointer(pFile)).FiEof), pp)
}
}
return rc
}
// C documentation
//
// /*
// ** Attach PmaReader pReadr to file pFile (if it is not already attached to
// ** that file) and seek it to offset iOff within the file. Return SQLITE_OK
// ** if successful, or an SQLite error code if an error occurs.
// */
func _vdbePmaReaderSeek(tls *libc.TLS, pTask uintptr, pReadr uintptr, pFile uintptr, iOff Ti64) (r int32) {
var iBuf, nRead, pgsz, rc int32
_, _, _, _ = iBuf, nRead, pgsz, rc
rc = SQLITE_OK
if _sqlite3FaultSim(tls, int32(201)) != 0 {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(1)<<libc.Int32FromInt32(8)
}
if (*TPmaReader)(unsafe.Pointer(pReadr)).FaMap != 0 {
_sqlite3OsUnfetch(tls, (*TPmaReader)(unsafe.Pointer(pReadr)).FpFd, 0, (*TPmaReader)(unsafe.Pointer(pReadr)).FaMap)
(*TPmaReader)(unsafe.Pointer(pReadr)).FaMap = uintptr(0)
}
(*TPmaReader)(unsafe.Pointer(pReadr)).FiReadOff = iOff
(*TPmaReader)(unsafe.Pointer(pReadr)).FiEof = (*TSorterFile)(unsafe.Pointer(pFile)).FiEof
(*TPmaReader)(unsafe.Pointer(pReadr)).FpFd = (*TSorterFile)(unsafe.Pointer(pFile)).FpFd
rc = _vdbeSorterMapFile(tls, pTask, pFile, pReadr+64)
if rc == SQLITE_OK && (*TPmaReader)(unsafe.Pointer(pReadr)).FaMap == uintptr(0) {
pgsz = (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).Fpgsz
iBuf = int32((*TPmaReader)(unsafe.Pointer(pReadr)).FiReadOff % int64(pgsz))
if (*TPmaReader)(unsafe.Pointer(pReadr)).FaBuffer == uintptr(0) {
(*TPmaReader)(unsafe.Pointer(pReadr)).FaBuffer = _sqlite3Malloc(tls, uint64(pgsz))
if (*TPmaReader)(unsafe.Pointer(pReadr)).FaBuffer == uintptr(0) {
rc = int32(SQLITE_NOMEM)
}
(*TPmaReader)(unsafe.Pointer(pReadr)).FnBuffer = pgsz
}
if rc == SQLITE_OK && iBuf != 0 {
nRead = pgsz - iBuf
if (*TPmaReader)(unsafe.Pointer(pReadr)).FiReadOff+int64(nRead) > (*TPmaReader)(unsafe.Pointer(pReadr)).FiEof {
nRead = int32((*TPmaReader)(unsafe.Pointer(pReadr)).FiEof - (*TPmaReader)(unsafe.Pointer(pReadr)).FiReadOff)
}
rc = _sqlite3OsRead(tls, (*TPmaReader)(unsafe.Pointer(pReadr)).FpFd, (*TPmaReader)(unsafe.Pointer(pReadr)).FaBuffer+uintptr(iBuf), nRead, (*TPmaReader)(unsafe.Pointer(pReadr)).FiReadOff)
}
}
return rc
}
// C documentation
//
// /*
// ** Advance PmaReader pReadr to the next key in its PMA. Return SQLITE_OK if
// ** no error occurs, or an SQLite error code if one does.
// */
func _vdbePmaReaderNext(tls *libc.TLS, pReadr uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var bEof, rc int32
var pIncr uintptr
var _ /* nRec at bp+0 */ Tu64
_, _, _ = bEof, pIncr, rc
rc = SQLITE_OK /* Return Code */
*(*Tu64)(unsafe.Pointer(bp)) = uint64(0) /* Size of record in bytes */
if (*TPmaReader)(unsafe.Pointer(pReadr)).FiReadOff >= (*TPmaReader)(unsafe.Pointer(pReadr)).FiEof {
pIncr = (*TPmaReader)(unsafe.Pointer(pReadr)).FpIncr
bEof = int32(1)
if pIncr != 0 {
rc = _vdbeIncrSwap(tls, pIncr)
if rc == SQLITE_OK && (*TIncrMerger)(unsafe.Pointer(pIncr)).FbEof == 0 {
rc = _vdbePmaReaderSeek(tls, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask, pReadr, pIncr+40, (*TIncrMerger)(unsafe.Pointer(pIncr)).FiStartOff)
bEof = 0
}
}
if bEof != 0 {
/* This is an EOF condition */
_vdbePmaReaderClear(tls, pReadr)
return rc
}
}
if rc == SQLITE_OK {
rc = _vdbePmaReadVarint(tls, pReadr, bp)
}
if rc == SQLITE_OK {
(*TPmaReader)(unsafe.Pointer(pReadr)).FnKey = int32(*(*Tu64)(unsafe.Pointer(bp)))
rc = _vdbePmaReadBlob(tls, pReadr, int32(*(*Tu64)(unsafe.Pointer(bp))), pReadr+40)
}
return rc
}
// C documentation
//
// /*
// ** Initialize PmaReader pReadr to scan through the PMA stored in file pFile
// ** starting at offset iStart and ending at offset iEof-1. This function
// ** leaves the PmaReader pointing to the first key in the PMA (or EOF if the
// ** PMA is empty).
// **
// ** If the pnByte parameter is NULL, then it is assumed that the file
// ** contains a single PMA, and that that PMA omits the initial length varint.
// */
func _vdbePmaReaderInit(tls *libc.TLS, pTask uintptr, pFile uintptr, iStart Ti64, pReadr uintptr, pnByte uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var p1 uintptr
var _ /* nByte at bp+0 */ Tu64
_, _ = rc, p1
rc = _vdbePmaReaderSeek(tls, pTask, pReadr, pFile, iStart)
if rc == SQLITE_OK {
*(*Tu64)(unsafe.Pointer(bp)) = uint64(0) /* Size of PMA in bytes */
rc = _vdbePmaReadVarint(tls, pReadr, bp)
(*TPmaReader)(unsafe.Pointer(pReadr)).FiEof = int64(uint64((*TPmaReader)(unsafe.Pointer(pReadr)).FiReadOff) + *(*Tu64)(unsafe.Pointer(bp)))
p1 = pnByte
*(*Ti64)(unsafe.Pointer(p1)) = Ti64(uint64(*(*Ti64)(unsafe.Pointer(p1))) + *(*Tu64)(unsafe.Pointer(bp)))
}
if rc == SQLITE_OK {
rc = _vdbePmaReaderNext(tls, pReadr)
}
return rc
}
// C documentation
//
// /*
// ** A version of vdbeSorterCompare() that assumes that it has already been
// ** determined that the first field of key1 is equal to the first field of
// ** key2.
// */
func _vdbeSorterCompareTail(tls *libc.TLS, pTask uintptr, pbKey2Cached uintptr, pKey1 uintptr, nKey1 int32, pKey2 uintptr, nKey2 int32) (r int32) {
var r2 uintptr
_ = r2
r2 = (*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked
if *(*int32)(unsafe.Pointer(pbKey2Cached)) == 0 {
_sqlite3VdbeRecordUnpack(tls, (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo, nKey2, pKey2, r2)
*(*int32)(unsafe.Pointer(pbKey2Cached)) = int32(1)
}
return _sqlite3VdbeRecordCompareWithSkip(tls, nKey1, pKey1, r2, int32(1))
}
// C documentation
//
// /*
// ** Compare key1 (buffer pKey1, size nKey1 bytes) with key2 (buffer pKey2,
// ** size nKey2 bytes). Use (pTask->pKeyInfo) for the collation sequences
// ** used by the comparison. Return the result of the comparison.
// **
// ** If IN/OUT parameter *pbKey2Cached is true when this function is called,
// ** it is assumed that (pTask->pUnpacked) contains the unpacked version
// ** of key2. If it is false, (pTask->pUnpacked) is populated with the unpacked
// ** version of key2 and *pbKey2Cached set to true before returning.
// **
// ** If an OOM error is encountered, (pTask->pUnpacked->error_rc) is set
// ** to SQLITE_NOMEM.
// */
func _vdbeSorterCompare(tls *libc.TLS, pTask uintptr, pbKey2Cached uintptr, pKey1 uintptr, nKey1 int32, pKey2 uintptr, nKey2 int32) (r int32) {
var r2 uintptr
_ = r2
r2 = (*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked
if !(*(*int32)(unsafe.Pointer(pbKey2Cached)) != 0) {
_sqlite3VdbeRecordUnpack(tls, (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo, nKey2, pKey2, r2)
*(*int32)(unsafe.Pointer(pbKey2Cached)) = int32(1)
}
return _sqlite3VdbeRecordCompare(tls, nKey1, pKey1, r2)
}
// C documentation
//
// /*
// ** A specially optimized version of vdbeSorterCompare() that assumes that
// ** the first field of each key is a TEXT value and that the collation
// ** sequence to compare them with is BINARY.
// */
func _vdbeSorterCompareText(tls *libc.TLS, pTask uintptr, pbKey2Cached uintptr, pKey1 uintptr, nKey1 int32, pKey2 uintptr, nKey2 int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var p1, p2, v1, v2 uintptr
var res, v11 int32
var _ /* n1 at bp+0 */ int32
var _ /* n2 at bp+4 */ int32
_, _, _, _, _, _ = p1, p2, res, v1, v2, v11
p1 = pKey1
p2 = pKey2
v1 = p1 + uintptr(*(*Tu8)(unsafe.Pointer(p1))) /* Pointer to value 1 */
v2 = p2 + uintptr(*(*Tu8)(unsafe.Pointer(p2)))
*(*int32)(unsafe.Pointer(bp)) = int32(uint32(*(*Tu8)(unsafe.Pointer(p1 + 1))))
if *(*int32)(unsafe.Pointer(bp)) >= int32(0x80) {
_sqlite3GetVarint32(tls, p1+1, bp)
}
*(*int32)(unsafe.Pointer(bp + 4)) = int32(uint32(*(*Tu8)(unsafe.Pointer(p2 + 1))))
if *(*int32)(unsafe.Pointer(bp + 4)) >= int32(0x80) {
_sqlite3GetVarint32(tls, p2+1, bp+4)
}
if *(*int32)(unsafe.Pointer(bp)) < *(*int32)(unsafe.Pointer(bp + 4)) {
v11 = *(*int32)(unsafe.Pointer(bp))
} else {
v11 = *(*int32)(unsafe.Pointer(bp + 4))
}
res = libc.Xmemcmp(tls, v1, v2, uint64((v11-int32(13))/int32(2)))
if res == 0 {
res = *(*int32)(unsafe.Pointer(bp)) - *(*int32)(unsafe.Pointer(bp + 4))
}
if res == 0 {
if int32((*TKeyInfo)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo)).FnKeyField) > int32(1) {
res = _vdbeSorterCompareTail(tls, pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2)
}
} else {
if *(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo)).FaSortFlags)) != 0 {
res = res * -int32(1)
}
}
return res
}
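// A minimal sketch of the BINARY text fast path above, assuming both records
// use the one-byte header form with a first-field serial type below 0x80 and
// that the type is an odd value >= 13 (i.e. TEXT); the helper name and the
// plain byte-slice arguments are assumptions of this sketch.
func exampleCompareFirstText(k1, k2 []byte) int {
	v1 := k1[int(k1[0]):] // field data begins at the header-length offset
	v2 := k2[int(k2[0]):]
	n1 := (int(k1[1]) - 13) / 2 // text length is encoded in the serial type
	n2 := (int(k2[1]) - 13) / 2
	n := n1
	if n2 < n {
		n = n2
	}
	for i := 0; i < n; i++ { // memcmp over the shared prefix
		if v1[i] != v2[i] {
			if v1[i] < v2[i] {
				return -1
			}
			return 1
		}
	}
	return n1 - n2 // with an equal prefix, the shorter value sorts first
}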
// C documentation
//
// /*
// ** A specially optimized version of vdbeSorterCompare() that assumes that
// ** the first field of each key is an INTEGER value.
// */
func _vdbeSorterCompareInt(tls *libc.TLS, pTask uintptr, pbKey2Cached uintptr, pKey1 uintptr, nKey1 int32, pKey2 uintptr, nKey2 int32) (r int32) {
var i, res, s1, s2, v21, v3 int32
var n Tu8
var p1, p2, v1, v2 uintptr
_, _, _, _, _, _, _, _, _, _, _ = i, n, p1, p2, res, s1, s2, v1, v2, v21, v3
p1 = pKey1
p2 = pKey2
s1 = int32(*(*Tu8)(unsafe.Pointer(p1 + 1))) /* Left hand serial type */
s2 = int32(*(*Tu8)(unsafe.Pointer(p2 + 1))) /* Right hand serial type */
v1 = p1 + uintptr(*(*Tu8)(unsafe.Pointer(p1))) /* Pointer to value 1 */
v2 = p2 + uintptr(*(*Tu8)(unsafe.Pointer(p2))) /* Return value */
if s1 == s2 {
n = _aLen[s1]
res = 0
i = 0
for {
if !(i < int32(n)) {
break
}
v21 = int32(*(*Tu8)(unsafe.Pointer(v1 + uintptr(i)))) - int32(*(*Tu8)(unsafe.Pointer(v2 + uintptr(i))))
res = v21
if v21 != 0 {
if (int32(*(*Tu8)(unsafe.Pointer(v1)))^int32(*(*Tu8)(unsafe.Pointer(v2))))&int32(0x80) != 0 {
if int32(*(*Tu8)(unsafe.Pointer(v1)))&int32(0x80) != 0 {
v3 = -int32(1)
} else {
v3 = +libc.Int32FromInt32(1)
}
res = v3
}
break
}
goto _1
_1:
;
i++
}
} else {
if s1 > int32(7) && s2 > int32(7) {
res = s1 - s2
} else {
if s2 > int32(7) {
res = +libc.Int32FromInt32(1)
} else {
if s1 > int32(7) {
res = -int32(1)
} else {
res = s1 - s2
}
}
if res > 0 {
if int32(*(*Tu8)(unsafe.Pointer(v1)))&int32(0x80) != 0 {
res = -int32(1)
}
} else {
if int32(*(*Tu8)(unsafe.Pointer(v2)))&int32(0x80) != 0 {
res = +libc.Int32FromInt32(1)
}
}
}
}
if res == 0 {
if int32((*TKeyInfo)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo)).FnKeyField) > int32(1) {
res = _vdbeSorterCompareTail(tls, pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2)
}
} else {
if *(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo)).FaSortFlags)) != 0 {
res = res * -int32(1)
}
}
return res
}
/* The two values have the same sign. Compare using memcmp(). */
var _aLen = [10]Tu8{
1: uint8(1),
2: uint8(2),
3: uint8(3),
4: uint8(4),
5: uint8(6),
6: uint8(8),
}
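// A small sketch of the same-serial-type branch of the integer fast path
// above: both values are big-endian two's-complement of the width given by
// _aLen, the first differing byte decides the result, and when the sign bits
// differ the value with its high bit set (the negative one) sorts first. The
// helper name and []byte arguments are assumptions of this sketch.
func exampleCompareSameWidthInts(v1, v2 []byte) int {
	for i := range v1 {
		if d := int(v1[i]) - int(v2[i]); d != 0 {
			if (v1[0]^v2[0])&0x80 != 0 {
				if v1[0]&0x80 != 0 {
					return -1 // left value is negative, right is not
				}
				return 1
			}
			return d // same sign: raw big-endian bytes order correctly
		}
	}
	return 0
}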
// C documentation
//
// /*
// ** Initialize the temporary index cursor just opened as a sorter cursor.
// **
// ** Usually, the sorter module uses the value of (pCsr->pKeyInfo->nKeyField)
// ** to determine the number of fields that should be compared from the
// ** records being sorted. However, if the value passed as argument nField
// ** is non-zero and the sorter is able to guarantee a stable sort, nField
// ** is used instead. This is used when sorting records for a CREATE INDEX
// ** statement. In this case, keys are always delivered to the sorter in
// ** order of the primary key, which happens to make up the final part
// ** of the records being sorted. So if the sort is stable, there is never
// ** any reason to compare PK fields and they can be ignored for a small
// ** performance boost.
// **
// ** The sorter can guarantee a stable sort when running in single-threaded
// ** mode, but not in multi-threaded mode.
// **
// ** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
// */
func _sqlite3VdbeSorterInit(tls *libc.TLS, db uintptr, nField int32, pCsr uintptr) (r int32) {
var i, nWorker, pgsz, rc, sz, szKeyInfo, v2, v5 int32
var mxCache Ti64
var pBt, pKeyInfo, pSorter, pTask, v1 uintptr
var szPma Tu32
var v4 int64
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = i, mxCache, nWorker, pBt, pKeyInfo, pSorter, pTask, pgsz, rc, sz, szKeyInfo, szPma, v1, v2, v4, v5 /* Size of pSorter in bytes */
rc = SQLITE_OK
/* Initialize the upper limit on the number of worker threads */
if _sqlite3TempInMemory(tls, db) != 0 || int32(_sqlite3Config.FbCoreMutex) == 0 {
nWorker = 0
} else {
nWorker = *(*int32)(unsafe.Pointer(db + 136 + 11*4))
}
/* Do not allow the total number of threads (main thread + all workers)
** to exceed the maximum merge count */
szKeyInfo = int32(uint64(40) + uint64(int32((*TKeyInfo)(unsafe.Pointer((*TVdbeCursor)(unsafe.Pointer(pCsr)).FpKeyInfo)).FnKeyField)-libc.Int32FromInt32(1))*uint64(8))
sz = int32(uint64(192) + uint64(nWorker)*uint64(96))
pSorter = _sqlite3DbMallocZero(tls, db, uint64(sz+szKeyInfo))
*(*uintptr)(unsafe.Pointer(pCsr + 48)) = pSorter
if pSorter == uintptr(0) {
rc = int32(SQLITE_NOMEM)
} else {
pBt = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb))).FpBt
v1 = pSorter + uintptr(sz)
pKeyInfo = v1
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FpKeyInfo = v1
libc.Xmemcpy(tls, pKeyInfo, (*TVdbeCursor)(unsafe.Pointer(pCsr)).FpKeyInfo, uint64(szKeyInfo))
(*TKeyInfo)(unsafe.Pointer(pKeyInfo)).Fdb = uintptr(0)
if nField != 0 && nWorker == 0 {
(*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FnKeyField = uint16(nField)
}
_sqlite3BtreeEnter(tls, pBt)
v2 = _sqlite3BtreeGetPageSize(tls, pBt)
pgsz = v2
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Fpgsz = v2
_sqlite3BtreeLeave(tls, pBt)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask = uint8(nWorker + int32(1))
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FiPrev = uint8(nWorker - libc.Int32FromInt32(1))
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUseThreads = libc.BoolUint8(int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask) > libc.Int32FromInt32(1))
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Fdb = db
i = 0
for {
if !(i < int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask)) {
break
}
pTask = pSorter + 96 + uintptr(i)*96
(*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter = pSorter
goto _3
_3:
;
i++
}
if !(_sqlite3TempInMemory(tls, db) != 0) { /* Cache size in bytes*/
szPma = _sqlite3Config.FszPma
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FmnPmaSize = int32(szPma * uint32(pgsz))
mxCache = int64((*TSchema)(unsafe.Pointer((*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb))).FpSchema)).Fcache_size)
if mxCache < 0 {
/* A negative cache-size value C indicates that the cache is abs(C)
** KiB in size. */
mxCache = mxCache * int64(-int32(1024))
} else {
mxCache = mxCache * int64(pgsz)
}
if mxCache < int64(libc.Int32FromInt32(1)<<libc.Int32FromInt32(29)) {
v4 = mxCache
} else {
v4 = int64(libc.Int32FromInt32(1) << libc.Int32FromInt32(29))
}
mxCache = v4
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FmnPmaSize > int32(mxCache) {
v5 = (*TVdbeSorter)(unsafe.Pointer(pSorter)).FmnPmaSize
} else {
v5 = int32(mxCache)
}
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxPmaSize = v5
/* Avoid large memory allocations if the application has requested
** SQLITE_CONFIG_SMALL_MALLOC. */
if int32(_sqlite3Config.FbSmallMalloc) == 0 {
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FnMemory = pgsz
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory = _sqlite3Malloc(tls, uint64(pgsz))
if !((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory != 0) {
rc = int32(SQLITE_NOMEM)
}
}
}
if int32((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FnAllField) < int32(13) && (*(*uintptr)(unsafe.Pointer(pKeyInfo + 32)) == uintptr(0) || *(*uintptr)(unsafe.Pointer(pKeyInfo + 32)) == (*Tsqlite3)(unsafe.Pointer(db)).FpDfltColl) && int32(*(*Tu8)(unsafe.Pointer((*TKeyInfo)(unsafe.Pointer(pKeyInfo)).FaSortFlags)))&int32(KEYINFO_ORDER_BIGNULL) == 0 {
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FtypeMask = uint8(libc.Int32FromInt32(SORTER_TYPE_INTEGER) | libc.Int32FromInt32(SORTER_TYPE_TEXT))
}
}
return rc
}
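// A sketch of the PMA sizing rule applied above, assuming pgsz is the btree
// page size, szPma mirrors sqlite3Config.szPma, and cacheSize is the schema
// cache_size value (negative meaning abs(cacheSize) KiB, as the comment above
// explains); the 1<<29 limit mirrors the cap applied to mxCache. The function
// name and signature are assumptions of this sketch.
func examplePmaSizes(szPma uint32, pgsz int, cacheSize int64) (mn, mx int) {
	mn = int(szPma) * pgsz
	mxCache := cacheSize
	if mxCache < 0 {
		mxCache *= -1024 // a negative cache-size counts KiB
	} else {
		mxCache *= int64(pgsz)
	}
	if mxCache > int64(1)<<29 {
		mxCache = int64(1) << 29
	}
	mx = mn
	if int64(mx) < mxCache {
		mx = int(mxCache)
	}
	return mn, mx
}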
// C documentation
//
// /*
// ** Free the list of sorted records starting at pRecord.
// */
func _vdbeSorterRecordFree(tls *libc.TLS, db uintptr, pRecord uintptr) {
var p, pNext uintptr
_, _ = p, pNext
p = pRecord
for {
if !(p != 0) {
break
}
pNext = *(*uintptr)(unsafe.Pointer(p + 8))
_sqlite3DbFree(tls, db, p)
goto _1
_1:
;
p = pNext
}
}
// C documentation
//
// /*
// ** Free all resources owned by the object indicated by argument pTask. All
// ** fields of *pTask are zeroed before returning.
// */
func _vdbeSortSubtaskCleanup(tls *libc.TLS, db uintptr, pTask uintptr) {
_sqlite3DbFree(tls, db, (*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked)
/* pTask->list.aMemory can only be non-zero if it was handed memory
** from the main thread. That only occurs if SQLITE_MAX_WORKER_THREADS>0 */
if (*TSortSubtask)(unsafe.Pointer(pTask)).Flist.FaMemory != 0 {
Xsqlite3_free(tls, (*TSortSubtask)(unsafe.Pointer(pTask)).Flist.FaMemory)
} else {
_vdbeSorterRecordFree(tls, uintptr(0), (*TSortSubtask)(unsafe.Pointer(pTask)).Flist.FpList)
}
if (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile.FpFd != 0 {
_sqlite3OsCloseFree(tls, (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile.FpFd)
}
if (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FpFd != 0 {
_sqlite3OsCloseFree(tls, (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FpFd)
}
libc.Xmemset(tls, pTask, 0, uint64(96))
}
// C documentation
//
// /*
// ** Join thread pTask->thread.
// */
func _vdbeSorterJoinThread(tls *libc.TLS, pTask uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* pRet at bp+0 */ uintptr
_ = rc
rc = SQLITE_OK
if (*TSortSubtask)(unsafe.Pointer(pTask)).FpThread != 0 {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(int64(libc.Int32FromInt32(SQLITE_ERROR)))
_sqlite3ThreadJoin(tls, (*TSortSubtask)(unsafe.Pointer(pTask)).FpThread, bp)
rc = int32(int64(*(*uintptr)(unsafe.Pointer(bp))))
(*TSortSubtask)(unsafe.Pointer(pTask)).FbDone = 0
(*TSortSubtask)(unsafe.Pointer(pTask)).FpThread = uintptr(0)
}
return rc
}
// C documentation
//
// /*
// ** Launch a background thread to run xTask(pIn).
// */
func _vdbeSorterCreateThread(tls *libc.TLS, pTask uintptr, xTask uintptr, pIn uintptr) (r int32) {
return _sqlite3ThreadCreate(tls, pTask, xTask, pIn)
}
// C documentation
//
// /*
// ** Join all outstanding threads launched by SorterWrite() to create
// ** level-0 PMAs.
// */
func _vdbeSorterJoinAll(tls *libc.TLS, pSorter uintptr, rcin int32) (r int32) {
var i, rc, rc2 int32
var pTask uintptr
_, _, _, _ = i, pTask, rc, rc2
rc = rcin
/* This function is always called by the main user thread.
**
** If this function is being called after SorterRewind() has been called,
** it is possible that thread pSorter->aTask[pSorter->nTask-1].pThread
** is currently attempting to join one of the other threads. To avoid a race
** condition where this thread also attempts to join the same object, join
** thread pSorter->aTask[pSorter->nTask-1].pThread first. */
i = int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask) - int32(1)
for {
if !(i >= 0) {
break
}
pTask = pSorter + 96 + uintptr(i)*96
rc2 = _vdbeSorterJoinThread(tls, pTask)
if rc == SQLITE_OK {
rc = rc2
}
goto _1
_1:
;
i--
}
return rc
}
// C documentation
//
// /*
// ** Allocate a new MergeEngine object capable of handling up to
// ** nReader PmaReader inputs.
// **
// ** nReader is automatically rounded up to the next power of two.
// ** nReader may not exceed SORTER_MAX_MERGE_COUNT even after rounding up.
// */
func _vdbeMergeEngineNew(tls *libc.TLS, nReader int32) (r uintptr) {
var N, nByte int32
var pNew, v1 uintptr
_, _, _, _ = N, nByte, pNew, v1
N = int32(2) /* Pointer to allocated object to return */
for N < nReader {
N += N
}
nByte = int32(uint64(32) + uint64(N)*(libc.Uint64FromInt64(4)+libc.Uint64FromInt64(80)))
if _sqlite3FaultSim(tls, int32(100)) != 0 {
v1 = uintptr(0)
} else {
v1 = _sqlite3MallocZero(tls, uint64(nByte))
}
pNew = v1
if pNew != 0 {
(*TMergeEngine)(unsafe.Pointer(pNew)).FnTree = N
(*TMergeEngine)(unsafe.Pointer(pNew)).FpTask = uintptr(0)
(*TMergeEngine)(unsafe.Pointer(pNew)).FaReadr = pNew + 1*32
(*TMergeEngine)(unsafe.Pointer(pNew)).FaTree = (*TMergeEngine)(unsafe.Pointer(pNew)).FaReadr + uintptr(N)*80
}
return pNew
}
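// The rounding rule described above, in isolation: the reader count is grown
// to the next power of two by repeated doubling, starting from two. The
// helper name is an assumption of this sketch.
func exampleRoundUpReaders(nReader int) int {
	n := 2
	for n < nReader {
		n += n
	}
	return n
}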
// C documentation
//
// /*
// ** Free the MergeEngine object passed as the only argument.
// */
func _vdbeMergeEngineFree(tls *libc.TLS, pMerger uintptr) {
var i int32
_ = i
if pMerger != 0 {
i = 0
for {
if !(i < (*TMergeEngine)(unsafe.Pointer(pMerger)).FnTree) {
break
}
_vdbePmaReaderClear(tls, (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr+uintptr(i)*80)
goto _1
_1:
;
i++
}
}
Xsqlite3_free(tls, pMerger)
}
// C documentation
//
// /*
// ** Free all resources associated with the IncrMerger object indicated by
// ** the first argument.
// */
func _vdbeIncrFree(tls *libc.TLS, pIncr uintptr) {
if pIncr != 0 {
if (*TIncrMerger)(unsafe.Pointer(pIncr)).FbUseThread != 0 {
_vdbeSorterJoinThread(tls, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask)
if (*(*TSorterFile)(unsafe.Pointer(pIncr + 40))).FpFd != 0 {
_sqlite3OsCloseFree(tls, (*(*TSorterFile)(unsafe.Pointer(pIncr + 40))).FpFd)
}
if (*(*TSorterFile)(unsafe.Pointer(pIncr + 40 + 1*16))).FpFd != 0 {
_sqlite3OsCloseFree(tls, (*(*TSorterFile)(unsafe.Pointer(pIncr + 40 + 1*16))).FpFd)
}
}
_vdbeMergeEngineFree(tls, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpMerger)
Xsqlite3_free(tls, pIncr)
}
}
// C documentation
//
// /*
// ** Reset a sorting cursor back to its original empty state.
// */
func _sqlite3VdbeSorterReset(tls *libc.TLS, db uintptr, pSorter uintptr) {
var i int32
var pTask uintptr
_, _ = i, pTask
_vdbeSorterJoinAll(tls, pSorter, SQLITE_OK)
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader != 0 {
_vdbePmaReaderClear(tls, (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader)
_sqlite3DbFree(tls, db, (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader = uintptr(0)
}
_vdbeMergeEngineFree(tls, (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpMerger)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FpMerger = uintptr(0)
i = 0
for {
if !(i < int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask)) {
break
}
pTask = pSorter + 96 + uintptr(i)*96
_vdbeSortSubtaskCleanup(tls, db, pTask)
(*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter = pSorter
goto _1
_1:
;
i++
}
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory == uintptr(0) {
_vdbeSorterRecordFree(tls, uintptr(0), (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList)
}
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList = uintptr(0)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FszPMA = 0
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUsePMA = uint8(0)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FiMemory = 0
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxKeysize = 0
_sqlite3DbFree(tls, db, (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpUnpacked)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FpUnpacked = uintptr(0)
}
// C documentation
//
// /*
// ** Free any cursor components allocated by sqlite3VdbeSorterXXX routines.
// */
func _sqlite3VdbeSorterClose(tls *libc.TLS, db uintptr, pCsr uintptr) {
var pSorter uintptr
_ = pSorter
pSorter = *(*uintptr)(unsafe.Pointer(pCsr + 48))
if pSorter != 0 {
_sqlite3VdbeSorterReset(tls, db, pSorter)
Xsqlite3_free(tls, (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory)
_sqlite3DbFree(tls, db, pSorter)
*(*uintptr)(unsafe.Pointer(pCsr + 48)) = uintptr(0)
}
}
// C documentation
//
// /*
// ** The first argument is a file-handle open on a temporary file. The file
// ** is guaranteed to be nByte bytes or smaller in size. This function
// ** attempts to extend the file to nByte bytes in size and to ensure that
// ** the VFS has memory mapped it.
// **
// ** Whether or not the file does end up memory mapped of course depends on
// ** the specific VFS implementation.
// */
func _vdbeSorterExtendFile(tls *libc.TLS, db uintptr, pFd uintptr, _nByte Ti64) {
bp := tls.Alloc(32)
defer tls.Free(32)
*(*Ti64)(unsafe.Pointer(bp)) = _nByte
var _ /* chunksize at bp+16 */ int32
var _ /* p at bp+8 */ uintptr
if *(*Ti64)(unsafe.Pointer(bp)) <= int64((*Tsqlite3)(unsafe.Pointer(db)).FnMaxSorterMmap) && (*Tsqlite3_io_methods1)(unsafe.Pointer((*Tsqlite3_file)(unsafe.Pointer(pFd)).FpMethods)).FiVersion >= int32(3) {
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0)
*(*int32)(unsafe.Pointer(bp + 16)) = libc.Int32FromInt32(4) * libc.Int32FromInt32(1024)
_sqlite3OsFileControlHint(tls, pFd, int32(SQLITE_FCNTL_CHUNK_SIZE), bp+16)
_sqlite3OsFileControlHint(tls, pFd, int32(SQLITE_FCNTL_SIZE_HINT), bp)
_sqlite3OsFetch(tls, pFd, 0, int32(*(*Ti64)(unsafe.Pointer(bp))), bp+8)
if *(*uintptr)(unsafe.Pointer(bp + 8)) != 0 {
_sqlite3OsUnfetch(tls, pFd, 0, *(*uintptr)(unsafe.Pointer(bp + 8)))
}
}
}
// C documentation
//
// /*
// ** Allocate space for a file-handle and open a temporary file. If successful,
// ** set *ppFd to point to the malloc'd file-handle and return SQLITE_OK.
// ** Otherwise, set *ppFd to 0 and return an SQLite error code.
// */
func _vdbeSorterOpenTempFile(tls *libc.TLS, db uintptr, nExtend Ti64, ppFd uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* max at bp+8 */ Ti64
var _ /* rc at bp+0 */ int32
if _sqlite3FaultSim(tls, int32(202)) != 0 {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(13)<<libc.Int32FromInt32(8)
}
*(*int32)(unsafe.Pointer(bp)) = _sqlite3OsOpenMalloc(tls, (*Tsqlite3)(unsafe.Pointer(db)).FpVfs, uintptr(0), ppFd, libc.Int32FromInt32(SQLITE_OPEN_TEMP_JOURNAL)|libc.Int32FromInt32(SQLITE_OPEN_READWRITE)|libc.Int32FromInt32(SQLITE_OPEN_CREATE)|libc.Int32FromInt32(SQLITE_OPEN_EXCLUSIVE)|libc.Int32FromInt32(SQLITE_OPEN_DELETEONCLOSE), bp)
if *(*int32)(unsafe.Pointer(bp)) == SQLITE_OK {
*(*Ti64)(unsafe.Pointer(bp + 8)) = int64(SQLITE_MAX_MMAP_SIZE)
_sqlite3OsFileControlHint(tls, *(*uintptr)(unsafe.Pointer(ppFd)), int32(SQLITE_FCNTL_MMAP_SIZE), bp+8)
if nExtend > 0 {
_vdbeSorterExtendFile(tls, db, *(*uintptr)(unsafe.Pointer(ppFd)), nExtend)
}
}
return *(*int32)(unsafe.Pointer(bp))
}
// C documentation
//
// /*
// ** If it has not already been allocated, allocate the UnpackedRecord
// ** structure at pTask->pUnpacked. Return SQLITE_OK if successful (or
// ** if no allocation was required), or SQLITE_NOMEM otherwise.
// */
func _vdbeSortAllocUnpacked(tls *libc.TLS, pTask uintptr) (r int32) {
if (*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked == uintptr(0) {
(*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked = _sqlite3VdbeAllocUnpackedRecord(tls, (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo)
if (*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked == uintptr(0) {
return int32(SQLITE_NOMEM)
}
(*TUnpackedRecord)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked)).FnField = (*TKeyInfo)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FpKeyInfo)).FnKeyField
(*TUnpackedRecord)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked)).FerrCode = uint8(0)
}
return SQLITE_OK
}
// C documentation
//
// /*
// ** Merge the two sorted lists p1 and p2 into a single list.
// */
func _vdbeSorterMerge(tls *libc.TLS, pTask uintptr, p1 uintptr, p2 uintptr) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pp uintptr
var res int32
var _ /* bCached at bp+8 */ int32
var _ /* pFinal at bp+0 */ uintptr
_, _ = pp, res
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
pp = bp
*(*int32)(unsafe.Pointer(bp + 8)) = 0
for {
res = (*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*TSortSubtask)(unsafe.Pointer(pTask)).FxCompare})))(tls, pTask, bp+8, p1+libc.UintptrFromInt32(1)*16, (*TSorterRecord)(unsafe.Pointer(p1)).FnVal, p2+libc.UintptrFromInt32(1)*16, (*TSorterRecord)(unsafe.Pointer(p2)).FnVal)
if res <= 0 {
*(*uintptr)(unsafe.Pointer(pp)) = p1
pp = p1 + 8
p1 = *(*uintptr)(unsafe.Pointer(p1 + 8))
if p1 == uintptr(0) {
*(*uintptr)(unsafe.Pointer(pp)) = p2
break
}
} else {
*(*uintptr)(unsafe.Pointer(pp)) = p2
pp = p2 + 8
p2 = *(*uintptr)(unsafe.Pointer(p2 + 8))
*(*int32)(unsafe.Pointer(bp + 8)) = 0
if p2 == uintptr(0) {
*(*uintptr)(unsafe.Pointer(pp)) = p1
break
}
}
goto _1
_1:
}
return *(*uintptr)(unsafe.Pointer(bp))
}
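// A pure-Go sketch of the two-list merge above, assuming a simple singly
// linked list of integer keys; exampleRec and exampleMergeLists are
// illustrative stand-ins for SorterRecord and vdbeSorterMerge. Ties keep the
// element from the first list, matching the res <= 0 branch above.
type exampleRec struct {
	key  int
	next *exampleRec
}

func exampleMergeLists(p1, p2 *exampleRec) *exampleRec {
	var head exampleRec
	tail := &head
	for p1 != nil && p2 != nil {
		if p1.key <= p2.key {
			tail.next, p1 = p1, p1.next
		} else {
			tail.next, p2 = p2, p2.next
		}
		tail = tail.next
	}
	if p1 != nil {
		tail.next = p1 // append the remainder of whichever list is left
	} else {
		tail.next = p2
	}
	return head.next
}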
// C documentation
//
// /*
// ** Return the SorterCompare function to compare values collected by the
// ** sorter object passed as the only argument.
// */
func _vdbeSorterGetCompare(tls *libc.TLS, p uintptr) (r TSorterCompare) {
if int32((*TVdbeSorter)(unsafe.Pointer(p)).FtypeMask) == int32(SORTER_TYPE_INTEGER) {
return __ccgo_fp(_vdbeSorterCompareInt)
} else {
if int32((*TVdbeSorter)(unsafe.Pointer(p)).FtypeMask) == int32(SORTER_TYPE_TEXT) {
return __ccgo_fp(_vdbeSorterCompareText)
}
}
return __ccgo_fp(_vdbeSorterCompare)
}
// C documentation
//
// /*
// ** Sort the linked list of records headed at pTask->pList. Return
// ** SQLITE_OK if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if
// ** an error occurs.
// */
func _vdbeSorterSort(tls *libc.TLS, pTask uintptr, pList uintptr) (r int32) {
bp := tls.Alloc(512)
defer tls.Free(512)
var i, rc int32
var p, pNext, v3 uintptr
var _ /* aSlot at bp+0 */ [64]uintptr
_, _, _, _, _ = i, p, pNext, rc, v3
rc = _vdbeSortAllocUnpacked(tls, pTask)
if rc != SQLITE_OK {
return rc
}
p = (*TSorterList)(unsafe.Pointer(pList)).FpList
(*TSortSubtask)(unsafe.Pointer(pTask)).FxCompare = _vdbeSorterGetCompare(tls, (*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)
libc.Xmemset(tls, bp, 0, uint64(512))
for p != 0 {
if (*TSorterList)(unsafe.Pointer(pList)).FaMemory != 0 {
if p == (*TSorterList)(unsafe.Pointer(pList)).FaMemory {
pNext = uintptr(0)
} else {
pNext = (*TSorterList)(unsafe.Pointer(pList)).FaMemory + uintptr(*(*int32)(unsafe.Pointer(&(*TSorterRecord)(unsafe.Pointer(p)).Fu)))
}
} else {
pNext = *(*uintptr)(unsafe.Pointer(p + 8))
}
*(*uintptr)(unsafe.Pointer(p + 8)) = uintptr(0)
i = 0
for {
if !((*(*[64]uintptr)(unsafe.Pointer(bp)))[i] != 0) {
break
}
p = _vdbeSorterMerge(tls, pTask, p, (*(*[64]uintptr)(unsafe.Pointer(bp)))[i])
(*(*[64]uintptr)(unsafe.Pointer(bp)))[i] = uintptr(0)
goto _1
_1:
;
i++
}
(*(*[64]uintptr)(unsafe.Pointer(bp)))[i] = p
p = pNext
}
p = uintptr(0)
i = 0
for {
if !(i < int32(libc.Uint64FromInt64(512)/libc.Uint64FromInt64(8))) {
break
}
if (*(*[64]uintptr)(unsafe.Pointer(bp)))[i] == uintptr(0) {
goto _2
}
if p != 0 {
v3 = _vdbeSorterMerge(tls, pTask, p, (*(*[64]uintptr)(unsafe.Pointer(bp)))[i])
} else {
v3 = (*(*[64]uintptr)(unsafe.Pointer(bp)))[i]
}
p = v3
goto _2
_2:
;
i++
}
(*TSorterList)(unsafe.Pointer(pList)).FpList = p
return int32((*TUnpackedRecord)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked)).FerrCode)
}
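// A sketch of the balanced list-merge strategy above, using the exampleRec
// and exampleMergeLists helpers defined earlier: aSlot[i] holds a sorted run
// of roughly 2^i elements, each new element is merged into successive slots
// until an empty one is found, and the slots are merged together at the end,
// mirroring the aSlot[64] array in _vdbeSorterSort.
func exampleSortList(p *exampleRec) *exampleRec {
	var aSlot [64]*exampleRec
	for p != nil {
		next := p.next
		p.next = nil
		i := 0
		for ; aSlot[i] != nil; i++ {
			p = exampleMergeLists(p, aSlot[i])
			aSlot[i] = nil
		}
		aSlot[i] = p
		p = next
	}
	var out *exampleRec
	for i := range aSlot {
		if aSlot[i] != nil {
			out = exampleMergeLists(out, aSlot[i])
		}
	}
	return out
}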
// C documentation
//
// /*
// ** Initialize a PMA-writer object.
// */
func _vdbePmaWriterInit(tls *libc.TLS, pFd uintptr, p uintptr, nBuf int32, iStart Ti64) {
var v1 int32
_ = v1
libc.Xmemset(tls, p, 0, uint64(48))
(*TPmaWriter)(unsafe.Pointer(p)).FaBuffer = _sqlite3Malloc(tls, uint64(nBuf))
if !((*TPmaWriter)(unsafe.Pointer(p)).FaBuffer != 0) {
(*TPmaWriter)(unsafe.Pointer(p)).FeFWErr = int32(SQLITE_NOMEM)
} else {
v1 = int32(iStart % int64(nBuf))
(*TPmaWriter)(unsafe.Pointer(p)).FiBufStart = v1
(*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd = v1
(*TPmaWriter)(unsafe.Pointer(p)).FiWriteOff = iStart - int64((*TPmaWriter)(unsafe.Pointer(p)).FiBufStart)
(*TPmaWriter)(unsafe.Pointer(p)).FnBuffer = nBuf
(*TPmaWriter)(unsafe.Pointer(p)).FpFd = pFd
}
}
// C documentation
//
// /*
// ** Write nData bytes of data to the PMA. Return SQLITE_OK
// ** if successful, or an SQLite error code if an error occurs.
// */
func _vdbePmaWriteBlob(tls *libc.TLS, p uintptr, pData uintptr, nData int32) {
var nCopy, nRem, v1 int32
_, _, _ = nCopy, nRem, v1
nRem = nData
for nRem > 0 && (*TPmaWriter)(unsafe.Pointer(p)).FeFWErr == 0 {
nCopy = nRem
if nCopy > (*TPmaWriter)(unsafe.Pointer(p)).FnBuffer-(*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd {
nCopy = (*TPmaWriter)(unsafe.Pointer(p)).FnBuffer - (*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd
}
libc.Xmemcpy(tls, (*TPmaWriter)(unsafe.Pointer(p)).FaBuffer+uintptr((*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd), pData+uintptr(nData-nRem), uint64(nCopy))
*(*int32)(unsafe.Pointer(p + 24)) += nCopy
if (*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd == (*TPmaWriter)(unsafe.Pointer(p)).FnBuffer {
(*TPmaWriter)(unsafe.Pointer(p)).FeFWErr = _sqlite3OsWrite(tls, (*TPmaWriter)(unsafe.Pointer(p)).FpFd, (*TPmaWriter)(unsafe.Pointer(p)).FaBuffer+uintptr((*TPmaWriter)(unsafe.Pointer(p)).FiBufStart), (*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd-(*TPmaWriter)(unsafe.Pointer(p)).FiBufStart, (*TPmaWriter)(unsafe.Pointer(p)).FiWriteOff+int64((*TPmaWriter)(unsafe.Pointer(p)).FiBufStart))
v1 = libc.Int32FromInt32(0)
(*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd = v1
(*TPmaWriter)(unsafe.Pointer(p)).FiBufStart = v1
*(*Ti64)(unsafe.Pointer(p + 32)) += int64((*TPmaWriter)(unsafe.Pointer(p)).FnBuffer)
}
nRem -= nCopy
}
}
// C documentation
//
// /*
// ** Flush any buffered data to disk and clean up the PMA-writer object.
// ** The results of using the PMA-writer after this call are undefined.
// ** Return SQLITE_OK if flushing the buffered data succeeds or is not
// ** required. Otherwise, return an SQLite error code.
// **
// ** Before returning, set *piEof to the offset immediately following the
// ** last byte written to the file.
// */
func _vdbePmaWriterFinish(tls *libc.TLS, p uintptr, piEof uintptr) (r int32) {
var rc int32
_ = rc
if (*TPmaWriter)(unsafe.Pointer(p)).FeFWErr == 0 && (*TPmaWriter)(unsafe.Pointer(p)).FaBuffer != 0 && (*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd > (*TPmaWriter)(unsafe.Pointer(p)).FiBufStart {
(*TPmaWriter)(unsafe.Pointer(p)).FeFWErr = _sqlite3OsWrite(tls, (*TPmaWriter)(unsafe.Pointer(p)).FpFd, (*TPmaWriter)(unsafe.Pointer(p)).FaBuffer+uintptr((*TPmaWriter)(unsafe.Pointer(p)).FiBufStart), (*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd-(*TPmaWriter)(unsafe.Pointer(p)).FiBufStart, (*TPmaWriter)(unsafe.Pointer(p)).FiWriteOff+int64((*TPmaWriter)(unsafe.Pointer(p)).FiBufStart))
}
*(*Ti64)(unsafe.Pointer(piEof)) = (*TPmaWriter)(unsafe.Pointer(p)).FiWriteOff + int64((*TPmaWriter)(unsafe.Pointer(p)).FiBufEnd)
Xsqlite3_free(tls, (*TPmaWriter)(unsafe.Pointer(p)).FaBuffer)
rc = (*TPmaWriter)(unsafe.Pointer(p)).FeFWErr
libc.Xmemset(tls, p, 0, uint64(48))
return rc
}
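// A minimal sketch of the buffered-write strategy used by the PMA writer:
// data accumulates in a fixed-size buffer and is handed to a write callback
// whenever the buffer fills, with any partial final buffer flushed by
// finish(). The type, its field names and the write callback (standing in for
// sqlite3OsWrite) are assumptions of this sketch.
type exampleBufWriter struct {
	buf   []byte
	used  int
	write func(p []byte) error // stands in for the VFS write
	err   error                // first error encountered, sticky like eFWErr
}

func (w *exampleBufWriter) writeBlob(p []byte) {
	for len(p) > 0 && w.err == nil {
		n := copy(w.buf[w.used:], p)
		w.used += n
		p = p[n:]
		if w.used == len(w.buf) { // buffer full: flush a whole buffer
			w.err = w.write(w.buf)
			w.used = 0
		}
	}
}

func (w *exampleBufWriter) finish() error {
	if w.err == nil && w.used > 0 {
		w.err = w.write(w.buf[:w.used]) // flush the partial final buffer
	}
	return w.err
}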
// C documentation
//
// /*
// ** Write value iVal encoded as a varint to the PMA. Return
// ** SQLITE_OK if successful, or an SQLite error code if an error occurs.
// */
func _vdbePmaWriteVarint(tls *libc.TLS, p uintptr, iVal Tu64) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nByte int32
var _ /* aByte at bp+0 */ [10]Tu8
_ = nByte
nByte = _sqlite3PutVarint(tls, bp, iVal)
_vdbePmaWriteBlob(tls, p, bp, nByte)
}
// C documentation
//
// /*
// ** Write the current contents of in-memory linked-list pList to a level-0
// ** PMA in the temp file belonging to sub-task pTask. Return SQLITE_OK if
// ** successful, or an SQLite error code otherwise.
// **
// ** The format of a PMA is:
// **
// ** * A varint. This varint contains the total number of bytes of content
// ** in the PMA (not including the varint itself).
// **
// ** * One or more records packed end-to-end in order of ascending keys.
// ** Each record consists of a varint followed by a blob of data (the
// ** key). The varint is the number of bytes in the blob of data.
// */
func _vdbeSorterListToPMA(tls *libc.TLS, pTask uintptr, pList uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var db, p, pNext uintptr
var rc int32
var _ /* writer at bp+0 */ TPmaWriter
_, _, _, _ = db, p, pNext, rc
db = (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).Fdb
rc = SQLITE_OK /* Object used to write to the file */
libc.Xmemset(tls, bp, 0, uint64(48))
/* If the first temporary PMA file has not been opened, open it now. */
if (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile.FpFd == uintptr(0) {
rc = _vdbeSorterOpenTempFile(tls, db, 0, pTask+64)
}
/* Try to get the file to memory map */
if rc == SQLITE_OK {
_vdbeSorterExtendFile(tls, db, (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile.FpFd, (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile.FiEof+(*TSorterList)(unsafe.Pointer(pList)).FszPMA+int64(9))
}
/* Sort the list */
if rc == SQLITE_OK {
rc = _vdbeSorterSort(tls, pTask, pList)
}
if rc == SQLITE_OK {
pNext = uintptr(0)
_vdbePmaWriterInit(tls, (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile.FpFd, bp, (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).Fpgsz, (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile.FiEof)
(*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA++
_vdbePmaWriteVarint(tls, bp, uint64((*TSorterList)(unsafe.Pointer(pList)).FszPMA))
p = (*TSorterList)(unsafe.Pointer(pList)).FpList
for {
if !(p != 0) {
break
}
pNext = *(*uintptr)(unsafe.Pointer(p + 8))
_vdbePmaWriteVarint(tls, bp, uint64((*TSorterRecord)(unsafe.Pointer(p)).FnVal))
_vdbePmaWriteBlob(tls, bp, p+libc.UintptrFromInt32(1)*16, (*TSorterRecord)(unsafe.Pointer(p)).FnVal)
if (*TSorterList)(unsafe.Pointer(pList)).FaMemory == uintptr(0) {
Xsqlite3_free(tls, p)
}
goto _1
_1:
;
p = pNext
}
(*TSorterList)(unsafe.Pointer(pList)).FpList = p
rc = _vdbePmaWriterFinish(tls, bp, pTask+64+8)
}
return rc
}
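// A sketch of the PMA layout described above, assuming the examplePutVarint
// helper defined earlier and that keys is already sorted: the output is a
// length varint covering the content, followed by each record as a size
// varint plus the key bytes. The function name is an assumption of this
// sketch.
func exampleBuildPMA(keys [][]byte) []byte {
	var tmp [9]byte
	var body []byte
	for _, k := range keys {
		n := examplePutVarint(tmp[:], uint64(len(k)))
		body = append(body, tmp[:n]...)
		body = append(body, k...)
	}
	n := examplePutVarint(tmp[:], uint64(len(body)))
	out := make([]byte, 0, n+len(body))
	out = append(out, tmp[:n]...)
	return append(out, body...)
}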
// C documentation
//
// /*
// ** Advance the MergeEngine to its next entry.
// ** Set *pbEof to true if there is no next entry because
// ** the MergeEngine has reached the end of all its inputs.
// **
// ** Return SQLITE_OK if successful or an error code if an error occurs.
// */
func _vdbeMergeEngineStep(tls *libc.TLS, pMerger uintptr, pbEof uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i, iPrev, iRes, rc, v2 int32
var pReadr1, pReadr2, pTask uintptr
var _ /* bCached at bp+0 */ int32
_, _, _, _, _, _, _, _ = i, iPrev, iRes, pReadr1, pReadr2, pTask, rc, v2
iPrev = *(*int32)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer(pMerger)).FaTree + 1*4)) /* Index of PmaReader to advance */
pTask = (*TMergeEngine)(unsafe.Pointer(pMerger)).FpTask
/* Advance the current PmaReader */
rc = _vdbePmaReaderNext(tls, (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr+uintptr(iPrev)*80)
/* Update contents of aTree[] */
if rc == SQLITE_OK { /* Second PmaReader to compare */
*(*int32)(unsafe.Pointer(bp)) = 0
/* Find the first two PmaReaders to compare. The one that was just
** advanced (iPrev) and the one next to it in the array. */
pReadr1 = (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr + uintptr(iPrev&libc.Int32FromInt32(0xFFFE))*80
pReadr2 = (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr + uintptr(iPrev|libc.Int32FromInt32(0x0001))*80
i = ((*TMergeEngine)(unsafe.Pointer(pMerger)).FnTree + iPrev) / int32(2)
for {
if !(i > 0) {
break
}
if (*TPmaReader)(unsafe.Pointer(pReadr1)).FpFd == uintptr(0) {
iRes = +libc.Int32FromInt32(1)
} else {
if (*TPmaReader)(unsafe.Pointer(pReadr2)).FpFd == uintptr(0) {
iRes = -int32(1)
} else {
iRes = (*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*TSortSubtask)(unsafe.Pointer(pTask)).FxCompare})))(tls, pTask, bp, (*TPmaReader)(unsafe.Pointer(pReadr1)).FaKey, (*TPmaReader)(unsafe.Pointer(pReadr1)).FnKey, (*TPmaReader)(unsafe.Pointer(pReadr2)).FaKey, (*TPmaReader)(unsafe.Pointer(pReadr2)).FnKey)
}
}
/* If pReadr1 contained the smaller value, set aTree[i] to its index.
** Then set pReadr2 to the next PmaReader to compare to pReadr1. In this
** case there is no cache of pReadr2 in pTask->pUnpacked, so set
** pKey2 to point to the record belonging to pReadr2.
**
** Alternatively, if pReadr2 contains the smaller of the two values,
** set aTree[i] to its index and update pReadr1. If vdbeSorterCompare()
** was actually called above, then pTask->pUnpacked now contains
** a value equivalent to pReadr2. So set pKey2 to NULL to prevent
** vdbeSorterCompare() from decoding pReadr2 again.
**
** If the two values were equal, then the value from the oldest
** PMA should be considered smaller. The VdbeSorter.aReadr[] array
** is sorted from oldest to newest, so pReadr1 contains older values
** than pReadr2 iff (pReadr1<pReadr2).
**
** (pSorter->nTask-1) tasks. Except, if
** the background thread from a sub-tasks previous turn is still running,
** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy,
** fall back to using the final sub-task. The first (pSorter->nTask-1)
** sub-tasks are preferred as they use background threads - the final
** sub-task uses the main thread. */
i = 0
for {
if !(i < nWorker) {
break
}
iTest = (int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FiPrev) + i + int32(1)) % nWorker
pTask = pSorter + 96 + uintptr(iTest)*96
if (*TSortSubtask)(unsafe.Pointer(pTask)).FbDone != 0 {
rc = _vdbeSorterJoinThread(tls, pTask)
}
if rc != SQLITE_OK || (*TSortSubtask)(unsafe.Pointer(pTask)).FpThread == uintptr(0) {
break
}
goto _1
_1:
;
i++
}
if rc == SQLITE_OK {
if i == nWorker {
/* Use the foreground thread for this operation */
rc = _vdbeSorterListToPMA(tls, pSorter+96+uintptr(nWorker)*96, pSorter+56)
} else {
aMem = (*TSortSubtask)(unsafe.Pointer(pTask)).Flist.FaMemory
pCtx = pTask
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FiPrev = uint8((int64(pTask) - t__predefined_ptrdiff_t(pSorter+96)) / 96)
(*TSortSubtask)(unsafe.Pointer(pTask)).Flist = (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList = uintptr(0)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FszPMA = 0
if aMem != 0 {
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory = aMem
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FnMemory = _sqlite3MallocSize(tls, aMem)
} else {
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory != 0 {
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory = _sqlite3Malloc(tls, uint64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnMemory))
if !((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory != 0) {
return int32(SQLITE_NOMEM)
}
}
}
rc = _vdbeSorterCreateThread(tls, pTask, __ccgo_fp(_vdbeSorterFlushThread), pCtx)
}
}
return rc
}
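// A sketch of the sub-task rotation described in the comment above: starting
// just after the previously used sub-task, pick the first worker that is not
// still busy, falling back to the final slot (the main-thread task) when all
// workers are busy. The helper name and the busy slice are assumptions of
// this sketch.
func examplePickSubtask(iPrev int, busy []bool) int {
	nWorker := len(busy)
	for i := 0; i < nWorker; i++ {
		iTest := (iPrev + i + 1) % nWorker
		if !busy[iTest] {
			return iTest
		}
	}
	return nWorker // index of the final, foreground sub-task
}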
// C documentation
//
// /*
// ** Add a record to the sorter.
// */
func _sqlite3VdbeSorterWrite(tls *libc.TLS, pCsr uintptr, pVal uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var aNew, pNew, pSorter, p1, p2, p3 uintptr
var bFlush, iListOff, nMin, rc int32
var nNew Tsqlite3_int64
var nPMA, nReq Ti64
var _ /* t at bp+0 */ int32
_, _, _, _, _, _, _, _, _, _, _, _, _ = aNew, bFlush, iListOff, nMin, nNew, nPMA, nReq, pNew, pSorter, rc, p1, p2, p3
rc = SQLITE_OK /* serial type of first record field */
pSorter = *(*uintptr)(unsafe.Pointer(pCsr + 48))
*(*int32)(unsafe.Pointer(bp)) = int32(uint32(*(*Tu8)(unsafe.Pointer((*TMem)(unsafe.Pointer(pVal)).Fz + 1))))
if *(*int32)(unsafe.Pointer(bp)) >= int32(0x80) {
_sqlite3GetVarint32(tls, (*TMem)(unsafe.Pointer(pVal)).Fz+1, bp)
}
if *(*int32)(unsafe.Pointer(bp)) > 0 && *(*int32)(unsafe.Pointer(bp)) < int32(10) && *(*int32)(unsafe.Pointer(bp)) != int32(7) {
p1 = pSorter + 92
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) & libc.Int32FromInt32(SORTER_TYPE_INTEGER))
} else {
if *(*int32)(unsafe.Pointer(bp)) > int32(10) && *(*int32)(unsafe.Pointer(bp))&int32(0x01) != 0 {
p2 = pSorter + 92
*(*Tu8)(unsafe.Pointer(p2)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p2))) & libc.Int32FromInt32(SORTER_TYPE_TEXT))
} else {
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FtypeMask = uint8(0)
}
}
/* Figure out whether or not the current contents of memory should be
** flushed to a PMA before continuing. If so, do so.
**
** If using the single large allocation mode (pSorter->aMemory!=0), then
** flush the contents of memory to a new PMA if (a) at least one value is
** already in memory and (b) the new value will not fit in memory.
**
** Or, if using separate allocations for each record, flush the contents
** of memory to a PMA if either of the following are true:
**
** * The total memory allocated for the in-memory list is greater
** than (page-size * cache-size), or
**
** * The total memory allocated for the in-memory list is greater
** than (page-size * 10) and sqlite3HeapNearlyFull() returns true.
*/
nReq = int64(uint64((*TMem)(unsafe.Pointer(pVal)).Fn) + uint64(16))
nPMA = int64((*TMem)(unsafe.Pointer(pVal)).Fn + _sqlite3VarintLen(tls, uint64((*TMem)(unsafe.Pointer(pVal)).Fn)))
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxPmaSize != 0 {
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory != 0 {
bFlush = libc.BoolInt32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FiMemory != 0 && int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FiMemory)+nReq > int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxPmaSize))
} else {
bFlush = libc.BoolInt32((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FszPMA > int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxPmaSize) || (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FszPMA > int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FmnPmaSize) && _sqlite3HeapNearlyFull(tls) != 0)
}
if bFlush != 0 {
rc = _vdbeSorterFlushPMA(tls, pSorter)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FszPMA = 0
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FiMemory = 0
}
}
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FszPMA += nPMA
if nPMA > int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxKeysize) {
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxKeysize = int32(nPMA)
}
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory != 0 {
nMin = int32(int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FiMemory) + nReq)
if nMin > (*TVdbeSorter)(unsafe.Pointer(pSorter)).FnMemory {
nNew = int64(2) * int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnMemory)
iListOff = -int32(1)
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList != 0 {
iListOff = int32(int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList) - int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory))
}
for nNew < int64(nMin) {
nNew = nNew * int64(2)
}
if nNew > int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxPmaSize) {
nNew = int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).FmxPmaSize)
}
if nNew < int64(nMin) {
nNew = int64(nMin)
}
aNew = _sqlite3Realloc(tls, (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory, uint64(nNew))
if !(aNew != 0) {
return int32(SQLITE_NOMEM)
}
if iListOff >= 0 {
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList = aNew + uintptr(iListOff)
}
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory = aNew
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FnMemory = int32(nNew)
}
pNew = (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory + uintptr((*TVdbeSorter)(unsafe.Pointer(pSorter)).FiMemory)
p3 = pSorter + 80
*(*int32)(unsafe.Pointer(p3)) = int32(int64(*(*int32)(unsafe.Pointer(p3))) + (nReq+libc.Int64FromInt32(7))&int64(^libc.Int32FromInt32(7)))
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList != 0 {
*(*int32)(unsafe.Pointer(&(*TSorterRecord)(unsafe.Pointer(pNew)).Fu)) = int32(int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList) - int64((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory))
}
} else {
pNew = _sqlite3Malloc(tls, uint64(nReq))
if pNew == uintptr(0) {
return int32(SQLITE_NOMEM)
}
*(*uintptr)(unsafe.Pointer(pNew + 8)) = (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList
}
libc.Xmemcpy(tls, pNew+libc.UintptrFromInt32(1)*16, (*TMem)(unsafe.Pointer(pVal)).Fz, uint64((*TMem)(unsafe.Pointer(pVal)).Fn))
(*TSorterRecord)(unsafe.Pointer(pNew)).FnVal = (*TMem)(unsafe.Pointer(pVal)).Fn
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList = pNew
return rc
}
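// The flush decision spelled out by the long comment above, as a standalone
// predicate; the parameters correspond to the like-named sorter fields and
// heapNearlyFull stands in for sqlite3HeapNearlyFull(). The helper name and
// signature are assumptions of this sketch.
func exampleShouldFlush(useBulkMemory bool, iMemory, nReq, szPMA, mnPmaSize, mxPmaSize int64, heapNearlyFull bool) bool {
	if mxPmaSize == 0 {
		return false // in-memory temp store: records are never spilled
	}
	if useBulkMemory {
		// Single large allocation: flush when the new record will not fit.
		return iMemory != 0 && iMemory+nReq > mxPmaSize
	}
	// Separate allocations: flush on the size or heap-pressure thresholds.
	return szPMA > mxPmaSize || (szPMA > mnPmaSize && heapNearlyFull)
}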
// C documentation
//
// /*
// ** Read keys from pIncr->pMerger and populate pIncr->aFile[1]. The format
// ** of the data stored in aFile[1] is the same as that used by regular PMAs,
// ** except that the number-of-bytes varint is omitted from the start.
// */
func _vdbeIncrPopulate(tls *libc.TLS, pIncr uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var iEof, iStart Ti64
var nKey, rc, rc2 int32
var pMerger, pOut, pReader, pTask uintptr
var _ /* dummy at bp+48 */ int32
var _ /* writer at bp+0 */ TPmaWriter
_, _, _, _, _, _, _, _, _ = iEof, iStart, nKey, pMerger, pOut, pReader, pTask, rc, rc2
rc = SQLITE_OK
iStart = (*TIncrMerger)(unsafe.Pointer(pIncr)).FiStartOff
pOut = pIncr + 40 + 1*16
pTask = (*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask
pMerger = (*TIncrMerger)(unsafe.Pointer(pIncr)).FpMerger
_vdbePmaWriterInit(tls, (*TSorterFile)(unsafe.Pointer(pOut)).FpFd, bp, (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).Fpgsz, iStart)
for rc == SQLITE_OK {
pReader = (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr + uintptr(*(*int32)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer(pMerger)).FaTree + 1*4)))*80
nKey = (*TPmaReader)(unsafe.Pointer(pReader)).FnKey
iEof = (*(*TPmaWriter)(unsafe.Pointer(bp))).FiWriteOff + int64((*(*TPmaWriter)(unsafe.Pointer(bp))).FiBufEnd)
/* Check if the output file is full or if the input has been exhausted.
** In either case exit the loop. */
if (*TPmaReader)(unsafe.Pointer(pReader)).FpFd == uintptr(0) {
break
}
if iEof+int64(nKey)+int64(_sqlite3VarintLen(tls, uint64(nKey))) > iStart+int64((*TIncrMerger)(unsafe.Pointer(pIncr)).FmxSz) {
break
}
/* Write the next key to the output. */
_vdbePmaWriteVarint(tls, bp, uint64(nKey))
_vdbePmaWriteBlob(tls, bp, (*TPmaReader)(unsafe.Pointer(pReader)).FaKey, nKey)
rc = _vdbeMergeEngineStep(tls, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpMerger, bp+48)
}
rc2 = _vdbePmaWriterFinish(tls, bp, pOut+8)
if rc == SQLITE_OK {
rc = rc2
}
return rc
}
// C documentation
//
// /*
// ** The main routine for background threads that populate aFile[1] of
// ** multi-threaded IncrMerger objects.
// */
func _vdbeIncrPopulateThread(tls *libc.TLS, pCtx uintptr) (r uintptr) {
var pIncr, pRet uintptr
_, _ = pIncr, pRet
pIncr = pCtx
pRet = uintptr(int64(_vdbeIncrPopulate(tls, pIncr)))
(*TSortSubtask)(unsafe.Pointer((*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask)).FbDone = int32(1)
return pRet
}
// C documentation
//
// /*
// ** Launch a background thread to populate aFile[1] of pIncr.
// */
func _vdbeIncrBgPopulate(tls *libc.TLS, pIncr uintptr) (r int32) {
var p uintptr
_ = p
p = pIncr
return _vdbeSorterCreateThread(tls, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask, __ccgo_fp(_vdbeIncrPopulateThread), p)
}
// C documentation
//
// /*
// ** This function is called when the PmaReader corresponding to pIncr has
// ** finished reading the contents of aFile[0]. Its purpose is to "refill"
// ** aFile[0] such that the PmaReader should start rereading it from the
// ** beginning.
// **
// ** For single-threaded objects, this is accomplished by literally reading
// ** keys from pIncr->pMerger and repopulating aFile[0].
// **
// ** For multi-threaded objects, all that is required is to wait until the
// ** background thread is finished (if it is not already) and then swap
// ** aFile[0] and aFile[1] in place. If the contents of pMerger have not
// ** been exhausted, this function also launches a new background thread
// ** to populate the new aFile[1].
// **
// ** SQLITE_OK is returned on success, or an SQLite error code otherwise.
// */
func _vdbeIncrSwap(tls *libc.TLS, pIncr uintptr) (r int32) {
var f0 TSorterFile
var rc int32
_, _ = f0, rc
rc = SQLITE_OK
if (*TIncrMerger)(unsafe.Pointer(pIncr)).FbUseThread != 0 {
rc = _vdbeSorterJoinThread(tls, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask)
if rc == SQLITE_OK {
f0 = *(*TSorterFile)(unsafe.Pointer(pIncr + 40))
*(*TSorterFile)(unsafe.Pointer(pIncr + 40)) = *(*TSorterFile)(unsafe.Pointer(pIncr + 40 + 1*16))
*(*TSorterFile)(unsafe.Pointer(pIncr + 40 + 1*16)) = f0
}
if rc == SQLITE_OK {
if (*(*TSorterFile)(unsafe.Pointer(pIncr + 40))).FiEof == (*TIncrMerger)(unsafe.Pointer(pIncr)).FiStartOff {
(*TIncrMerger)(unsafe.Pointer(pIncr)).FbEof = int32(1)
} else {
rc = _vdbeIncrBgPopulate(tls, pIncr)
}
}
} else {
rc = _vdbeIncrPopulate(tls, pIncr)
*(*TSorterFile)(unsafe.Pointer(pIncr + 40)) = *(*TSorterFile)(unsafe.Pointer(pIncr + 40 + 1*16))
if (*(*TSorterFile)(unsafe.Pointer(pIncr + 40))).FiEof == (*TIncrMerger)(unsafe.Pointer(pIncr)).FiStartOff {
(*TIncrMerger)(unsafe.Pointer(pIncr)).FbEof = int32(1)
}
}
return rc
}
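// A sketch of the two-buffer scheme described above: the consumer drains one
// buffer while the other is being refilled; when the current buffer is
// exhausted the two are exchanged and, unless the producer has reached EOF, a
// new refill is started. The type, the refill callback (standing in for
// vdbeIncrPopulate) and running it synchronously are assumptions of this
// sketch.
type exampleIncr struct {
	file   [2][]byte     // file[0] is read from, file[1] is being refilled
	refill func() []byte // produces the next chunk; empty means EOF
	bEof   bool
}

func (p *exampleIncr) swap() []byte {
	p.file[0], p.file[1] = p.file[1], p.file[0]
	if len(p.file[0]) == 0 {
		p.bEof = true // nothing was produced: all input is exhausted
		return nil
	}
	p.file[1] = p.refill() // the threaded variant would do this in background
	return p.file[0]
}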
// C documentation
//
// /*
// ** Allocate and return a new IncrMerger object to read data from pMerger.
// **
// ** If an OOM condition is encountered, return NULL. In this case free the
// ** pMerger argument before returning.
// */
func _vdbeIncrMergerNew(tls *libc.TLS, pTask uintptr, pMerger uintptr, ppOut uintptr) (r int32) {
var pIncr, v1, v2 uintptr
var rc, v3 int32
_, _, _, _, _ = pIncr, rc, v1, v2, v3
rc = SQLITE_OK
if _sqlite3FaultSim(tls, int32(100)) != 0 {
v2 = uintptr(0)
} else {
v2 = _sqlite3MallocZero(tls, uint64(72))
}
v1 = v2
*(*uintptr)(unsafe.Pointer(ppOut)) = v1
pIncr = v1
if pIncr != 0 {
(*TIncrMerger)(unsafe.Pointer(pIncr)).FpMerger = pMerger
(*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask = pTask
if (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FmxKeysize+int32(9) > (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FmxPmaSize/int32(2) {
v3 = (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FmxKeysize + int32(9)
} else {
v3 = (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).FmxPmaSize / int32(2)
}
(*TIncrMerger)(unsafe.Pointer(pIncr)).FmxSz = v3
(*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FiEof += int64((*TIncrMerger)(unsafe.Pointer(pIncr)).FmxSz)
} else {
_vdbeMergeEngineFree(tls, pMerger)
rc = int32(SQLITE_NOMEM)
}
return rc
}
// C documentation
//
// /*
// ** Set the "use-threads" flag on object pIncr.
// */
func _vdbeIncrMergerSetThreads(tls *libc.TLS, pIncr uintptr) {
(*TIncrMerger)(unsafe.Pointer(pIncr)).FbUseThread = int32(1)
(*TSortSubtask)(unsafe.Pointer((*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask)).Ffile2.FiEof -= int64((*TIncrMerger)(unsafe.Pointer(pIncr)).FmxSz)
}
// C documentation
//
// /*
// ** Recompute pMerger->aTree[iOut] by comparing the next keys on the
// ** two PmaReaders that feed that entry. Neither of the PmaReaders
// ** are advanced. This routine merely does the comparison.
// */
func _vdbeMergeEngineCompare(tls *libc.TLS, pMerger uintptr, iOut int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i1, i2, iRes, res int32
var p1, p2, pTask uintptr
var _ /* bCached at bp+0 */ int32
_, _, _, _, _, _, _ = i1, i2, iRes, p1, p2, pTask, res
if iOut >= (*TMergeEngine)(unsafe.Pointer(pMerger)).FnTree/int32(2) {
i1 = (iOut - (*TMergeEngine)(unsafe.Pointer(pMerger)).FnTree/int32(2)) * int32(2)
i2 = i1 + int32(1)
} else {
i1 = *(*int32)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer(pMerger)).FaTree + uintptr(iOut*int32(2))*4))
i2 = *(*int32)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer(pMerger)).FaTree + uintptr(iOut*int32(2)+int32(1))*4))
}
p1 = (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr + uintptr(i1)*80
p2 = (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr + uintptr(i2)*80
if (*TPmaReader)(unsafe.Pointer(p1)).FpFd == uintptr(0) {
iRes = i2
} else {
if (*TPmaReader)(unsafe.Pointer(p2)).FpFd == uintptr(0) {
iRes = i1
} else {
pTask = (*TMergeEngine)(unsafe.Pointer(pMerger)).FpTask
*(*int32)(unsafe.Pointer(bp)) = 0
/* from vdbeSortSubtaskMain() */
res = (*(*func(*libc.TLS, uintptr, uintptr, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{(*TSortSubtask)(unsafe.Pointer(pTask)).FxCompare})))(tls, pTask, bp, (*TPmaReader)(unsafe.Pointer(p1)).FaKey, (*TPmaReader)(unsafe.Pointer(p1)).FnKey, (*TPmaReader)(unsafe.Pointer(p2)).FaKey, (*TPmaReader)(unsafe.Pointer(p2)).FnKey)
if res <= 0 {
iRes = i1
} else {
iRes = i2
}
}
}
*(*int32)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer(pMerger)).FaTree + uintptr(iOut)*4)) = iRes
}
// C documentation
//
// /*
// ** Initialize the MergeEngine object passed as the second argument. Once this
// ** function returns, the first key of merged data may be read from the
// ** MergeEngine object in the usual fashion.
// **
// ** If argument eMode is INCRINIT_ROOT, then it is assumed that any IncrMerge
// ** objects attached to the PmaReader objects that the merger reads from have
// ** already been populated, but that they have not yet populated aFile[0] and
// ** set the PmaReader objects up to read from it. In this case all that is
// ** required is to call vdbePmaReaderNext() on each PmaReader to point it at
// ** its first key.
// **
// ** Otherwise, if eMode is any value other than INCRINIT_ROOT, then use
// ** vdbePmaReaderIncrMergeInit() to initialize each PmaReader that feeds data
// ** to pMerger.
// **
// ** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
// */
func _vdbeMergeEngineInit(tls *libc.TLS, pTask uintptr, pMerger uintptr, eMode int32) (r int32) {
var i, nTree, rc int32
_, _, _ = i, nTree, rc
rc = SQLITE_OK /* Number of subtrees to merge */
/* Failure to allocate the merge would have been detected prior to
** invoking this routine */
/* eMode is always INCRINIT_NORMAL in single-threaded mode */
/* Verify that the MergeEngine is assigned to a single thread */
(*TMergeEngine)(unsafe.Pointer(pMerger)).FpTask = pTask
nTree = (*TMergeEngine)(unsafe.Pointer(pMerger)).FnTree
i = 0
for {
if !(i < nTree) {
break
}
if libc.Bool(int32(SQLITE_MAX_WORKER_THREADS) > 0) && eMode == int32(INCRINIT_ROOT) {
/* PmaReaders should be normally initialized in order, as if they are
** reading from the same temp file this makes for more linear file IO.
** However, in the INCRINIT_ROOT case, if PmaReader aReadr[nTask-1] is
** in use it will block the vdbePmaReaderNext() call while it uses
** the main thread to fill its buffer. So calling PmaReaderNext()
** on this PmaReader before any of the multi-threaded PmaReaders takes
** better advantage of multi-processor hardware. */
rc = _vdbePmaReaderNext(tls, (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr+uintptr(nTree-i-int32(1))*80)
} else {
rc = _vdbePmaReaderIncrInit(tls, (*TMergeEngine)(unsafe.Pointer(pMerger)).FaReadr+uintptr(i)*80, INCRINIT_NORMAL)
}
if rc != SQLITE_OK {
return rc
}
goto _1
_1:
;
i++
}
i = (*TMergeEngine)(unsafe.Pointer(pMerger)).FnTree - int32(1)
for {
if !(i > 0) {
break
}
_vdbeMergeEngineCompare(tls, pMerger, i)
goto _2
_2:
;
i--
}
return int32((*TUnpackedRecord)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpUnpacked)).FerrCode)
}
// C documentation
//
// /*
// ** The PmaReader passed as the first argument is guaranteed to be an
// ** incremental-reader (pReadr->pIncr!=0). This function serves to open
// ** and/or initialize the temp file related fields of the IncrMerge
// ** object at (pReadr->pIncr).
// **
// ** If argument eMode is set to INCRINIT_NORMAL, then all PmaReaders
// ** in the sub-tree headed by pReadr are also initialized. Data is then
// ** loaded into the buffers belonging to pReadr and it is set to point to
// ** the first key in its range.
// **
// ** If argument eMode is set to INCRINIT_TASK, then pReadr is guaranteed
// ** to be a multi-threaded PmaReader and this function is being called in a
// ** background thread. In this case all PmaReaders in the sub-tree are
// ** initialized as for INCRINIT_NORMAL and the aFile[1] buffer belonging to
// ** pReadr is populated. However, pReadr itself is not set up to point
// ** to its first key. A call to vdbePmaReaderNext() is still required to do
// ** that.
// **
// ** The reason this function does not call vdbePmaReaderNext() immediately
// ** in the INCRINIT_TASK case is that vdbePmaReaderNext() assumes that it has
// ** to block on thread (pTask->thread) before accessing aFile[1]. But, since
// ** this entire function is being run by thread (pTask->thread), that will
// ** lead to the current background thread attempting to join itself.
// **
// ** Finally, if argument eMode is set to INCRINIT_ROOT, it may be assumed
// ** that pReadr->pIncr is a multi-threaded IncrMerge object, and that all
// ** child-trees have already been initialized using IncrInit(INCRINIT_TASK).
// ** In this case vdbePmaReaderNext() is called on all child PmaReaders and
// ** the current PmaReader set to point to the first key in its range.
// **
// ** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
// */
func _vdbePmaReaderIncrMergeInit(tls *libc.TLS, pReadr uintptr, eMode int32) (r int32) {
var db, pIncr, pTask uintptr
var mxSz, rc int32
_, _, _, _, _ = db, mxSz, pIncr, pTask, rc
rc = SQLITE_OK
pIncr = (*TPmaReader)(unsafe.Pointer(pReadr)).FpIncr
pTask = (*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask
db = (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask)).FpSorter)).Fdb
/* eMode is always INCRINIT_NORMAL in single-threaded mode */
rc = _vdbeMergeEngineInit(tls, pTask, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpMerger, eMode)
/* Set up the required files for pIncr. A multi-threaded IncrMerge object
** requires two temp files to itself, whereas a single-threaded object
** only requires a region of pTask->file2. */
if rc == SQLITE_OK {
mxSz = (*TIncrMerger)(unsafe.Pointer(pIncr)).FmxSz
if (*TIncrMerger)(unsafe.Pointer(pIncr)).FbUseThread != 0 {
rc = _vdbeSorterOpenTempFile(tls, db, int64(mxSz), pIncr+40)
if rc == SQLITE_OK {
rc = _vdbeSorterOpenTempFile(tls, db, int64(mxSz), pIncr+40+1*16)
}
} else {
/*if( !pIncr->bUseThread )*/
if (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FpFd == uintptr(0) {
rc = _vdbeSorterOpenTempFile(tls, db, (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FiEof, pTask+80)
(*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FiEof = 0
}
if rc == SQLITE_OK {
(*(*TSorterFile)(unsafe.Pointer(pIncr + 40 + 1*16))).FpFd = (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FpFd
(*TIncrMerger)(unsafe.Pointer(pIncr)).FiStartOff = (*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FiEof
(*TSortSubtask)(unsafe.Pointer(pTask)).Ffile2.FiEof += int64(mxSz)
}
}
}
if rc == SQLITE_OK && (*TIncrMerger)(unsafe.Pointer(pIncr)).FbUseThread != 0 {
/* Use the current thread to populate aFile[1], even though this
** PmaReader is multi-threaded. If this is an INCRINIT_TASK object,
** then this function is already running in background thread
** pIncr->pTask->thread.
**
** If this is the INCRINIT_ROOT object, then it is running in the
** main VDBE thread. But that is Ok, as that thread cannot return
** control to the VDBE or proceed with anything useful until the
** first results are ready from this merger object anyway.
*/
rc = _vdbeIncrPopulate(tls, pIncr)
}
if rc == SQLITE_OK && (libc.Bool(false) || eMode != int32(INCRINIT_TASK)) {
rc = _vdbePmaReaderNext(tls, pReadr)
}
return rc
}
// C documentation
//
// /*
// ** The main routine for vdbePmaReaderIncrMergeInit() operations run in
// ** background threads.
// */
func _vdbePmaReaderBgIncrInit(tls *libc.TLS, pCtx uintptr) (r uintptr) {
var pReader, pRet uintptr
_, _ = pReader, pRet
pReader = pCtx
pRet = uintptr(int64(_vdbePmaReaderIncrMergeInit(tls, pReader, int32(INCRINIT_TASK))))
(*TSortSubtask)(unsafe.Pointer((*TIncrMerger)(unsafe.Pointer((*TPmaReader)(unsafe.Pointer(pReader)).FpIncr)).FpTask)).FbDone = int32(1)
return pRet
}
// C documentation
//
// /*
// ** If the PmaReader passed as the first argument is not an incremental-reader
// ** (if pReadr->pIncr==0), then this function is a no-op. Otherwise, it invokes
// ** the vdbePmaReaderIncrMergeInit() function with the parameters passed to
// ** this routine to initialize the incremental merge.
// **
// ** If the IncrMerger object is multi-threaded (IncrMerger.bUseThread==1),
// ** then a background thread is launched to call vdbePmaReaderIncrMergeInit().
// ** Or, if the IncrMerger is single threaded, the same function is called
// ** using the current thread.
// */
func _vdbePmaReaderIncrInit(tls *libc.TLS, pReadr uintptr, eMode int32) (r int32) {
var pCtx, pIncr uintptr
var rc int32
_, _, _ = pCtx, pIncr, rc
pIncr = (*TPmaReader)(unsafe.Pointer(pReadr)).FpIncr /* Incremental merger */
rc = SQLITE_OK /* Return code */
if pIncr != 0 {
if (*TIncrMerger)(unsafe.Pointer(pIncr)).FbUseThread != 0 {
pCtx = pReadr
rc = _vdbeSorterCreateThread(tls, (*TIncrMerger)(unsafe.Pointer(pIncr)).FpTask, __ccgo_fp(_vdbePmaReaderBgIncrInit), pCtx)
} else {
rc = _vdbePmaReaderIncrMergeInit(tls, pReadr, eMode)
}
}
return rc
}
// C documentation
//
// /*
// ** Allocate a new MergeEngine object to merge the contents of nPMA level-0
// ** PMAs from pTask->file. If no error occurs, set *ppOut to point to
// ** the new object and return SQLITE_OK. Or, if an error does occur, set *ppOut
// ** to NULL and return an SQLite error code.
// **
// ** When this function is called, *piOffset is set to the offset of the
// ** first PMA to read from pTask->file. Assuming no error occurs, it is
// ** set to the offset immediately following the last byte of the last
// ** PMA before returning. If an error does occur, then the final value of
// ** *piOffset is undefined.
// */
func _vdbeMergeEngineLevel0(tls *libc.TLS, pTask uintptr, nPMA int32, piOffset uintptr, ppOut uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i, rc int32
var iOff Ti64
var pNew, pReadr, v1 uintptr
var _ /* nDummy at bp+0 */ Ti64
_, _, _, _, _, _ = i, iOff, pNew, pReadr, rc, v1 /* Merge engine to return */
iOff = *(*Ti64)(unsafe.Pointer(piOffset))
rc = SQLITE_OK
v1 = _vdbeMergeEngineNew(tls, nPMA)
pNew = v1
*(*uintptr)(unsafe.Pointer(ppOut)) = v1
if pNew == uintptr(0) {
rc = int32(SQLITE_NOMEM)
}
i = 0
for {
if !(i < nPMA && rc == SQLITE_OK) {
break
}
*(*Ti64)(unsafe.Pointer(bp)) = 0
pReadr = (*TMergeEngine)(unsafe.Pointer(pNew)).FaReadr + uintptr(i)*80
rc = _vdbePmaReaderInit(tls, pTask, pTask+64, iOff, pReadr, bp)
iOff = (*TPmaReader)(unsafe.Pointer(pReadr)).FiEof
goto _2
_2:
;
i++
}
if rc != SQLITE_OK {
_vdbeMergeEngineFree(tls, pNew)
*(*uintptr)(unsafe.Pointer(ppOut)) = uintptr(0)
}
*(*Ti64)(unsafe.Pointer(piOffset)) = iOff
return rc
}
// C documentation
//
// /*
// ** Return the depth of a tree comprising nPMA PMAs, assuming a fanout of
// ** SORTER_MAX_MERGE_COUNT. The returned value does not include leaf nodes.
// **
// ** i.e.
// **
// ** nPMA<=16 -> TreeDepth() == 0
// ** nPMA<=256 -> TreeDepth() == 1
// ** nPMA<=65536 -> TreeDepth() == 2
// */
func _vdbeSorterTreeDepth(tls *libc.TLS, nPMA int32) (r int32) {
var nDepth int32
var nDiv Ti64
_, _ = nDepth, nDiv
nDepth = 0
nDiv = int64(SORTER_MAX_MERGE_COUNT)
for nDiv < int64(nPMA) {
nDiv = nDiv * int64(SORTER_MAX_MERGE_COUNT)
nDepth++
}
return nDepth
}
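// Illustrative sketch (not part of the generated amalgamation): the value
// computed above is the smallest nDepth such that fanout^(nDepth+1) >= nPMA,
// with the fanout fixed at SORTER_MAX_MERGE_COUNT. The hypothetical helper
// below restates that with an explicit fanout argument; with a fanout of 16
// it returns 0 for nPMA<=16, 1 for nPMA<=256 and 2 for nPMA<=65536, matching
// the table in the comment above.
func _exampleTreeDepth(nPMA int, fanout int) int {
nDepth := 0
nDiv := int64(fanout)
for nDiv < int64(nPMA) {
nDiv *= int64(fanout)
nDepth++
}
return nDepth
}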
// C documentation
//
// /*
// ** pRoot is the root of an incremental merge-tree with depth nDepth (according
// ** to vdbeSorterTreeDepth()). pLeaf is the iSeq'th leaf to be added to the
// ** tree, counting from zero. This function adds pLeaf to the tree.
// **
// ** If successful, SQLITE_OK is returned. If an error occurs, an SQLite error
// ** code is returned and pLeaf is freed.
// */
func _vdbeSorterAddToTree(tls *libc.TLS, pTask uintptr, nDepth int32, iSeq int32, pRoot uintptr, pLeaf uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i, iIter, nDiv, rc int32
var p, pNew, pReadr uintptr
var _ /* pIncr at bp+0 */ uintptr
_, _, _, _, _, _, _ = i, iIter, nDiv, p, pNew, pReadr, rc
rc = SQLITE_OK
nDiv = int32(1)
p = pRoot
rc = _vdbeIncrMergerNew(tls, pTask, pLeaf, bp)
i = int32(1)
for {
if !(i < nDepth) {
break
}
nDiv = nDiv * int32(SORTER_MAX_MERGE_COUNT)
goto _1
_1:
;
i++
}
i = int32(1)
for {
if !(i < nDepth && rc == SQLITE_OK) {
break
}
iIter = iSeq / nDiv % int32(SORTER_MAX_MERGE_COUNT)
pReadr = (*TMergeEngine)(unsafe.Pointer(p)).FaReadr + uintptr(iIter)*80
if (*TPmaReader)(unsafe.Pointer(pReadr)).FpIncr == uintptr(0) {
pNew = _vdbeMergeEngineNew(tls, int32(SORTER_MAX_MERGE_COUNT))
if pNew == uintptr(0) {
rc = int32(SQLITE_NOMEM)
} else {
rc = _vdbeIncrMergerNew(tls, pTask, pNew, pReadr+72)
}
}
if rc == SQLITE_OK {
p = (*TIncrMerger)(unsafe.Pointer((*TPmaReader)(unsafe.Pointer(pReadr)).FpIncr)).FpMerger
nDiv = nDiv / int32(SORTER_MAX_MERGE_COUNT)
}
goto _2
_2:
;
i++
}
if rc == SQLITE_OK {
(*(*TPmaReader)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer(p)).FaReadr + uintptr(iSeq%int32(SORTER_MAX_MERGE_COUNT))*80))).FpIncr = *(*uintptr)(unsafe.Pointer(bp))
} else {
_vdbeIncrFree(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
return rc
}
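// Illustrative sketch (not part of the generated amalgamation): the routine
// above walks from the root towards the leaves and chooses the child slot at
// each level from the base-SORTER_MAX_MERGE_COUNT digits of iSeq, most
// significant digit first. The hypothetical helper below only extracts that
// digit path for a given depth and fanout, which is the route the iSeq'th
// leaf takes through the tree.
func _exampleLeafPath(iSeq int, nDepth int, fanout int) []int {
nDiv := 1
for i := 1; i < nDepth; i++ {
nDiv *= fanout
}
path := make([]int, 0, nDepth)
for i := 1; i < nDepth; i++ {
path = append(path, iSeq/nDiv%fanout)
nDiv /= fanout
}
/* The slot within the bottom-level merger is the least significant digit. */
path = append(path, iSeq%fanout)
return path
}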
// C documentation
//
// /*
// ** This function is called as part of a SorterRewind() operation on a sorter
// ** that has already written two or more level-0 PMAs to one or more temp
// ** files. It builds a tree of MergeEngine/IncrMerger/PmaReader objects that
// ** can be used to incrementally merge all PMAs on disk.
// **
// ** If successful, SQLITE_OK is returned and *ppOut set to point to the
// ** MergeEngine object at the root of the tree before returning. Or, if an
// ** error occurs, an SQLite error code is returned and the final value
// ** of *ppOut is undefined.
// */
func _vdbeSorterMergeTreeBuild(tls *libc.TLS, pSorter uintptr, ppOut uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var i, iSeq, iTask, nDepth, nReader, rc, v3, v4 int32
var pMain, pTask uintptr
var _ /* iReadOff at bp+8 */ Ti64
var _ /* pMerger at bp+16 */ uintptr
var _ /* pRoot at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _ = i, iSeq, iTask, nDepth, nReader, pMain, pTask, rc, v3, v4
pMain = uintptr(0)
rc = SQLITE_OK
/* If the sorter uses more than one task, then create the top-level
** MergeEngine here. This MergeEngine will read data from exactly
** one PmaReader per sub-task. */
if int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask) > int32(1) {
pMain = _vdbeMergeEngineNew(tls, int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask))
if pMain == uintptr(0) {
rc = int32(SQLITE_NOMEM)
}
}
iTask = 0
for {
if !(rc == SQLITE_OK && iTask < int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask)) {
break
}
pTask = pSorter + 96 + uintptr(iTask)*96
if libc.Bool(false) || (*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA != 0 {
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) /* Root node of tree for this task */
nDepth = _vdbeSorterTreeDepth(tls, (*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA)
*(*Ti64)(unsafe.Pointer(bp + 8)) = 0
if (*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA <= int32(SORTER_MAX_MERGE_COUNT) {
rc = _vdbeMergeEngineLevel0(tls, pTask, (*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA, bp+8, bp)
} else {
iSeq = 0
*(*uintptr)(unsafe.Pointer(bp)) = _vdbeMergeEngineNew(tls, int32(SORTER_MAX_MERGE_COUNT))
if *(*uintptr)(unsafe.Pointer(bp)) == uintptr(0) {
rc = int32(SQLITE_NOMEM)
}
i = 0
for {
if !(i < (*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA && rc == SQLITE_OK) {
break
}
*(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0) /* Number of level-0 PMAs to merge */
if (*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA-i < int32(SORTER_MAX_MERGE_COUNT) {
v3 = (*TSortSubtask)(unsafe.Pointer(pTask)).FnPMA - i
} else {
v3 = int32(SORTER_MAX_MERGE_COUNT)
}
nReader = v3
rc = _vdbeMergeEngineLevel0(tls, pTask, nReader, bp+8, bp+16)
if rc == SQLITE_OK {
v4 = iSeq
iSeq++
rc = _vdbeSorterAddToTree(tls, pTask, nDepth, v4, *(*uintptr)(unsafe.Pointer(bp)), *(*uintptr)(unsafe.Pointer(bp + 16)))
}
goto _2
_2:
;
i += int32(SORTER_MAX_MERGE_COUNT)
}
}
if rc == SQLITE_OK {
if pMain != uintptr(0) {
rc = _vdbeIncrMergerNew(tls, pTask, *(*uintptr)(unsafe.Pointer(bp)), (*TMergeEngine)(unsafe.Pointer(pMain)).FaReadr+uintptr(iTask)*80+72)
} else {
pMain = *(*uintptr)(unsafe.Pointer(bp))
}
} else {
_vdbeMergeEngineFree(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
}
goto _1
_1:
;
iTask++
}
if rc != SQLITE_OK {
_vdbeMergeEngineFree(tls, pMain)
pMain = uintptr(0)
}
*(*uintptr)(unsafe.Pointer(ppOut)) = pMain
return rc
}
// C documentation
//
// /*
// ** This function is called as part of an sqlite3VdbeSorterRewind() operation
// ** on a sorter that has written two or more PMAs to temporary files. It sets
// ** up either VdbeSorter.pMerger (for single threaded sorters) or pReader
// ** (for multi-threaded sorters) so that it can be used to iterate through
// ** all records stored in the sorter.
// **
// ** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
// */
func _vdbeSorterSetupMerge(tls *libc.TLS, pSorter uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var db, p, pIncr, pLast, pReadr, pTask0, v3 uintptr
var i, iTask, rc int32
var xCompare TSorterCompare
var _ /* pMain at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _, _ = db, i, iTask, p, pIncr, pLast, pReadr, pTask0, rc, xCompare, v3 /* Return code */
pTask0 = pSorter + 96
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
db = (*TVdbeSorter)(unsafe.Pointer((*TSortSubtask)(unsafe.Pointer(pTask0)).FpSorter)).Fdb
xCompare = _vdbeSorterGetCompare(tls, pSorter)
i = 0
for {
if !(i < int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask)) {
break
}
(*(*TSortSubtask)(unsafe.Pointer(pSorter + 96 + uintptr(i)*96))).FxCompare = xCompare
goto _1
_1:
;
i++
}
rc = _vdbeSorterMergeTreeBuild(tls, pSorter, bp)
if rc == SQLITE_OK {
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUseThreads != 0 {
pReadr = uintptr(0)
pLast = pSorter + 96 + uintptr(int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask)-int32(1))*96
rc = _vdbeSortAllocUnpacked(tls, pLast)
if rc == SQLITE_OK {
pReadr = _sqlite3DbMallocZero(tls, db, uint64(80))
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader = pReadr
if pReadr == uintptr(0) {
rc = int32(SQLITE_NOMEM)
}
}
if rc == SQLITE_OK {
rc = _vdbeIncrMergerNew(tls, pLast, *(*uintptr)(unsafe.Pointer(bp)), pReadr+72)
if rc == SQLITE_OK {
_vdbeIncrMergerSetThreads(tls, (*TPmaReader)(unsafe.Pointer(pReadr)).FpIncr)
iTask = 0
for {
if !(iTask < int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask)-int32(1)) {
break
}
v3 = (*(*TPmaReader)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaReadr + uintptr(iTask)*80))).FpIncr
pIncr = v3
if v3 != 0 {
_vdbeIncrMergerSetThreads(tls, pIncr)
}
goto _2
_2:
;
iTask++
}
iTask = 0
for {
if !(rc == SQLITE_OK && iTask < int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FnTask)) {
break
}
/* Check that:
**
** a) The incremental merge object is configured to use the
** right task, and
** b) If it is using task (nTask-1), it is configured to run
** in single-threaded mode. This is important, as the
** root merge (INCRINIT_ROOT) will be using the same task
** object.
*/
p = (*TMergeEngine)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FaReadr + uintptr(iTask)*80
rc = _vdbePmaReaderIncrInit(tls, p, int32(INCRINIT_TASK))
goto _4
_4:
;
iTask++
}
}
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
}
if rc == SQLITE_OK {
rc = _vdbePmaReaderIncrMergeInit(tls, pReadr, int32(INCRINIT_ROOT))
}
} else {
rc = _vdbeMergeEngineInit(tls, pTask0, *(*uintptr)(unsafe.Pointer(bp)), INCRINIT_NORMAL)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FpMerger = *(*uintptr)(unsafe.Pointer(bp))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
}
}
if rc != SQLITE_OK {
_vdbeMergeEngineFree(tls, *(*uintptr)(unsafe.Pointer(bp)))
}
return rc
}
// C documentation
//
// /*
// ** Once the sorter has been populated by calls to sqlite3VdbeSorterWrite,
// ** this function is called to prepare for iterating through the records
// ** in sorted order.
// */
func _sqlite3VdbeSorterRewind(tls *libc.TLS, pCsr uintptr, pbEof uintptr) (r int32) {
var pSorter uintptr
var rc int32
_, _ = pSorter, rc
rc = SQLITE_OK /* Return code */
pSorter = *(*uintptr)(unsafe.Pointer(pCsr + 48))
/* If no data has been written to disk, then do not do so now. Instead,
** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly
** from the in-memory list. */
if int32((*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUsePMA) == 0 {
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList != 0 {
*(*int32)(unsafe.Pointer(pbEof)) = 0
rc = _vdbeSorterSort(tls, pSorter+96, pSorter+56)
} else {
*(*int32)(unsafe.Pointer(pbEof)) = int32(1)
}
return rc
}
/* Write the current in-memory list to a PMA. When the VdbeSorterWrite()
** function flushes the contents of memory to disk, it always creates a new
** list consisting of a single key immediately afterwards.
** So the list is never empty at this point. */
rc = _vdbeSorterFlushPMA(tls, pSorter)
/* Join all threads */
rc = _vdbeSorterJoinAll(tls, pSorter, rc)
/* Assuming no errors have occurred, set up a merger structure to
** incrementally read and merge all remaining PMAs. */
if rc == SQLITE_OK {
rc = _vdbeSorterSetupMerge(tls, pSorter)
*(*int32)(unsafe.Pointer(pbEof)) = 0
}
return rc
}
// C documentation
//
// /*
// ** Advance to the next element in the sorter. Return value:
// **
// ** SQLITE_OK success
// ** SQLITE_DONE end of data
// ** otherwise some kind of error.
// */
func _sqlite3VdbeSorterNext(tls *libc.TLS, db uintptr, pCsr uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pFree, pSorter uintptr
var rc, v1 int32
var _ /* res at bp+0 */ int32
_, _, _, _ = pFree, pSorter, rc, v1 /* Return code */
pSorter = *(*uintptr)(unsafe.Pointer(pCsr + 48))
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUsePMA != 0 {
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUseThreads != 0 {
rc = _vdbePmaReaderNext(tls, (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader)
if rc == SQLITE_OK && (*TPmaReader)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader)).FpFd == uintptr(0) {
rc = int32(SQLITE_DONE)
}
} else {
/*if( !pSorter->bUseThreads )*/
*(*int32)(unsafe.Pointer(bp)) = 0
rc = _vdbeMergeEngineStep(tls, (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpMerger, bp)
if rc == SQLITE_OK && *(*int32)(unsafe.Pointer(bp)) != 0 {
rc = int32(SQLITE_DONE)
}
}
} else {
pFree = (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList
(*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList = *(*uintptr)(unsafe.Pointer(pFree + 8))
*(*uintptr)(unsafe.Pointer(pFree + 8)) = uintptr(0)
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FaMemory == uintptr(0) {
_vdbeSorterRecordFree(tls, db, pFree)
}
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList != 0 {
v1 = SQLITE_OK
} else {
v1 = int32(SQLITE_DONE)
}
rc = v1
}
return rc
}
// C documentation
//
// /*
// ** Return a pointer to a buffer owned by the sorter that contains the
// ** current key.
// */
func _vdbeSorterRowkey(tls *libc.TLS, pSorter uintptr, pnKey uintptr) (r uintptr) {
var pKey, pReader uintptr
_, _ = pKey, pReader
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUsePMA != 0 {
if (*TVdbeSorter)(unsafe.Pointer(pSorter)).FbUseThreads != 0 {
pReader = (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpReader
} else {
/*if( !pSorter->bUseThreads )*/
pReader = (*TMergeEngine)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer(pSorter)).FpMerger)).FaReadr + uintptr(*(*int32)(unsafe.Pointer((*TMergeEngine)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer(pSorter)).FpMerger)).FaTree + 1*4)))*80
}
*(*int32)(unsafe.Pointer(pnKey)) = (*TPmaReader)(unsafe.Pointer(pReader)).FnKey
pKey = (*TPmaReader)(unsafe.Pointer(pReader)).FaKey
} else {
*(*int32)(unsafe.Pointer(pnKey)) = (*TSorterRecord)(unsafe.Pointer((*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList)).FnVal
pKey = (*TVdbeSorter)(unsafe.Pointer(pSorter)).Flist.FpList + libc.UintptrFromInt32(1)*16
}
return pKey
}
// C documentation
//
// /*
// ** Copy the current sorter key into the memory cell pOut.
// */
func _sqlite3VdbeSorterRowkey(tls *libc.TLS, pCsr uintptr, pOut uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var pKey, pSorter uintptr
var _ /* nKey at bp+0 */ int32
_, _ = pKey, pSorter /* Sorter key to copy into pOut */
pSorter = *(*uintptr)(unsafe.Pointer(pCsr + 48))
pKey = _vdbeSorterRowkey(tls, pSorter, bp)
if _sqlite3VdbeMemClearAndResize(tls, pOut, *(*int32)(unsafe.Pointer(bp))) != 0 {
return int32(SQLITE_NOMEM)
}
(*TMem)(unsafe.Pointer(pOut)).Fn = *(*int32)(unsafe.Pointer(bp))
(*TMem)(unsafe.Pointer(pOut)).Fflags = uint16(int32((*TMem)(unsafe.Pointer(pOut)).Fflags) & ^(libc.Int32FromInt32(MEM_TypeMask)|libc.Int32FromInt32(MEM_Zero)) | int32(MEM_Blob))
libc.Xmemcpy(tls, (*TMem)(unsafe.Pointer(pOut)).Fz, pKey, uint64(*(*int32)(unsafe.Pointer(bp))))
return SQLITE_OK
}
// C documentation
//
// /*
// ** Compare the key in memory cell pVal with the key that the sorter cursor
// ** passed as the first argument currently points to. For the purposes of
// ** the comparison, ignore the rowid field at the end of each record.
// **
// ** If the sorter cursor key contains any NULL values, consider it to be
// ** less than pVal. Even if pVal also contains NULL values.
// **
// ** If an error occurs, return an SQLite error code (i.e. SQLITE_NOMEM).
// ** Otherwise, set *pRes to a negative, zero or positive value if the
// ** key in pVal is smaller than, equal to or larger than the current sorter
// ** key.
// **
// ** This routine forms the core of the OP_SorterCompare opcode, which in
// ** turn is used to verify uniqueness when constructing a UNIQUE INDEX.
// */
func _sqlite3VdbeSorterCompare(tls *libc.TLS, pCsr uintptr, pVal uintptr, nKeyCol int32, pRes uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i int32
var pKey, pKeyInfo, pSorter, r2, v1 uintptr
var _ /* nKey at bp+0 */ int32
_, _, _, _, _, _ = i, pKey, pKeyInfo, pSorter, r2, v1 /* Sorter key to compare pVal with */
pSorter = *(*uintptr)(unsafe.Pointer(pCsr + 48))
r2 = (*TVdbeSorter)(unsafe.Pointer(pSorter)).FpUnpacked
pKeyInfo = (*TVdbeCursor)(unsafe.Pointer(pCsr)).FpKeyInfo
if r2 == uintptr(0) {
v1 = _sqlite3VdbeAllocUnpackedRecord(tls, pKeyInfo)
(*TVdbeSorter)(unsafe.Pointer(pSorter)).FpUnpacked = v1
r2 = v1
if r2 == uintptr(0) {
return int32(SQLITE_NOMEM)
}
(*TUnpackedRecord)(unsafe.Pointer(r2)).FnField = uint16(nKeyCol)
}
pKey = _vdbeSorterRowkey(tls, pSorter, bp)
_sqlite3VdbeRecordUnpack(tls, pKeyInfo, *(*int32)(unsafe.Pointer(bp)), pKey, r2)
i = 0
for {
if !(i < nKeyCol) {
break
}
if int32((*(*TMem)(unsafe.Pointer((*TUnpackedRecord)(unsafe.Pointer(r2)).FaMem + uintptr(i)*56))).Fflags)&int32(MEM_Null) != 0 {
*(*int32)(unsafe.Pointer(pRes)) = -int32(1)
return SQLITE_OK
}
goto _2
_2:
;
i++
}
*(*int32)(unsafe.Pointer(pRes)) = _sqlite3VdbeRecordCompare(tls, (*TMem)(unsafe.Pointer(pVal)).Fn, (*TMem)(unsafe.Pointer(pVal)).Fz, r2)
return SQLITE_OK
}
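// Illustrative sketch (not part of the generated amalgamation): for the
// uniqueness check above, a sorter key holding a NULL in any of its first
// nKeyCol fields is reported as smaller than pVal without running the full
// record comparison. The hypothetical helper below shows that short-circuit
// with a plain slice of null flags and a deferred comparison callback.
func _exampleSorterCompareNulls(isNull []bool, nKeyCol int, cmp func() int) int {
for i := 0; i < nKeyCol; i++ {
if isNull[i] {
/* A NULL in the sorter key sorts before pVal, even if pVal has NULLs. */
return -1
}
}
return cmp()
}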
/************** End of vdbesort.c ********************************************/
/************** Begin file vdbevtab.c ****************************************/
/*
** 2020-03-23
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file implements virtual-tables for examining the bytecode content
** of a prepared statement.
*/
/* #include "sqliteInt.h" */
/************** End of vdbevtab.c ********************************************/
/************** Begin file memjournal.c **************************************/
/*
** 2008 October 7
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains code used to implement an in-memory rollback journal.
** The in-memory rollback journal is used to journal transactions for
** ":memory:" databases and when the journal_mode=MEMORY pragma is used.
**
** Update: The in-memory journal is also used to temporarily cache
** smaller journals that are not critical for power-loss recovery.
** For example, statement journals that are not too big will be held
** entirely in memory, thus reducing the number of file I/O calls, and
** more importantly, reducing temporary file creation events. If these
** journals become too large for memory, they are spilled to disk. But
** in the common case, they are usually small and no file I/O needs to
** occur.
*/
/* #include "sqliteInt.h" */
// C documentation
//
// /* Forward references to internal structures */
type TMemJournal = struct {
FpMethod uintptr
FnChunkSize int32
FnSpill int32
FpFirst uintptr
Fendpoint TFilePoint
Freadpoint TFilePoint
Fflags int32
FpVfs uintptr
FzJournal uintptr
}
type MemJournal = TMemJournal
type TFilePoint = struct {
FiOffset Tsqlite3_int64
FpChunk uintptr
}
type FilePoint = TFilePoint
type TFileChunk = struct {
FpNext uintptr
FzChunk [8]Tu8
}
type FileChunk = TFileChunk
/*
** The rollback journal is composed of a linked list of these structures.
**
** The zChunk array is always at least 8 bytes in size - usually much more.
** Its actual size is stored in the MemJournal.nChunkSize variable.
*/
type TFileChunk1 = struct {
FpNext uintptr
FzChunk [8]Tu8
}
type FileChunk1 = TFileChunk1
/*
** By default, allocate this many bytes of memory for each FileChunk object.
*/
/*
** For chunk size nChunkSize, return the number of bytes that should
** be allocated for each FileChunk structure.
*/
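// Illustrative sketch (not part of the generated amalgamation): the
// allocation size for a FileChunk is the size of the struct header plus
// however much of nChunkSize does not fit in the built-in 8-byte zChunk
// array, which is where the 16+(nChunkSize-8) expression in _memjrnlWrite
// below comes from. The hypothetical helper restates that computation.
func _exampleFileChunkAllocSize(nChunkSize int) int {
/* 16 == unsafe.Sizeof(TFileChunk{}): an 8-byte FpNext pointer followed by
** the first 8 bytes of FzChunk. */
return 16 + (nChunkSize - 8)
}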
/*
** An instance of this object serves as a cursor into the rollback journal.
** The cursor can be either for reading or writing.
*/
type TFilePoint1 = struct {
FiOffset Tsqlite3_int64
FpChunk uintptr
}
type FilePoint1 = TFilePoint1
/*
** This structure is a subclass of sqlite3_file. Each open memory-journal
** is an instance of this class.
*/
type TMemJournal1 = struct {
FpMethod uintptr
FnChunkSize int32
FnSpill int32
FpFirst uintptr
Fendpoint TFilePoint
Freadpoint TFilePoint
Fflags int32
FpVfs uintptr
FzJournal uintptr
}
type MemJournal1 = TMemJournal1
// C documentation
//
// /*
// ** Read data from the in-memory journal file. This is the implementation
// ** of the sqlite3_vfs.xRead method.
// */
func _memjrnlRead(tls *libc.TLS, pJfd uintptr, zBuf uintptr, iAmt int32, iOfst Tsqlite_int64) (r int32) {
var iChunkOffset, iSpace, nCopy, nRead, v5 int32
var iOff Tsqlite3_int64
var p, pChunk, zOut, v2 uintptr
var v3 bool
var v6 int64
_, _, _, _, _, _, _, _, _, _, _, _ = iChunkOffset, iOff, iSpace, nCopy, nRead, p, pChunk, zOut, v2, v3, v5, v6
p = pJfd
zOut = zBuf
nRead = iAmt
if int64(iAmt)+iOfst > (*TMemJournal)(unsafe.Pointer(p)).Fendpoint.FiOffset {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(2)<<libc.Int32FromInt32(8)
}
if (*TMemJournal)(unsafe.Pointer(p)).Freadpoint.FiOffset != iOfst || iOfst == 0 {
/* Locate the chunk containing offset iOfst, starting from the head of
** the chunk list. */
iOff = 0
pChunk = (*TMemJournal)(unsafe.Pointer(p)).FpFirst
for pChunk != 0 && iOff+int64((*TMemJournal)(unsafe.Pointer(p)).FnChunkSize) <= iOfst {
iOff += int64((*TMemJournal)(unsafe.Pointer(p)).FnChunkSize)
pChunk = (*TFileChunk)(unsafe.Pointer(pChunk)).FpNext
}
} else {
/* The read picks up exactly where the previous read left off. */
pChunk = (*TMemJournal)(unsafe.Pointer(p)).Freadpoint.FpChunk
}
iChunkOffset = int32(iOfst % int64((*TMemJournal)(unsafe.Pointer(p)).FnChunkSize))
for {
/* Copy as much as possible from the current chunk, then advance to the
** next chunk until iAmt bytes have been copied. */
iSpace = (*TMemJournal)(unsafe.Pointer(p)).FnChunkSize - iChunkOffset
if nRead < iSpace {
v5 = nRead
} else {
v5 = iSpace
}
nCopy = v5
libc.Xmemcpy(tls, zOut, pChunk+8+uintptr(iChunkOffset), uint64(nCopy))
zOut += uintptr(nCopy)
nRead -= iSpace
iChunkOffset = 0
if v3 = nRead >= 0; v3 {
v2 = (*TFileChunk)(unsafe.Pointer(pChunk)).FpNext
pChunk = v2
}
if !(v3 && v2 != uintptr(0) && nRead > 0) {
break
}
}
if pChunk != 0 {
v6 = iOfst + int64(iAmt)
} else {
v6 = 0
}
(*TMemJournal)(unsafe.Pointer(p)).Freadpoint.FiOffset = v6
(*TMemJournal)(unsafe.Pointer(p)).Freadpoint.FpChunk = pChunk
return SQLITE_OK
}
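// Illustrative sketch (not part of the generated amalgamation): the journal
// above is a linked list of fixed-size chunks, so a read that starts in the
// middle of one chunk may spill over into the next. The hypothetical type
// and helper below show the same walk-then-copy pattern with ordinary Go
// slices, assuming the caller has already checked that the read lies within
// the data written, as _memjrnlRead does above.
type _exampleChunk struct {
next *_exampleChunk
data []byte // always nChunkSize bytes long
}

func _exampleChunkedRead(first *_exampleChunk, nChunkSize int, out []byte, off int64) int {
/* Walk forward to the chunk containing offset off. */
c := first
for iOff := int64(0); c != nil && iOff+int64(nChunkSize) <= off; c = c.next {
iOff += int64(nChunkSize)
}
/* Copy across chunk boundaries until out is full or the list ends. */
n := 0
at := int(off % int64(nChunkSize))
for c != nil && n < len(out) {
n += copy(out[n:], c.data[at:])
at = 0
c = c.next
}
return n
}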
// C documentation
//
// /*
// ** Free the list of FileChunk structures headed at MemJournal.pFirst.
// */
func _memjrnlFreeChunks(tls *libc.TLS, pFirst uintptr) {
var pIter, pNext uintptr
_, _ = pIter, pNext
pIter = pFirst
for {
if !(pIter != 0) {
break
}
pNext = (*TFileChunk)(unsafe.Pointer(pIter)).FpNext
Xsqlite3_free(tls, pIter)
goto _1
_1:
;
pIter = pNext
}
}
// C documentation
//
// /*
// ** Flush the contents of memory to a real file on disk.
// */
func _memjrnlCreateFile(tls *libc.TLS, p uintptr) (r int32) {
var copy1 TMemJournal
var iOff Ti64
var nChunk, rc int32
var pIter, pReal uintptr
_, _, _, _, _, _ = copy1, iOff, nChunk, pIter, pReal, rc
pReal = p
copy1 = *(*TMemJournal)(unsafe.Pointer(p))
libc.Xmemset(tls, p, 0, uint64(80))
rc = _sqlite3OsOpen(tls, copy1.FpVfs, copy1.FzJournal, pReal, copy1.Fflags, uintptr(0))
if rc == SQLITE_OK {
nChunk = copy1.FnChunkSize
iOff = 0
pIter = copy1.FpFirst
for {
if !(pIter != 0) {
break
}
if iOff+int64(nChunk) > copy1.Fendpoint.FiOffset {
nChunk = int32(copy1.Fendpoint.FiOffset - iOff)
}
rc = _sqlite3OsWrite(tls, pReal, pIter+8, nChunk, iOff)
if rc != 0 {
break
}
iOff += int64(nChunk)
goto _1
_1:
;
pIter = (*TFileChunk)(unsafe.Pointer(pIter)).FpNext
}
if rc == SQLITE_OK {
/* No error has occurred. Free the in-memory buffers. */
_memjrnlFreeChunks(tls, copy1.FpFirst)
}
}
if rc != SQLITE_OK {
/* If an error occurred while creating or writing to the file, restore
** the original before returning. This way, SQLite uses the in-memory
** journal data to roll back changes made to the internal page-cache
** before this function was called. */
_sqlite3OsClose(tls, pReal)
*(*TMemJournal)(unsafe.Pointer(p)) = copy1
}
return rc
}
// C documentation
//
// /*
// ** Write data to the file.
// */
func _memjrnlWrite(tls *libc.TLS, pJfd uintptr, zBuf uintptr, iAmt int32, iOfst Tsqlite_int64) (r int32) {
var iChunkOffset, iSpace, nWrite, rc, v1 int32
var p, pChunk, pNew, zWrite, v2 uintptr
_, _, _, _, _, _, _, _, _, _ = iChunkOffset, iSpace, nWrite, p, pChunk, pNew, rc, zWrite, v1, v2
p = pJfd
nWrite = iAmt
zWrite = zBuf
/* If the file should be created now, create it and write the new data
** into the file on disk. */
if (*TMemJournal)(unsafe.Pointer(p)).FnSpill > 0 && int64(iAmt)+iOfst > int64((*TMemJournal)(unsafe.Pointer(p)).FnSpill) {
rc = _memjrnlCreateFile(tls, p)
if rc == SQLITE_OK {
rc = _sqlite3OsWrite(tls, pJfd, zBuf, iAmt, iOfst)
}
return rc
} else {
/* An in-memory journal file should only ever be appended to. Random
** access writes are not required. The only exception to this is when
** the in-memory journal is being used by a connection using the
** atomic-write optimization. In this case the first 28 bytes of the
** journal file may be written as part of committing the transaction. */
if iOfst > 0 && iOfst != (*TMemJournal)(unsafe.Pointer(p)).Fendpoint.FiOffset {
_memjrnlTruncate(tls, pJfd, iOfst)
}
if iOfst == 0 && (*TMemJournal)(unsafe.Pointer(p)).FpFirst != 0 {
libc.Xmemcpy(tls, (*TMemJournal)(unsafe.Pointer(p)).FpFirst+8, zBuf, uint64(iAmt))
} else {
for nWrite > 0 {
pChunk = (*TMemJournal)(unsafe.Pointer(p)).Fendpoint.FpChunk
iChunkOffset = int32((*TMemJournal)(unsafe.Pointer(p)).Fendpoint.FiOffset % int64((*TMemJournal)(unsafe.Pointer(p)).FnChunkSize))
if nWrite < (*TMemJournal)(unsafe.Pointer(p)).FnChunkSize-iChunkOffset {
v1 = nWrite
} else {
v1 = (*TMemJournal)(unsafe.Pointer(p)).FnChunkSize - iChunkOffset
}
iSpace = v1
if iChunkOffset == 0 {
/* New chunk is required to extend the file. */
pNew = Xsqlite3_malloc(tls, int32(libc.Uint64FromInt64(16)+uint64((*TMemJournal)(unsafe.Pointer(p)).FnChunkSize-libc.Int32FromInt32(8))))
if !(pNew != 0) {
return libc.Int32FromInt32(SQLITE_IOERR) | libc.Int32FromInt32(12)<<libc.Int32FromInt32(8)
}
if nSpill > 0 {
(*TMemJournal)(unsafe.Pointer(p)).FnChunkSize = nSpill
} else {
(*TMemJournal)(unsafe.Pointer(p)).FnChunkSize = int32(uint64(libc.Int32FromInt32(8)+libc.Int32FromInt32(MEMJOURNAL_DFLT_FILECHUNKSIZE)) - libc.Uint64FromInt64(16))
}
(*Tsqlite3_file)(unsafe.Pointer(pJfd)).FpMethods = uintptr(unsafe.Pointer(&_MemJournalMethods))
(*TMemJournal)(unsafe.Pointer(p)).FnSpill = nSpill
(*TMemJournal)(unsafe.Pointer(p)).Fflags = flags
(*TMemJournal)(unsafe.Pointer(p)).FzJournal = zName
(*TMemJournal)(unsafe.Pointer(p)).FpVfs = pVfs
return SQLITE_OK
}
// C documentation
//
// /*
// ** Open an in-memory journal file.
// */
func _sqlite3MemJournalOpen(tls *libc.TLS, pJfd uintptr) {
_sqlite3JournalOpen(tls, uintptr(0), uintptr(0), pJfd, 0, -int32(1))
}
// C documentation
//
// /*
// ** The file-handle passed as the only argument is open on a journal file.
// ** Return true if this "journal file" is currently stored in heap memory,
// ** or false otherwise.
// */
func _sqlite3JournalIsInMemory(tls *libc.TLS, p uintptr) (r int32) {
return libc.BoolInt32((*Tsqlite3_file)(unsafe.Pointer(p)).FpMethods == uintptr(unsafe.Pointer(&_MemJournalMethods)))
}
// C documentation
//
// /*
// ** Return the number of bytes required to store a JournalFile that uses vfs
// ** pVfs to create the underlying on-disk files.
// */
func _sqlite3JournalSize(tls *libc.TLS, pVfs uintptr) (r int32) {
var v1 int32
_ = v1
if (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FszOsFile > libc.Int32FromInt64(80) {
v1 = (*Tsqlite3_vfs)(unsafe.Pointer(pVfs)).FszOsFile
} else {
v1 = libc.Int32FromInt64(80)
}
return v1
}
/************** End of memjournal.c ******************************************/
/************** Begin file walker.c ******************************************/
/*
** 2008 August 16
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains routines used for walking the parser tree for
** an SQL statement.
*/
/* #include "sqliteInt.h" */
/* #include <stdlib.h> */
/* #include <string.h> */
// C documentation
//
// /*
// ** Walk all expressions linked into the list of Window objects passed
// ** as the second argument.
// */
func _walkWindowList(tls *libc.TLS, pWalker uintptr, pList uintptr, bOneOnly int32) (r int32) {
var pWin uintptr
var rc int32
_, _ = pWin, rc
pWin = pList
for {
if !(pWin != 0) {
break
}
rc = _sqlite3WalkExprList(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpOrderBy)
if rc != 0 {
return int32(WRC_Abort)
}
rc = _sqlite3WalkExprList(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpPartition)
if rc != 0 {
return int32(WRC_Abort)
}
rc = _sqlite3WalkExpr(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpFilter)
if rc != 0 {
return int32(WRC_Abort)
}
rc = _sqlite3WalkExpr(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpStart)
if rc != 0 {
return int32(WRC_Abort)
}
rc = _sqlite3WalkExpr(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpEnd)
if rc != 0 {
return int32(WRC_Abort)
}
if bOneOnly != 0 {
break
}
goto _1
_1:
;
pWin = (*TWindow)(unsafe.Pointer(pWin)).FpNextWin
}
return WRC_Continue
}
// C documentation
//
// /*
// ** Walk an expression tree. Invoke the callback once for each node
// ** of the expression, while descending. (In other words, the callback
// ** is invoked before visiting children.)
// **
// ** The return value from the callback should be one of the WRC_*
// ** constants to specify how to proceed with the walk.
// **
// ** WRC_Continue Continue descending down the tree.
// **
// ** WRC_Prune Do not descend into child nodes, but allow
// ** the walk to continue with sibling nodes.
// **
// ** WRC_Abort Do no more callbacks. Unwind the stack and
// ** return from the top-level walk call.
// **
// ** The return value from this routine is WRC_Abort to abandon the tree walk
// ** and WRC_Continue to continue.
// */
func _sqlite3WalkExprNN(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
var rc int32
_ = rc
for int32(1) != 0 {
rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*TWalker)(unsafe.Pointer(pWalker)).FxExprCallback})))(tls, pWalker, pExpr)
if rc != 0 {
return rc & int32(WRC_Abort)
}
if !((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_TokenOnly)|libc.Int32FromInt32(EP_Leaf)) != libc.Uint32FromInt32(0)) {
if (*TExpr)(unsafe.Pointer(pExpr)).FpLeft != 0 && _sqlite3WalkExprNN(tls, pWalker, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft) != 0 {
return int32(WRC_Abort)
}
if (*TExpr)(unsafe.Pointer(pExpr)).FpRight != 0 {
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpRight
continue
} else {
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
if _sqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) != 0 {
return int32(WRC_Abort)
}
} else {
if *(*uintptr)(unsafe.Pointer(pExpr + 32)) != 0 {
if _sqlite3WalkExprList(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32))) != 0 {
return int32(WRC_Abort)
}
}
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
if _walkWindowList(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 64)), int32(1)) != 0 {
return int32(WRC_Abort)
}
}
}
}
}
break
}
return WRC_Continue
}
func _sqlite3WalkExpr(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
var v1 int32
_ = v1
if pExpr != 0 {
v1 = _sqlite3WalkExprNN(tls, pWalker, pExpr)
} else {
v1 = WRC_Continue
}
return v1
}
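// Illustrative sketch (not part of the generated amalgamation): the walker
// visits each node before its children, and the callback's WRC_* result
// decides whether to descend (WRC_Continue), skip the children but keep
// going (WRC_Prune), or unwind the whole walk (WRC_Abort). The hypothetical
// node type and helper below show only that control flow; the real walk
// above also loops on the right-hand child to limit recursion depth.
type _exampleNode struct {
left, right *_exampleNode
}

func _exampleWalk(n *_exampleNode, cb func(*_exampleNode) int) int {
if n == nil {
return WRC_Continue
}
rc := cb(n)
if rc == WRC_Abort {
return WRC_Abort
}
if rc == WRC_Prune {
/* Do not descend into this node's children, but do not abort either. */
return WRC_Continue
}
if _exampleWalk(n.left, cb) == WRC_Abort {
return WRC_Abort
}
return _exampleWalk(n.right, cb)
}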
// C documentation
//
// /*
// ** Call sqlite3WalkExpr() for every expression in list p or until
// ** an abort request is seen.
// */
func _sqlite3WalkExprList(tls *libc.TLS, pWalker uintptr, p uintptr) (r int32) {
var i int32
var pItem uintptr
_, _ = i, pItem
if p != 0 {
i = (*TExprList)(unsafe.Pointer(p)).FnExpr
pItem = p + 8
for {
if !(i > 0) {
break
}
if _sqlite3WalkExpr(tls, pWalker, (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr) != 0 {
return int32(WRC_Abort)
}
goto _1
_1:
;
i--
pItem += 32
}
}
return WRC_Continue
}
// C documentation
//
// /*
// ** This is a no-op callback for Walker->xSelectCallback2. If this
// ** callback is set, then the Select->pWinDefn list is traversed.
// */
func _sqlite3WalkWinDefnDummyCallback(tls *libc.TLS, pWalker uintptr, p uintptr) {
_ = pWalker
_ = p
/* No-op */
}
// C documentation
//
// /*
// ** Walk all expressions associated with SELECT statement p. Do
// ** not invoke the SELECT callback on p, but do (of course) invoke
// ** any expr callbacks and SELECT callbacks that come from subqueries.
// ** Return WRC_Abort or WRC_Continue.
// */
func _sqlite3WalkSelectExpr(tls *libc.TLS, pWalker uintptr, p uintptr) (r int32) {
var pParse, v1 uintptr
var rc int32
var v2 bool
_, _, _, _ = pParse, rc, v1, v2
if _sqlite3WalkExprList(tls, pWalker, (*TSelect)(unsafe.Pointer(p)).FpEList) != 0 {
return int32(WRC_Abort)
}
if _sqlite3WalkExpr(tls, pWalker, (*TSelect)(unsafe.Pointer(p)).FpWhere) != 0 {
return int32(WRC_Abort)
}
if _sqlite3WalkExprList(tls, pWalker, (*TSelect)(unsafe.Pointer(p)).FpGroupBy) != 0 {
return int32(WRC_Abort)
}
if _sqlite3WalkExpr(tls, pWalker, (*TSelect)(unsafe.Pointer(p)).FpHaving) != 0 {
return int32(WRC_Abort)
}
if _sqlite3WalkExprList(tls, pWalker, (*TSelect)(unsafe.Pointer(p)).FpOrderBy) != 0 {
return int32(WRC_Abort)
}
if _sqlite3WalkExpr(tls, pWalker, (*TSelect)(unsafe.Pointer(p)).FpLimit) != 0 {
return int32(WRC_Abort)
}
if (*TSelect)(unsafe.Pointer(p)).FpWinDefn != 0 {
if v2 = (*TWalker)(unsafe.Pointer(pWalker)).FxSelectCallback2 == __ccgo_fp(_sqlite3WalkWinDefnDummyCallback); !v2 {
v1 = (*TWalker)(unsafe.Pointer(pWalker)).FpParse
pParse = v1
}
if v2 || v1 != uintptr(0) && int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) || (*TWalker)(unsafe.Pointer(pWalker)).FxSelectCallback2 == __ccgo_fp(_sqlite3SelectPopWith) {
/* The following may return WRC_Abort if there are unresolvable
** symbols (e.g. a table that does not exist) in a window definition. */
rc = _walkWindowList(tls, pWalker, (*TSelect)(unsafe.Pointer(p)).FpWinDefn, 0)
return rc
}
}
return WRC_Continue
}
// C documentation
//
// /*
// ** Walk the parse trees associated with all subqueries in the
// ** FROM clause of SELECT statement p. Do not invoke the select
// ** callback on p, but do invoke it on each FROM clause subquery
// ** and on any subqueries further down in the tree. Return
// ** WRC_Abort or WRC_Continue;
// */
func _sqlite3WalkSelectFrom(tls *libc.TLS, pWalker uintptr, p uintptr) (r int32) {
var i int32
var pItem, pSrc uintptr
_, _, _ = i, pItem, pSrc
pSrc = (*TSelect)(unsafe.Pointer(p)).FpSrc
if pSrc != 0 {
i = (*TSrcList)(unsafe.Pointer(pSrc)).FnSrc
pItem = pSrc + 8
for {
if !(i > 0) {
break
}
if (*TSrcItem)(unsafe.Pointer(pItem)).FpSelect != 0 && _sqlite3WalkSelect(tls, pWalker, (*TSrcItem)(unsafe.Pointer(pItem)).FpSelect) != 0 {
return int32(WRC_Abort)
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pItem + 60 + 4))&0x4>>2)) != 0 && _sqlite3WalkExprList(tls, pWalker, *(*uintptr)(unsafe.Pointer(pItem + 88))) != 0 {
return int32(WRC_Abort)
}
goto _1
_1:
;
i--
pItem += 104
}
}
return WRC_Continue
}
// C documentation
//
// /*
// ** Call sqlite3WalkExpr() for every expression in Select statement p.
// ** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and
// ** on the compound select chain, p->pPrior.
// **
// ** If it is not NULL, the xSelectCallback() callback is invoked before
// ** the walk of the expressions and FROM clause. The xSelectCallback2()
// ** method is invoked following the walk of the expressions and FROM clause,
// ** but only if xSelectCallback and xSelectCallback2 are both non-NULL
// ** and if the expressions and FROM clause both return WRC_Continue.
// **
// ** Return WRC_Continue under normal conditions. Return WRC_Abort if
// ** there is an abort request.
// **
// ** If the Walker does not have an xSelectCallback() then this routine
// ** is a no-op returning WRC_Continue.
// */
func _sqlite3WalkSelect(tls *libc.TLS, pWalker uintptr, p uintptr) (r int32) {
var rc int32
_ = rc
if p == uintptr(0) {
return WRC_Continue
}
if (*TWalker)(unsafe.Pointer(pWalker)).FxSelectCallback == uintptr(0) {
return WRC_Continue
}
for cond := true; cond; cond = p != uintptr(0) {
rc = (*(*func(*libc.TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{(*TWalker)(unsafe.Pointer(pWalker)).FxSelectCallback})))(tls, pWalker, p)
if rc != 0 {
return rc & int32(WRC_Abort)
}
if _sqlite3WalkSelectExpr(tls, pWalker, p) != 0 || _sqlite3WalkSelectFrom(tls, pWalker, p) != 0 {
return int32(WRC_Abort)
}
if (*TWalker)(unsafe.Pointer(pWalker)).FxSelectCallback2 != 0 {
(*(*func(*libc.TLS, uintptr, uintptr))(unsafe.Pointer(&struct{ uintptr }{(*TWalker)(unsafe.Pointer(pWalker)).FxSelectCallback2})))(tls, pWalker, p)
}
p = (*TSelect)(unsafe.Pointer(p)).FpPrior
}
return WRC_Continue
}
// C documentation
//
// /* Increase the walkerDepth when entering a subquery, and
// ** decrease when leaving the subquery.
// */
func _sqlite3WalkerDepthIncrease(tls *libc.TLS, pWalker uintptr, pSelect uintptr) (r int32) {
_ = pSelect
(*TWalker)(unsafe.Pointer(pWalker)).FwalkerDepth++
return WRC_Continue
}
func _sqlite3WalkerDepthDecrease(tls *libc.TLS, pWalker uintptr, pSelect uintptr) {
_ = pSelect
(*TWalker)(unsafe.Pointer(pWalker)).FwalkerDepth--
}
// C documentation
//
// /*
// ** No-op routine for the parse-tree walker.
// **
// ** When this routine is the Walker.xExprCallback then expression trees
// ** are walked without any actions being taken at each node. Presumably,
// ** when this routine is used for Walker.xExprCallback then
// ** Walker.xSelectCallback is set to do something useful for every
// ** subquery in the parser tree.
// */
func _sqlite3ExprWalkNoop(tls *libc.TLS, NotUsed uintptr, NotUsed2 uintptr) (r int32) {
_ = NotUsed
_ = NotUsed2
return WRC_Continue
}
// C documentation
//
// /*
// ** No-op routine for the parse-tree walker for SELECT statements.
// */
func _sqlite3SelectWalkNoop(tls *libc.TLS, NotUsed uintptr, NotUsed2 uintptr) (r int32) {
_ = NotUsed
_ = NotUsed2
return WRC_Continue
}
/************** End of walker.c **********************************************/
/************** Begin file resolve.c *****************************************/
/*
** 2008 August 18
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains routines used for walking the parser tree and
// ** resolving all identifiers by associating them with a particular
** table and column.
*/
/* #include "sqliteInt.h" */
/*
** Magic table number to mean the EXCLUDED table in an UPSERT statement.
*/
// C documentation
//
// /*
// ** Walk the expression tree pExpr and increase the aggregate function
// ** depth (the Expr.op2 field) by N on every TK_AGG_FUNCTION node.
// ** This needs to occur when copying a TK_AGG_FUNCTION node from an
// ** outer query into an inner subquery.
// **
// ** incrAggFunctionDepth(pExpr,n) is the main routine. incrAggDepth(..)
// ** is a helper function - a callback for the tree walker.
// **
// ** See also the sqlite3WindowExtraAggFuncDepth() routine in window.c
// */
func _incrAggDepth(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
var p1 uintptr
_ = p1
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_AGG_FUNCTION) {
p1 = pExpr + 2
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) + *(*int32)(unsafe.Pointer(&(*TWalker)(unsafe.Pointer(pWalker)).Fu)))
}
return WRC_Continue
}
func _incrAggFunctionDepth(tls *libc.TLS, pExpr uintptr, N int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* w at bp+0 */ TWalker
if N > 0 {
libc.Xmemset(tls, bp, 0, uint64(48))
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_incrAggDepth)
*(*int32)(unsafe.Pointer(bp + 40)) = N
_sqlite3WalkExpr(tls, bp, pExpr)
}
}
// C documentation
//
// /*
// ** Turn the pExpr expression into an alias for the iCol-th column of the
// ** result set in pEList.
// **
// ** If the reference is followed by a COLLATE operator, then make sure
// ** the COLLATE operator is preserved. For example:
// **
// ** SELECT a+b, c+d FROM t1 ORDER BY 1 COLLATE nocase;
// **
// ** Should be transformed into:
// **
// ** SELECT a+b, c+d FROM t1 ORDER BY (a+b) COLLATE nocase;
// **
// ** The nSubquery parameter specifies how many levels of subquery the
// ** alias is removed from the original expression. The usual value is
// ** zero but it might be more if the alias is contained within a subquery
// ** of the original expression. The Expr.op2 field of TK_AGG_FUNCTION
// ** structures must be increased by the nSubquery amount.
// */
func _resolveAlias(tls *libc.TLS, pParse uintptr, pEList uintptr, iCol int32, pExpr uintptr, nSubquery int32) {
bp := tls.Alloc(80)
defer tls.Free(80)
var db, pDup, pOrig uintptr
var _ /* temp at bp+0 */ TExpr
_, _, _ = db, pDup, pOrig /* The database connection */
pOrig = (*(*TExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(iCol)*32))).FpExpr
if (*TExpr)(unsafe.Pointer(pExpr)).FpAggInfo != 0 {
return
}
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
pDup = _sqlite3ExprDup(tls, db, pOrig, 0)
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
_sqlite3ExprDelete(tls, db, pDup)
pDup = uintptr(0)
} else {
_incrAggFunctionDepth(tls, pDup, nSubquery)
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_COLLATE) {
pDup = _sqlite3ExprAddCollateString(tls, pParse, pDup, *(*uintptr)(unsafe.Pointer(pExpr + 8)))
}
libc.Xmemcpy(tls, bp, pDup, uint64(72))
libc.Xmemcpy(tls, pDup, pExpr, uint64(72))
libc.Xmemcpy(tls, pExpr, bp, uint64(72))
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
if *(*uintptr)(unsafe.Pointer(pExpr + 64)) != uintptr(0) {
(*TWindow)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 64)))).FpOwner = pExpr
}
}
_sqlite3ExprDeferredDelete(tls, pParse, pDup)
}
}
// C documentation
//
// /*
// ** Subqueries store the original database, table and column names for their
// ** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN",
// ** and mark the expression-list item by setting ExprList.a[].fg.eEName
// ** to ENAME_TAB.
// **
// ** Check to see if the zSpan/eEName of the expression-list item passed to this
// ** routine matches the zDb, zTab, and zCol. If any of zDb, zTab, and zCol are
// ** NULL then those fields will match anything. Return true if there is a match,
// ** or false otherwise.
// **
// ** SF_NestedFrom subqueries also store an entry for the implicit rowid (or
// ** _rowid_, or oid) column by setting ExprList.a[].fg.eEName to ENAME_ROWID,
// ** and setting zSpan to "DATABASE.TABLE.<fake-rowid-name>". This type of pItem
// ** argument matches if zCol is a rowid alias. If it is not NULL, (*pbRowid)
// ** is set to 1 if there is this kind of match.
// */
func _sqlite3MatchEName(tls *libc.TLS, pItem uintptr, zCol uintptr, zTab uintptr, zDb uintptr, pbRowid uintptr) (r int32) {
var eEName, n int32
var zSpan uintptr
_, _, _ = eEName, n, zSpan
eEName = int32(uint32(*(*uint16)(unsafe.Pointer(pItem + 16 + 4)) & 0x3 >> 0))
if eEName != int32(ENAME_TAB) && (eEName != int32(ENAME_ROWID) || pbRowid == uintptr(0)) {
return 0
}
zSpan = (*TExprList_item)(unsafe.Pointer(pItem)).FzEName
n = 0
for {
if !(*(*int8)(unsafe.Pointer(zSpan + uintptr(n))) != 0 && int32(*(*int8)(unsafe.Pointer(zSpan + uintptr(n)))) != int32('.')) {
break
}
goto _1
_1:
;
n++
}
if zDb != 0 && (Xsqlite3_strnicmp(tls, zSpan, zDb, n) != 0 || int32(*(*int8)(unsafe.Pointer(zDb + uintptr(n)))) != 0) {
return 0
}
zSpan += uintptr(n + int32(1))
n = 0
for {
if !(*(*int8)(unsafe.Pointer(zSpan + uintptr(n))) != 0 && int32(*(*int8)(unsafe.Pointer(zSpan + uintptr(n)))) != int32('.')) {
break
}
goto _2
_2:
;
n++
}
if zTab != 0 && (Xsqlite3_strnicmp(tls, zSpan, zTab, n) != 0 || int32(*(*int8)(unsafe.Pointer(zTab + uintptr(n)))) != 0) {
return 0
}
zSpan += uintptr(n + int32(1))
if zCol != 0 {
if eEName == int32(ENAME_TAB) && _sqlite3StrICmp(tls, zSpan, zCol) != 0 {
return 0
}
if eEName == int32(ENAME_ROWID) && _sqlite3IsRowid(tls, zCol) == 0 {
return 0
}
}
if eEName == int32(ENAME_ROWID) {
*(*int32)(unsafe.Pointer(pbRowid)) = int32(1)
}
return int32(1)
}
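// Illustrative sketch (not part of the generated amalgamation): the span
// matched above has the shape "DATABASE.TABLE.COLUMN" (or "DATABASE.TABLE."
// followed by a fake rowid name for ENAME_ROWID entries), and each non-NULL
// argument must match the corresponding dot-separated piece. The
// hypothetical helper below shows the same prefix walk over a Go string,
// using exact comparisons where the routine above is case-insensitive.
func _exampleMatchSpan(zSpan string, zDb string, zTab string, zCol string) bool {
next := func() string {
i := 0
for i < len(zSpan) && zSpan[i] != '.' {
i++
}
part := zSpan[:i]
if i < len(zSpan) {
zSpan = zSpan[i+1:]
} else {
zSpan = ""
}
return part
}
db, tab, col := next(), next(), next()
if zDb != "" && zDb != db {
return false
}
if zTab != "" && zTab != tab {
return false
}
return zCol == "" || zCol == col
}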
// C documentation
//
// /*
// ** Return TRUE if the double-quoted string mis-feature should be supported.
// */
func _areDoubleQuotedStringsEnabled(tls *libc.TLS, db uintptr, pTopNC uintptr) (r int32) {
if (*Tsqlite3)(unsafe.Pointer(db)).Finit1.Fbusy != 0 {
return int32(1)
} /* Always support for legacy schemas */
if (*TNameContext)(unsafe.Pointer(pTopNC)).FncFlags&int32(NC_IsDDL) != 0 {
/* Currently parsing a DDL statement */
if _sqlite3WritableSchema(tls, db) != 0 && (*Tsqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_DqsDML) != uint64(0) {
return int32(1)
}
return libc.BoolInt32((*Tsqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_DqsDDL) != uint64(0))
} else {
/* Currently parsing a DML statement */
return libc.BoolInt32((*Tsqlite3)(unsafe.Pointer(db)).Fflags&uint64(SQLITE_DqsDML) != uint64(0))
}
return r
}
// C documentation
//
// /*
// ** The argument is guaranteed to be a non-NULL Expr node of type TK_COLUMN.
// ** return the appropriate colUsed mask.
// */
func _sqlite3ExprColUsed(tls *libc.TLS, pExpr uintptr) (r TBitmask) {
var n int32
var pExTab uintptr
var v1 uint64
_, _, _ = n, pExTab, v1
n = int32((*TExpr)(unsafe.Pointer(pExpr)).FiColumn)
pExTab = *(*uintptr)(unsafe.Pointer(pExpr + 64))
if (*TTable)(unsafe.Pointer(pExTab)).FtabFlags&uint32(TF_HasGenerated) != uint32(0) && int32((*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer(pExTab)).FaCol + uintptr(n)*16))).FcolFlags)&int32(COLFLAG_GENERATED) != 0 {
if int32((*TTable)(unsafe.Pointer(pExTab)).FnCol) >= int32(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8)) {
v1 = uint64(-libc.Int32FromInt32(1))
} else {
v1 = libc.Uint64FromInt32(1)<<(*TTable)(unsafe.Pointer(pExTab)).FnCol - uint64(1)
}
return v1
} else {
if n >= int32(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8)) {
n = int32(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8)) - libc.Int32FromInt32(1)
}
return libc.Uint64FromInt32(1) << n
}
return r
}
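// Illustrative sketch (not part of the generated amalgamation): the mask
// computed above sets one bit per referenced column, folding every column at
// index 63 or above onto the top bit, and falls back to an all-columns mask
// when the referenced column is a generated one (since its value may depend
// on any other column). The hypothetical helper below shows just the
// single-column saturation rule.
func _exampleColUsedBit(iCol int) uint64 {
if iCol >= 64 {
/* Columns beyond the width of the bitmask all share the top bit. */
iCol = 63
}
return uint64(1) << uint(iCol)
}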
// C documentation
//
// /*
// ** Create a new expression term for the column specified by pMatch and
// ** iColumn. Append this new expression term to the FULL JOIN Match set
// ** in *ppList. Create a new *ppList if this is the first term in the
// ** set.
// */
func _extendFJMatch(tls *libc.TLS, pParse uintptr, ppList uintptr, pMatch uintptr, iColumn Ti16) {
var pNew uintptr
_ = pNew
pNew = _sqlite3ExprAlloc(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, int32(TK_COLUMN), uintptr(0), 0)
if pNew != 0 {
(*TExpr)(unsafe.Pointer(pNew)).FiTable = (*TSrcItem)(unsafe.Pointer(pMatch)).FiCursor
(*TExpr)(unsafe.Pointer(pNew)).FiColumn = iColumn
*(*uintptr)(unsafe.Pointer(pNew + 64)) = (*TSrcItem)(unsafe.Pointer(pMatch)).FpTab
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= uint32(libc.Int32FromInt32(EP_CanBeNull))
*(*uintptr)(unsafe.Pointer(ppList)) = _sqlite3ExprListAppend(tls, pParse, *(*uintptr)(unsafe.Pointer(ppList)), pNew)
}
}
// C documentation
//
// /*
// ** Return TRUE (non-zero) if zTab is a valid name for the schema table pTab.
// */
func _isValidSchemaTableName(tls *libc.TLS, zTab uintptr, pTab uintptr, pSchema uintptr) (r int32) {
var zLegacy uintptr
_ = zLegacy
if Xsqlite3_strnicmp(tls, zTab, __ccgo_ts+6527, int32(7)) != 0 {
return 0
}
zLegacy = (*TTable)(unsafe.Pointer(pTab)).FzName
if libc.Xstrcmp(tls, zLegacy+uintptr(7), __ccgo_ts+6535+7) == 0 {
if _sqlite3StrICmp(tls, zTab+uintptr(7), __ccgo_ts+6554+7) == 0 {
return int32(1)
}
if pSchema == uintptr(0) {
return 0
}
if _sqlite3StrICmp(tls, zTab+uintptr(7), __ccgo_ts+6068+7) == 0 {
return int32(1)
}
if _sqlite3StrICmp(tls, zTab+uintptr(7), __ccgo_ts+6573+7) == 0 {
return int32(1)
}
} else {
if _sqlite3StrICmp(tls, zTab+uintptr(7), __ccgo_ts+6573+7) == 0 {
return int32(1)
}
}
return 0
}
// C documentation
//
// /*
// ** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up
// ** that name in the set of source tables in pSrcList and make the pExpr
// ** expression node refer back to that source column. The following changes
// ** are made to pExpr:
// **
// ** pExpr->iDb Set the index in db->aDb[] of the database X
// ** (even if X is implied).
// ** pExpr->iTable Set to the cursor number for the table obtained
// ** from pSrcList.
// ** pExpr->y.pTab Points to the Table structure of X.Y (even if
// ** X and/or Y are implied.)
// ** pExpr->iColumn Set to the column number within the table.
// ** pExpr->op Set to TK_COLUMN.
// ** pExpr->pLeft Any expression this points to is deleted
// ** pExpr->pRight Any expression this points to is deleted.
// **
// ** The zDb variable is the name of the database (the "X"). This value may be
// ** NULL meaning that name is of the form Y.Z or Z. Any available database
// ** can be used. The zTable variable is the name of the table (the "Y"). This
// ** value can be NULL if zDb is also NULL. If zTable is NULL it
// ** means that the form of the name is Z and that columns from any table
// ** can be used.
// **
// ** If the name cannot be resolved unambiguously, leave an error message
// ** in pParse and return WRC_Abort. Return WRC_Prune on success.
// */
func _lookupName(tls *libc.TLS, pParse uintptr, zDb uintptr, zTab uintptr, zCol uintptr, pNC uintptr, pExpr uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var cnt, cntTab, eNewExprOp, hit, i, iCol, j, nSubquery, op, v5 int32
var db, pCol, pEList, pItem, pMatch, pOrig, pSchema, pSrcList, pTab, pTopNC, pUpsert, zAs, zErr, v10 uintptr
var hCol, hCol1 Tu8
var v7, v8 uint32
var _ /* bRowid at bp+8 */ int32
var _ /* pFJMatch at bp+0 */ uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = cnt, cntTab, db, eNewExprOp, hCol, hCol1, hit, i, iCol, j, nSubquery, op, pCol, pEList, pItem, pMatch, pOrig, pSchema, pSrcList, pTab, pTopNC, pUpsert, zAs, zErr, v10, v5, v7, v8 /* Loop counters */
cnt = 0 /* Number of matching column names */
cntTab = 0 /* Number of potential "rowid" matches */
nSubquery = 0 /* How many levels of subquery */
db = (*TParse)(unsafe.Pointer(pParse)).Fdb /* Use for looping over pSrcList items */
pMatch = uintptr(0) /* The matching pSrcList item */
pTopNC = pNC /* First namecontext in the list */
pSchema = uintptr(0) /* Schema of the expression */
eNewExprOp = int32(TK_COLUMN) /* New value for pExpr->op on success */
pTab = uintptr(0) /* A column of pTab */
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0) /* Matches for FULL JOIN .. USING */
/* the name context cannot be NULL. */
/* The Z in X.Y.Z cannot be NULL */
/* Initialize the node to no-match */
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = -int32(1)
/* Translate the schema name in zDb into a pointer to the corresponding
** schema. If not found, pSchema will remain NULL and nothing will match
** resulting in an appropriate error message toward the end of this routine
*/
if zDb != 0 {
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&(libc.Int32FromInt32(NC_PartIdx)|libc.Int32FromInt32(NC_IsCheck)) != 0 {
/* Silently ignore database qualifiers inside CHECK constraints and
** partial indices. Do not raise errors because that might break
** legacy and because it does not hurt anything to just ignore the
** database name. */
zDb = uintptr(0)
} else {
i = 0
for {
if !(i < (*Tsqlite3)(unsafe.Pointer(db)).FnDb) {
break
}
if _sqlite3StrICmp(tls, (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FzDbSName, zDb) == 0 {
pSchema = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb + uintptr(i)*32))).FpSchema
break
}
goto _1
_1:
;
i++
}
if i == (*Tsqlite3)(unsafe.Pointer(db)).FnDb && _sqlite3StrICmp(tls, __ccgo_ts+6587, zDb) == 0 {
/* This branch is taken when the main database has been renamed
** using SQLITE_DBCONFIG_MAINDBNAME. */
pSchema = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb))).FpSchema
zDb = (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer(db)).FaDb))).FzDbSName
}
}
}
/* Start at the inner-most context and move outward until a match is found */
for cond := true; cond; cond = pNC != 0 {
pSrcList = (*TNameContext)(unsafe.Pointer(pNC)).FpSrcList
if pSrcList != 0 {
i = 0
pItem = pSrcList + 8
for {
if !(i < (*TSrcList)(unsafe.Pointer(pSrcList)).FnSrc) {
break
}
pTab = (*TSrcItem)(unsafe.Pointer(pItem)).FpTab
if int32(uint32(*(*uint16)(unsafe.Pointer(pItem + 60 + 4))&0x2000>>13)) != 0 {
/* In this case, pItem is a subquery that has been formed from a
** parenthesized subset of the FROM clause terms. Example:
** .... FROM t1 LEFT JOIN (t2 RIGHT JOIN t3 USING(x)) USING(y) ...
** \_________________________/
** This pItem -------------^
*/
hit = 0
pEList = (*TSelect)(unsafe.Pointer((*TSrcItem)(unsafe.Pointer(pItem)).FpSelect)).FpEList
j = 0
for {
if !(j < (*TExprList)(unsafe.Pointer(pEList)).FnExpr) {
break
}
*(*int32)(unsafe.Pointer(bp + 8)) = 0 /* True if possible rowid match */
if !(_sqlite3MatchEName(tls, pEList+8+uintptr(j)*32, zCol, zTab, zDb, bp+8) != 0) {
goto _3
}
if *(*int32)(unsafe.Pointer(bp + 8)) == 0 {
if cnt > 0 {
if int32(uint32(*(*uint16)(unsafe.Pointer(pItem + 60 + 4))&0x400>>10)) == 0 || _sqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pItem + 72)), zCol) < 0 {
/* Two or more tables have the same column name which is
** not joined by USING. This is an error. Signal as much
** by clearing pFJMatch and letting cnt go above 1. */
_sqlite3ExprListDelete(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
if int32((*TSrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&int32(JT_RIGHT) == 0 {
/* An INNER or LEFT JOIN. Use the left-most table */
goto _3
} else {
if int32((*TSrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&int32(JT_LEFT) == 0 {
/* A RIGHT JOIN. Use the right-most table */
cnt = 0
_sqlite3ExprListDelete(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
/* For a FULL JOIN, we must construct a coalesce() func */
_extendFJMatch(tls, pParse, bp, pMatch, (*TExpr)(unsafe.Pointer(pExpr)).FiColumn)
}
}
}
}
cnt++
hit = int32(1)
} else {
if cnt > 0 {
/* This is a potential rowid match, but there has already been
** a real match found. So this can be ignored. */
goto _3
}
}
cntTab++
pMatch = pItem
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn = int16(j)
libc.SetBitFieldPtr16Uint32(pEList+8+uintptr(j)*32+16+4, libc.Uint32FromInt32(1), 6, 0x40)
/* rowid cannot be part of a USING clause - assert() this. */
if int32(uint32(*(*uint16)(unsafe.Pointer(pEList + 8 + uintptr(j)*32 + 16 + 4))&0x80>>7)) != 0 {
break
}
goto _3
_3:
;
j++
}
if hit != 0 || zTab == uintptr(0) {
goto _2
}
}
if zTab != 0 {
if zDb != 0 {
if (*TTable)(unsafe.Pointer(pTab)).FpSchema != pSchema {
goto _2
}
if pSchema == uintptr(0) && libc.Xstrcmp(tls, zDb, __ccgo_ts+6592) != 0 {
goto _2
}
}
if (*TSrcItem)(unsafe.Pointer(pItem)).FzAlias != uintptr(0) {
if _sqlite3StrICmp(tls, zTab, (*TSrcItem)(unsafe.Pointer(pItem)).FzAlias) != 0 {
goto _2
}
} else {
if _sqlite3StrICmp(tls, zTab, (*TTable)(unsafe.Pointer(pTab)).FzName) != 0 {
if (*TTable)(unsafe.Pointer(pTab)).Ftnum != uint32(1) {
goto _2
}
if !(_isValidSchemaTableName(tls, zTab, pTab, pSchema) != 0) {
goto _2
}
}
}
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) && (*TSrcItem)(unsafe.Pointer(pItem)).FzAlias != 0 {
_sqlite3RenameTokenRemap(tls, pParse, uintptr(0), pExpr+64)
}
}
hCol = _sqlite3StrIHash(tls, zCol)
j = 0
pCol = (*TTable)(unsafe.Pointer(pTab)).FaCol
for {
if !(j < int32((*TTable)(unsafe.Pointer(pTab)).FnCol)) {
break
}
if int32((*TColumn)(unsafe.Pointer(pCol)).FhName) == int32(hCol) && _sqlite3StrICmp(tls, (*TColumn)(unsafe.Pointer(pCol)).FzCnName, zCol) == 0 {
if cnt > 0 {
if int32(uint32(*(*uint16)(unsafe.Pointer(pItem + 60 + 4))&0x400>>10)) == 0 || _sqlite3IdListIndex(tls, *(*uintptr)(unsafe.Pointer(pItem + 72)), zCol) < 0 {
/* Two or more tables have the same column name which is
** not joined by USING. This is an error. Signal as much
** by clearing pFJMatch and letting cnt go above 1. */
_sqlite3ExprListDelete(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
if int32((*TSrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&int32(JT_RIGHT) == 0 {
/* An INNER or LEFT JOIN. Use the left-most table */
goto _4
} else {
if int32((*TSrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&int32(JT_LEFT) == 0 {
/* A RIGHT JOIN. Use the right-most table */
cnt = 0
_sqlite3ExprListDelete(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
} else {
/* For a FULL JOIN, we must construct a coalesce() func */
_extendFJMatch(tls, pParse, bp, pMatch, (*TExpr)(unsafe.Pointer(pExpr)).FiColumn)
}
}
}
}
cnt++
pMatch = pItem
/* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */
if j == int32((*TTable)(unsafe.Pointer(pTab)).FiPKey) {
v5 = -int32(1)
} else {
v5 = int32(int16(j))
}
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn = int16(v5)
if int32(uint32(*(*uint16)(unsafe.Pointer(pItem + 60 + 4))&0x2000>>13)) != 0 {
_sqlite3SrcItemColumnUsed(tls, pItem, j)
}
break
}
goto _4
_4:
;
j++
pCol += 16
}
if 0 == cnt && (*TTable)(unsafe.Pointer(pTab)).FtabFlags&uint32(TF_NoVisibleRowid) == uint32(0) {
/* pTab is a potential ROWID match. Keep track of it and match
** the ROWID later if that seems appropriate. (Search for "cntTab"
** to find related code.) Only allow a ROWID match if there is
** a single ROWID match candidate.
*/
/* The (much more common) non-SQLITE_ALLOW_ROWID_IN_VIEW case is
** simpler since we require exactly one candidate, which will
** always be a non-VIEW
*/
cntTab++
pMatch = pItem
}
goto _2
_2:
;
i++
pItem += 104
}
if pMatch != 0 {
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = (*TSrcItem)(unsafe.Pointer(pMatch)).FiCursor
*(*uintptr)(unsafe.Pointer(pExpr + 64)) = (*TSrcItem)(unsafe.Pointer(pMatch)).FpTab
if int32((*TSrcItem)(unsafe.Pointer(pMatch)).Ffg.Fjointype)&(libc.Int32FromInt32(JT_LEFT)|libc.Int32FromInt32(JT_LTORJ)) != 0 {
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_CanBeNull))
}
pSchema = (*TTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 64)))).FpSchema
}
} /* if( pSrcList ) */
/* If we have not already resolved the name, then maybe
** it is a new.* or old.* trigger argument reference. Or
** maybe it is an excluded.* from an upsert. Or maybe it is
** a reference in the RETURNING clause to a table being modified.
*/
if cnt == 0 && zDb == uintptr(0) {
pTab = uintptr(0)
if (*TParse)(unsafe.Pointer(pParse)).FpTriggerTab != uintptr(0) {
op = int32((*TParse)(unsafe.Pointer(pParse)).FeTriggerOp)
if (*TParse)(unsafe.Pointer(pParse)).FbReturning != 0 {
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_UBaseReg) != 0 && (zTab == uintptr(0) || _sqlite3StrICmp(tls, zTab, (*TTable)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).FpTriggerTab)).FzName) == 0) {
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = libc.BoolInt32(op != int32(TK_DELETE))
pTab = (*TParse)(unsafe.Pointer(pParse)).FpTriggerTab
}
} else {
if op != int32(TK_DELETE) && zTab != 0 && _sqlite3StrICmp(tls, __ccgo_ts+6594, zTab) == 0 {
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = int32(1)
pTab = (*TParse)(unsafe.Pointer(pParse)).FpTriggerTab
} else {
if op != int32(TK_INSERT) && zTab != 0 && _sqlite3StrICmp(tls, __ccgo_ts+6598, zTab) == 0 {
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = 0
pTab = (*TParse)(unsafe.Pointer(pParse)).FpTriggerTab
}
}
}
}
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_UUpsert) != 0 && zTab != uintptr(0) {
pUpsert = *(*uintptr)(unsafe.Pointer(pNC + 16))
if pUpsert != 0 && _sqlite3StrICmp(tls, __ccgo_ts+6602, zTab) == 0 {
pTab = (*(*TSrcItem)(unsafe.Pointer((*TUpsert)(unsafe.Pointer(pUpsert)).FpUpsertSrc + 8))).FpTab
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = int32(EXCLUDED_TABLE_NUMBER)
}
}
if pTab != 0 {
hCol1 = _sqlite3StrIHash(tls, zCol)
pSchema = (*TTable)(unsafe.Pointer(pTab)).FpSchema
cntTab++
iCol = 0
pCol = (*TTable)(unsafe.Pointer(pTab)).FaCol
for {
if !(iCol < int32((*TTable)(unsafe.Pointer(pTab)).FnCol)) {
break
}
if int32((*TColumn)(unsafe.Pointer(pCol)).FhName) == int32(hCol1) && _sqlite3StrICmp(tls, (*TColumn)(unsafe.Pointer(pCol)).FzCnName, zCol) == 0 {
if iCol == int32((*TTable)(unsafe.Pointer(pTab)).FiPKey) {
iCol = -int32(1)
}
break
}
goto _6
_6:
;
iCol++
pCol += 16
}
if iCol >= int32((*TTable)(unsafe.Pointer(pTab)).FnCol) && _sqlite3IsRowid(tls, zCol) != 0 && (*TTable)(unsafe.Pointer(pTab)).FtabFlags&uint32(TF_NoVisibleRowid) == uint32(0) {
/* IMP: R-51414-32910 */
iCol = -int32(1)
}
if iCol < int32((*TTable)(unsafe.Pointer(pTab)).FnCol) {
cnt++
pMatch = uintptr(0)
if (*TExpr)(unsafe.Pointer(pExpr)).FiTable == int32(EXCLUDED_TABLE_NUMBER) {
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn = int16(iCol)
*(*uintptr)(unsafe.Pointer(pExpr + 64)) = pTab
eNewExprOp = int32(TK_COLUMN)
} else {
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = (*TUpsert)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pNC + 16)))).FregData + int32(_sqlite3TableColumnToStorage(tls, pTab, int16(iCol)))
eNewExprOp = int32(TK_REGISTER)
}
} else {
*(*uintptr)(unsafe.Pointer(pExpr + 64)) = pTab
if (*TParse)(unsafe.Pointer(pParse)).FbReturning != 0 {
eNewExprOp = int32(TK_REGISTER)
(*TExpr)(unsafe.Pointer(pExpr)).Fop2 = uint8(TK_COLUMN)
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn = int16(iCol)
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = *(*int32)(unsafe.Pointer(&(*TNameContext)(unsafe.Pointer(pNC)).FuNC)) + (int32((*TTable)(unsafe.Pointer(pTab)).FnCol)+int32(1))*(*TExpr)(unsafe.Pointer(pExpr)).FiTable + int32(_sqlite3TableColumnToStorage(tls, pTab, int16(iCol))) + int32(1)
} else {
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn = int16(iCol)
eNewExprOp = int32(TK_TRIGGER)
if iCol < 0 {
(*TExpr)(unsafe.Pointer(pExpr)).FaffExpr = int8(SQLITE_AFF_INTEGER)
} else {
if (*TExpr)(unsafe.Pointer(pExpr)).FiTable == 0 {
if iCol >= int32(32) {
v7 = uint32(0xffffffff)
} else {
v7 = libc.Uint32FromInt32(1) << iCol
}
*(*Tu32)(unsafe.Pointer(pParse + 216)) |= v7
} else {
if iCol >= int32(32) {
v8 = uint32(0xffffffff)
} else {
v8 = libc.Uint32FromInt32(1) << iCol
}
*(*Tu32)(unsafe.Pointer(pParse + 220)) |= v8
}
}
}
}
}
}
}
/*
** Perhaps the name is a reference to the ROWID
*/
if cnt == 0 && cntTab >= int32(1) && pMatch != 0 && (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&(libc.Int32FromInt32(NC_IdxExpr)|libc.Int32FromInt32(NC_GenCol)) == 0 && _sqlite3IsRowid(tls, zCol) != 0 && ((*TTable)(unsafe.Pointer((*TSrcItem)(unsafe.Pointer(pMatch)).FpTab)).FtabFlags&uint32(TF_NoVisibleRowid) == uint32(0) || int32(uint32(*(*uint16)(unsafe.Pointer(pMatch + 60 + 4))&0x2000>>13)) != 0) {
cnt = cntTab
if int32(uint32(*(*uint16)(unsafe.Pointer(pMatch + 60 + 4))&0x2000>>13)) == 0 {
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn = int16(-int32(1))
}
(*TExpr)(unsafe.Pointer(pExpr)).FaffExpr = int8(SQLITE_AFF_INTEGER)
}
/*
** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z
** might refer to a result-set alias. This happens, for example, when
** we are resolving names in the WHERE clause of the following command:
**
** SELECT a+b AS x FROM table WHERE x<10;
**
** In cases like this, replace pExpr with a copy of the expression that
** forms the result set entry ("a+b" in the example) and return immediately.
** Note that the expression in the result set should have already been
** resolved by the time the WHERE clause is resolved.
**
** The ability to use an output result-set column in the WHERE, GROUP BY,
** or HAVING clauses, or as part of a larger expression in the ORDER BY
** clause is not standard SQL. This is a (goofy) SQLite extension, that
** is supported for backwards compatibility only. Hence, we issue a warning
** on sqlite3_log() whenever the capability is used.
*/
if cnt == 0 && (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_UEList) != 0 && zTab == uintptr(0) {
pEList = *(*uintptr)(unsafe.Pointer(pNC + 16))
j = 0
for {
if !(j < (*TExprList)(unsafe.Pointer(pEList)).FnExpr) {
break
}
zAs = (*(*TExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(j)*32))).FzEName
if int32(uint32(*(*uint16)(unsafe.Pointer(pEList + 8 + uintptr(j)*32 + 16 + 4))&0x3>>0)) == ENAME_NAME && Xsqlite3_stricmp(tls, zAs, zCol) == 0 {
pOrig = (*(*TExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(j)*32))).FpExpr
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_AllowAgg) == 0 && (*TExpr)(unsafe.Pointer(pOrig)).Fflags&uint32(libc.Int32FromInt32(EP_Agg)) != uint32(0) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6611, libc.VaList(bp+24, zAs))
return int32(WRC_Abort)
}
if (*TExpr)(unsafe.Pointer(pOrig)).Fflags&uint32(libc.Int32FromInt32(EP_Win)) != uint32(0) && ((*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_AllowWin) == 0 || pNC != pTopNC) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6642, libc.VaList(bp+24, zAs))
return int32(WRC_Abort)
}
if _sqlite3ExprVectorSize(tls, pOrig) != int32(1) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6679, 0)
return int32(WRC_Abort)
}
_resolveAlias(tls, pParse, pEList, j, pExpr, nSubquery)
cnt = int32(1)
pMatch = uintptr(0)
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
_sqlite3RenameTokenRemap(tls, pParse, uintptr(0), pExpr)
}
goto lookupname_end
}
goto _9
_9:
;
j++
}
}
/* Advance to the next name context. The loop will exit when either
** we have a match (cnt>0) or when we run out of name contexts.
*/
if cnt != 0 {
break
}
pNC = (*TNameContext)(unsafe.Pointer(pNC)).FpNext
nSubquery++
}
/*
** If X and Y are NULL (in other words if only the column name Z is
** supplied) and the value of Z is enclosed in double-quotes, then
** Z is a string literal if it doesn't match any column names. In that
** case, we need to return right away and not make any changes to
** pExpr.
**
** Because no reference was made to outer contexts, the pNC->nRef
** fields are not changed in any context.
*/
if cnt == 0 && zTab == uintptr(0) {
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_DblQuoted)) != uint32(0) && _areDoubleQuotedStringsEnabled(tls, db, pTopNC) != 0 {
/* If a double-quoted identifier does not match any known column name,
** then treat it as a string.
**
** This hack was added in the early days of SQLite in a misguided attempt
** to be compatible with MySQL 3.x, which used double-quotes for strings.
** I now sorely regret putting in this hack. The effect of this hack is
** that misspelled identifier names are silently converted into strings
** rather than causing an error, to the frustration of countless
** programmers. To all those frustrated programmers, my apologies.
**
** Someday, I hope to get rid of this hack. Unfortunately there is
** a huge amount of legacy SQL that uses it. So for now, we just
** issue a warning.
*/
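/* Illustrative example (hypothetical column name): if t1 has no column named
** "nosuchcol", then
**
**   SELECT "nosuchcol" FROM t1;
**
** is accepted with the double-quoted token downgraded to the string literal
** 'nosuchcol', after the SQLITE_WARNING is logged below. */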
Xsqlite3_log(tls, int32(SQLITE_WARNING), __ccgo_ts+6697, libc.VaList(bp+24, zCol))
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_STRING)
libc.Xmemset(tls, pExpr+64, 0, uint64(8))
return int32(WRC_Prune)
}
if _sqlite3ExprIdToTrueFalse(tls, pExpr) != 0 {
return int32(WRC_Prune)
}
}
/*
** cnt==0 means there was no match.
** cnt>1 means there were two or more matches.
**
** cnt==0 is always an error. cnt>1 is often an error, but might
** be multiple matches for a NATURAL LEFT JOIN or a LEFT JOIN USING.
*/
if cnt != int32(1) {
if *(*uintptr)(unsafe.Pointer(bp)) != 0 {
if (*TExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(bp)))).FnExpr == cnt-int32(1) {
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Leaf)) != uint32(0) {
*(*Tu32)(unsafe.Pointer(pExpr + 4)) &= uint32(^libc.Int32FromInt32(EP_Leaf))
} else {
_sqlite3ExprDelete(tls, db, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft)
(*TExpr)(unsafe.Pointer(pExpr)).FpLeft = uintptr(0)
_sqlite3ExprDelete(tls, db, (*TExpr)(unsafe.Pointer(pExpr)).FpRight)
(*TExpr)(unsafe.Pointer(pExpr)).FpRight = uintptr(0)
}
_extendFJMatch(tls, pParse, bp, pMatch, (*TExpr)(unsafe.Pointer(pExpr)).FiColumn)
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_FUNCTION)
*(*uintptr)(unsafe.Pointer(pExpr + 8)) = __ccgo_ts + 6732
*(*uintptr)(unsafe.Pointer(pExpr + 32)) = *(*uintptr)(unsafe.Pointer(bp))
cnt = int32(1)
goto lookupname_end
} else {
_sqlite3ExprListDelete(tls, db, *(*uintptr)(unsafe.Pointer(bp)))
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
}
}
if cnt == 0 {
v10 = __ccgo_ts + 6741
} else {
v10 = __ccgo_ts + 6756
}
zErr = v10
if zDb != 0 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6778, libc.VaList(bp+24, zErr, zDb, zTab, zCol))
} else {
if zTab != 0 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6791, libc.VaList(bp+24, zErr, zTab, zCol))
} else {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6801, libc.VaList(bp+24, zErr, zCol))
}
}
_sqlite3RecordErrorOffsetOfExpr(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pExpr)
(*TParse)(unsafe.Pointer(pParse)).FcheckSchema = uint8(1)
(*TNameContext)(unsafe.Pointer(pTopNC)).FnNcErr++
eNewExprOp = int32(TK_NULL)
}
/* Remove all substructure from pExpr */
if !((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_TokenOnly)|libc.Int32FromInt32(EP_Leaf)) != libc.Uint32FromInt32(0)) {
_sqlite3ExprDelete(tls, db, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft)
(*TExpr)(unsafe.Pointer(pExpr)).FpLeft = uintptr(0)
_sqlite3ExprDelete(tls, db, (*TExpr)(unsafe.Pointer(pExpr)).FpRight)
(*TExpr)(unsafe.Pointer(pExpr)).FpRight = uintptr(0)
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_Leaf))
}
/* If a column from a table in pSrcList is referenced, then record
** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes
** bit 0 to be set. Column 1 sets bit 1. And so forth. Bit 63 is
** set if the 63rd or any subsequent column is used.
**
** The colUsed mask is an optimization used to help determine if an
** index is a covering index. The correct answer is still obtained
** if the mask contains extra set bits. However, it is important to
** avoid setting bits beyond the maximum column number of the table.
** (See ticket [b92e5e8ec2cdbaa1]).
**
** If a generated column is referenced, set bits for every column
** of the table.
*/
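/* Worked sketch of the clamping (values illustrative; mirrors the clamp used
** in sqlite3CreateColumnExpr below):
**
**   column 2  -> colUsed |= 1<<2
**   column 70 -> colUsed |= 1<<63   (any column numbered 64 or higher
**                                    contributes only the shared top bit)
*/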
if int32((*TExpr)(unsafe.Pointer(pExpr)).FiColumn) >= 0 && cnt == int32(1) && pMatch != uintptr(0) {
*(*TBitmask)(unsafe.Pointer(pMatch + 80)) |= _sqlite3ExprColUsed(tls, pExpr)
}
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(eNewExprOp)
goto lookupname_end
lookupname_end:
;
if cnt == int32(1) {
if (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).FxAuth != 0 && (int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_COLUMN) || int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_TRIGGER)) {
_sqlite3AuthRead(tls, pParse, pExpr, pSchema, (*TNameContext)(unsafe.Pointer(pNC)).FpSrcList)
}
/* Increment the nRef value on all name contexts from TopNC up to
** the point where the name matched. */
for {
(*TNameContext)(unsafe.Pointer(pTopNC)).FnRef++
if pTopNC == pNC {
break
}
pTopNC = (*TNameContext)(unsafe.Pointer(pTopNC)).FpNext
goto _11
_11:
}
return int32(WRC_Prune)
} else {
return int32(WRC_Abort)
}
return r
}
// C documentation
//
// /*
// ** Allocate and return a pointer to an expression to load the column iCol
// ** from datasource iSrc in SrcList pSrc.
// */
func _sqlite3CreateColumnExpr(tls *libc.TLS, db uintptr, pSrc uintptr, iSrc int32, iCol int32) (r uintptr) {
var p, pItem, pTab, v1 uintptr
var v2 uint64
var v3 int32
_, _, _, _, _, _ = p, pItem, pTab, v1, v2, v3
p = _sqlite3ExprAlloc(tls, db, int32(TK_COLUMN), uintptr(0), 0)
if p != 0 {
pItem = pSrc + 8 + uintptr(iSrc)*104
v1 = (*TSrcItem)(unsafe.Pointer(pItem)).FpTab
*(*uintptr)(unsafe.Pointer(p + 64)) = v1
pTab = v1
(*TExpr)(unsafe.Pointer(p)).FiTable = (*TSrcItem)(unsafe.Pointer(pItem)).FiCursor
if int32((*TTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 64)))).FiPKey) == iCol {
(*TExpr)(unsafe.Pointer(p)).FiColumn = int16(-int32(1))
} else {
(*TExpr)(unsafe.Pointer(p)).FiColumn = int16(iCol)
if (*TTable)(unsafe.Pointer(pTab)).FtabFlags&uint32(TF_HasGenerated) != uint32(0) && int32((*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer(pTab)).FaCol + uintptr(iCol)*16))).FcolFlags)&int32(COLFLAG_GENERATED) != 0 {
if int32((*TTable)(unsafe.Pointer(pTab)).FnCol) >= int32(64) {
v2 = uint64(-libc.Int32FromInt32(1))
} else {
v2 = libc.Uint64FromInt32(1)<<(*TTable)(unsafe.Pointer(pTab)).FnCol - uint64(1)
}
(*TSrcItem)(unsafe.Pointer(pItem)).FcolUsed = v2
} else {
if iCol >= int32(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8)) {
v3 = int32(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8)) - libc.Int32FromInt32(1)
} else {
v3 = iCol
}
*(*TBitmask)(unsafe.Pointer(pItem + 80)) |= libc.Uint64FromInt32(1) << v3
}
}
}
return p
}
// C documentation
//
// /*
// ** Report an error that an expression is not valid for some set of
// ** pNC->ncFlags values determined by validMask.
// **
// ** static void notValid(
// ** Parse *pParse, // Leave error message here
// ** NameContext *pNC, // The name context
// ** const char *zMsg, // Type of error
// ** int validMask, // Set of contexts for which prohibited
// ** Expr *pExpr // Invalidate this expression on error
// ** ){...}
// **
// ** As an optimization, since the conditional is almost always false
// ** (because errors are rare), the conditional is moved outside of the
// ** function call using a macro.
// */
func _notValidImpl(tls *libc.TLS, pParse uintptr, pNC uintptr, zMsg uintptr, pExpr uintptr, pError uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var zIn uintptr
_ = zIn
zIn = __ccgo_ts + 6808
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_IdxExpr) != 0 {
zIn = __ccgo_ts + 6836
} else {
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_IsCheck) != 0 {
zIn = __ccgo_ts + 6854
} else {
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_GenCol) != 0 {
zIn = __ccgo_ts + 6872
}
}
}
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6890, libc.VaList(bp+8, zMsg, zIn))
if pExpr != 0 {
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_NULL)
}
_sqlite3RecordErrorOffsetOfExpr(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pError)
}
// C documentation
//
// /*
// ** Expression p should encode a floating point value between 0.0 and 1.0.
// ** Return 134217728 (2^27) times this value. Or return -1 if p is not a
// ** floating point value between 0.0 and 1.0.
// */
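// Worked example (using the constant applied below and the likelihood()
// tuning values later in this file): an argument of 0.0625 yields
// int(0.0625 * 134217728) = 8388608, and 0.9375 yields 125829120.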
func _exprProbability(tls *libc.TLS, p uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* r at bp+0 */ float64
*(*float64)(unsafe.Pointer(bp)) = -libc.Float64FromFloat64(1)
if int32((*TExpr)(unsafe.Pointer(p)).Fop) != int32(TK_FLOAT) {
return -int32(1)
}
_sqlite3AtoF(tls, *(*uintptr)(unsafe.Pointer(p + 8)), bp, _sqlite3Strlen30(tls, *(*uintptr)(unsafe.Pointer(p + 8))), uint8(SQLITE_UTF8))
if *(*float64)(unsafe.Pointer(bp)) > float64(1) {
return -int32(1)
}
return int32(*(*float64)(unsafe.Pointer(bp)) * libc.Float64FromFloat64(1.34217728e+08))
}
// C documentation
//
// /*
// ** This routine is callback for sqlite3WalkExpr().
// **
// ** Resolve symbolic names into TK_COLUMN operators for the current
// ** node in the expression tree. Return 0 to continue the search down
// ** the tree or 2 to abort the tree walk.
// **
// ** This routine also does error checking and name resolution for
// ** function names. The operator for aggregate functions is changed
// ** to TK_AGG_FUNCTION.
// */
func _resolveExprStep(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var anRef [8]int32
var auth, i, is_agg, n, nLeft, nRef, nRight, no_such_func, rc, savedAllowFlags, wrong_num_args, v12, v4, v6, v7 int32
var enc Tu8
var p, pDef, pItem, pLeft, pList, pNC, pNC2, pParse, pRight, pRight1, pSel, pSrcList, pWin, zColumn, zDb, zId, zTable, zType, v5, v8, p10, p11, p9 uintptr
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = anRef, auth, enc, i, is_agg, n, nLeft, nRef, nRight, no_such_func, p, pDef, pItem, pLeft, pList, pNC, pNC2, pParse, pRight, pRight1, pSel, pSrcList, pWin, rc, savedAllowFlags, wrong_num_args, zColumn, zDb, zId, zTable, zType, v12, v4, v5, v6, v7, v8, p10, p11, p9
pNC = *(*uintptr)(unsafe.Pointer(pWalker + 40))
pParse = (*TNameContext)(unsafe.Pointer(pNC)).FpParse
switch int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) {
/* The special operator TK_ROW means use the rowid for the first
** column in the FROM clause. This is used by the LIMIT and ORDER BY
** clause processing on UPDATE and DELETE statements, and by
** UPDATE ... FROM statement processing.
*/
case int32(TK_ROW):
pSrcList = (*TNameContext)(unsafe.Pointer(pNC)).FpSrcList
pItem = pSrcList + 8
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_COLUMN)
*(*uintptr)(unsafe.Pointer(pExpr + 64)) = (*TSrcItem)(unsafe.Pointer(pItem)).FpTab
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = (*TSrcItem)(unsafe.Pointer(pItem)).FiCursor
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn--
(*TExpr)(unsafe.Pointer(pExpr)).FaffExpr = int8(SQLITE_AFF_INTEGER)
break
/* An optimization: Attempt to convert
**
** "expr IS NOT NULL" --> "TRUE"
** "expr IS NULL" --> "FALSE"
**
** if we can prove that "expr" is never NULL. Call this the
** "NOT NULL strength reduction optimization".
**
** If this optimization occurs, also restore the NameContext ref-counts
** to the state they were in before the "column" LHS expression was
** resolved. This prevents "column" from being counted as having been
** referenced, which might prevent a SELECT from being erroneously
** marked as correlated.
**
** 2024-03-28: Beware of aggregates. A bare column of aggregated table
** can still evaluate to NULL even though it is marked as NOT NULL.
** Example:
**
** CREATE TABLE t1(a INT NOT NULL);
** SELECT a, a IS NULL, a IS NOT NULL, count(*) FROM t1;
**
** The "a IS NULL" and "a IS NOT NULL" expressions cannot be optimized
** here because at the time this case is hit, we do not yet know whether
** or not t1 is being aggregated. We have to assume the worst and omit
** the optimization. The only time it is safe to apply this optimization
** is within the WHERE clause.
*/
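/* Illustrative example (hypothetical schema): with
**
**   CREATE TABLE t2(b INT NOT NULL);
**   SELECT * FROM t2 WHERE b IS NOT NULL;
**
** the WHERE term reduces to the integer constant 1 (TRUE), because every
** enclosing name context is a WHERE clause and b cannot be NULL there. */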
fallthrough
case int32(TK_NOTNULL):
fallthrough
case int32(TK_ISNULL):
i = 0
p = pNC
for {
if !(p != 0 && i < int32(libc.Uint64FromInt64(32)/libc.Uint64FromInt64(4))) {
break
}
anRef[i] = (*TNameContext)(unsafe.Pointer(p)).FnRef
goto _1
_1:
;
p = (*TNameContext)(unsafe.Pointer(p)).FpNext
i++
}
_sqlite3WalkExpr(tls, pWalker, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft)
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
return int32(WRC_Prune)
}
if _sqlite3ExprCanBeNull(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft) != 0 {
/* The expression can be NULL. So the optimization does not apply */
return int32(WRC_Prune)
}
i = 0
p = pNC
for {
if !(p != 0) {
break
}
if (*TNameContext)(unsafe.Pointer(p)).FncFlags&int32(NC_Where) == 0 {
return int32(WRC_Prune) /* Not in a WHERE clause. Unsafe to optimize. */
}
goto _2
_2:
;
p = (*TNameContext)(unsafe.Pointer(p)).FpNext
i++
}
*(*int32)(unsafe.Pointer(&(*TExpr)(unsafe.Pointer(pExpr)).Fu)) = libc.BoolInt32(int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_NOTNULL))
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(EP_IntValue)
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_INTEGER)
i = 0
p = pNC
for {
if !(p != 0 && i < int32(libc.Uint64FromInt64(32)/libc.Uint64FromInt64(4))) {
break
}
(*TNameContext)(unsafe.Pointer(p)).FnRef = anRef[i]
goto _3
_3:
;
p = (*TNameContext)(unsafe.Pointer(p)).FpNext
i++
}
_sqlite3ExprDelete(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft)
(*TExpr)(unsafe.Pointer(pExpr)).FpLeft = uintptr(0)
return int32(WRC_Prune)
/* A column name: ID
** Or table name and column name: ID.ID
** Or a database, table and column: ID.ID.ID
**
** The TK_ID and TK_OUT cases are combined so that there will only
** be one call to lookupName(). Then the compiler will in-line
** lookupName() for a size reduction and performance increase.
*/
fallthrough
case int32(TK_ID):
fallthrough
case int32(TK_DOT):
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_ID) {
zDb = uintptr(0)
zTable = uintptr(0)
zColumn = *(*uintptr)(unsafe.Pointer(pExpr + 8))
} else {
pLeft = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&(libc.Int32FromInt32(NC_IdxExpr)|libc.Int32FromInt32(NC_GenCol)) != 0 {
_notValidImpl(tls, pParse, pNC, __ccgo_ts+6910, uintptr(0), pExpr)
}
pRight = (*TExpr)(unsafe.Pointer(pExpr)).FpRight
if int32((*TExpr)(unsafe.Pointer(pRight)).Fop) == int32(TK_ID) {
zDb = uintptr(0)
} else {
zDb = *(*uintptr)(unsafe.Pointer(pLeft + 8))
pLeft = (*TExpr)(unsafe.Pointer(pRight)).FpLeft
pRight = (*TExpr)(unsafe.Pointer(pRight)).FpRight
}
zTable = *(*uintptr)(unsafe.Pointer(pLeft + 8))
zColumn = *(*uintptr)(unsafe.Pointer(pRight + 8))
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
_sqlite3RenameTokenRemap(tls, pParse, pExpr, pRight)
_sqlite3RenameTokenRemap(tls, pParse, pExpr+64, pLeft)
}
}
return _lookupName(tls, pParse, zDb, zTable, zColumn, pNC, pExpr)
/* Resolve function names
*/
fallthrough
case int32(TK_FUNCTION):
pList = *(*uintptr)(unsafe.Pointer(pExpr + 32))
if pList != 0 {
v4 = (*TExprList)(unsafe.Pointer(pList)).FnExpr
} else {
v4 = 0
} /* The argument list */
n = v4 /* Number of arguments */
no_such_func = 0 /* True if no such function exists */
wrong_num_args = 0 /* True if wrong number of arguments */
is_agg = 0 /* Information about the function */
enc = (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).Fenc /* The database encoding */
savedAllowFlags = (*TNameContext)(unsafe.Pointer(pNC)).FncFlags & (libc.Int32FromInt32(NC_AllowAgg) | libc.Int32FromInt32(NC_AllowWin))
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) && int32((*TWindow)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 64)))).FeFrmType) != int32(TK_FILTER) {
v5 = *(*uintptr)(unsafe.Pointer(pExpr + 64))
} else {
v5 = uintptr(0)
}
pWin = v5
zId = *(*uintptr)(unsafe.Pointer(pExpr + 8))
pDef = _sqlite3FindFunction(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, zId, n, enc, uint8(0))
if pDef == uintptr(0) {
pDef = _sqlite3FindFunction(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, zId, -int32(2), enc, uint8(0))
if pDef == uintptr(0) {
no_such_func = int32(1)
} else {
wrong_num_args = int32(1)
}
} else {
is_agg = libc.BoolInt32((*TFuncDef)(unsafe.Pointer(pDef)).FxFinalize != uintptr(0))
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(SQLITE_FUNC_UNLIKELY) != 0 {
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_Unlikely))
if n == int32(2) {
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = _exprProbability(tls, (*(*TExprList_item)(unsafe.Pointer(pList + 8 + 1*32))).FpExpr)
if (*TExpr)(unsafe.Pointer(pExpr)).FiTable < 0 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6927, libc.VaList(bp+8, pExpr))
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
}
} else {
/* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is
** equivalent to likelihood(X, 0.0625).
** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is
** short-hand for likelihood(X,0.0625).
** EVIDENCE-OF: R-36850-34127 The likely(X) function is short-hand
** for likelihood(X,0.9375).
** EVIDENCE-OF: R-53436-40973 The likely(X) function is equivalent
** to likelihood(X,0.9375). */
/* TUNING: unlikely() probability is 0.0625. likely() is 0.9375 */
if int32(*(*int8)(unsafe.Pointer((*TFuncDef)(unsafe.Pointer(pDef)).FzName))) == int32('u') {
v6 = int32(8388608)
} else {
v6 = int32(125829120)
}
(*TExpr)(unsafe.Pointer(pExpr)).FiTable = v6
}
}
auth = _sqlite3AuthCheck(tls, pParse, int32(SQLITE_FUNCTION), uintptr(0), (*TFuncDef)(unsafe.Pointer(pDef)).FzName, uintptr(0))
if auth != SQLITE_OK {
if auth == int32(SQLITE_DENY) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6991, libc.VaList(bp+8, pExpr))
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
}
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_NULL)
return int32(WRC_Prune)
}
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(libc.Int32FromInt32(SQLITE_FUNC_CONSTANT)|libc.Int32FromInt32(SQLITE_FUNC_SLOCHNG)) != 0 {
/* For the purposes of the EP_ConstFunc flag, date and time
** functions and other functions that change slowly are considered
** constant because they are constant for the duration of one query.
** This allows them to be factored out of inner loops. */
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_ConstFunc))
}
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(SQLITE_FUNC_CONSTANT) == uint32(0) {
/* Clearly non-deterministic functions like random(), but also
** date/time functions that use 'now', and other functions like
** sqlite_version() that might change over time cannot be used
** in an index or generated column. Curiously, they can be used
** in a CHECK constraint. SQLServer, MySQL, and PostgreSQL all
** allow this. */
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&(libc.Int32FromInt32(NC_IdxExpr)|libc.Int32FromInt32(NC_PartIdx)|libc.Int32FromInt32(NC_GenCol)) != 0 {
_notValidImpl(tls, pParse, pNC, __ccgo_ts+7027, uintptr(0), pExpr)
}
} else {
/* Must fit in 8 bits */
(*TExpr)(unsafe.Pointer(pExpr)).Fop2 = uint8((*TNameContext)(unsafe.Pointer(pNC)).FncFlags & int32(NC_SelfRef))
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_FromDDL) != 0 {
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_FromDDL))
}
}
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(SQLITE_FUNC_INTERNAL) != uint32(0) && int32((*TParse)(unsafe.Pointer(pParse)).Fnested) == 0 && (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).FmDbFlags&uint32(DBFLAG_InternalFunc) == uint32(0) {
/* Internal-use-only functions are disallowed unless the
** SQL is being compiled using sqlite3NestedParse() or
** the SQLITE_TESTCTRL_INTERNAL_FUNCTIONS test-control has been
** used to activate internal functions for testing purposes */
no_such_func = int32(1)
pDef = uintptr(0)
} else {
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(libc.Int32FromInt32(SQLITE_FUNC_DIRECT)|libc.Int32FromInt32(SQLITE_FUNC_UNSAFE)) != uint32(0) && !(int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= libc.Int32FromInt32(PARSE_MODE_RENAME)) {
_sqlite3ExprFunctionUsable(tls, pParse, pExpr, pDef)
}
}
}
if 0 == libc.BoolInt32(int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME)) {
if pDef != 0 && (*TFuncDef)(unsafe.Pointer(pDef)).FxValue == uintptr(0) && pWin != 0 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7055, libc.VaList(bp+8, pExpr))
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
} else {
if is_agg != 0 && (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_AllowAgg) == 0 || is_agg != 0 && (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(SQLITE_FUNC_WINDOW) != 0 && !(pWin != 0) || is_agg != 0 && pWin != 0 && (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_AllowWin) == 0 {
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(SQLITE_FUNC_WINDOW) != 0 || pWin != 0 {
zType = __ccgo_ts + 7098
} else {
zType = __ccgo_ts + 7105
}
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7115, libc.VaList(bp+8, zType, pExpr))
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
is_agg = 0
} else {
if no_such_func != 0 && int32((*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).Finit1.Fbusy) == 0 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7143, libc.VaList(bp+8, pExpr))
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
} else {
if wrong_num_args != 0 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7165, libc.VaList(bp+8, pExpr))
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
} else {
if is_agg == 0 && (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7209, libc.VaList(bp+8, pExpr))
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
} else {
if is_agg == 0 && (*TExpr)(unsafe.Pointer(pExpr)).FpLeft != 0 {
_sqlite3ExprOrderByAggregateError(tls, pParse, pExpr)
(*TNameContext)(unsafe.Pointer(pNC)).FnNcErr++
}
}
}
}
}
}
if is_agg != 0 {
/* Window functions may not be arguments of aggregate functions.
** Or arguments of other window functions. But aggregate functions
** may be arguments for window functions. */
if !(pWin != 0) {
v7 = int32(NC_AllowAgg)
} else {
v7 = 0
}
*(*int32)(unsafe.Pointer(pNC + 40)) &= ^(libc.Int32FromInt32(NC_AllowWin) | v7)
}
} else {
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
is_agg = int32(1)
}
}
_sqlite3WalkExprList(tls, pWalker, pList)
if is_agg != 0 {
if (*TExpr)(unsafe.Pointer(pExpr)).FpLeft != 0 {
_sqlite3WalkExprList(tls, pWalker, *(*uintptr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(pExpr)).FpLeft + 32)))
}
if pWin != 0 {
pSel = (*TNameContext)(unsafe.Pointer(pNC)).FpWinSelect
if libc.BoolInt32(int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME)) == 0 {
if pSel != 0 {
v8 = (*TSelect)(unsafe.Pointer(pSel)).FpWinDefn
} else {
v8 = uintptr(0)
}
_sqlite3WindowUpdate(tls, pParse, v8, pWin, pDef)
if (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).FmallocFailed != 0 {
break
}
}
_sqlite3WalkExprList(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpPartition)
_sqlite3WalkExprList(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpOrderBy)
_sqlite3WalkExpr(tls, pWalker, (*TWindow)(unsafe.Pointer(pWin)).FpFilter)
_sqlite3WindowLink(tls, pSel, pWin)
*(*int32)(unsafe.Pointer(pNC + 40)) |= int32(NC_HasWin)
} else {
/* For looping up thru outer contexts */
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_AGG_FUNCTION)
(*TExpr)(unsafe.Pointer(pExpr)).Fop2 = uint8(0)
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
_sqlite3WalkExpr(tls, pWalker, (*TWindow)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 64)))).FpFilter)
}
pNC2 = pNC
for pNC2 != 0 && _sqlite3ReferencesSrcList(tls, pParse, pExpr, (*TNameContext)(unsafe.Pointer(pNC2)).FpSrcList) == 0 {
p9 = pExpr + 2
*(*Tu8)(unsafe.Pointer(p9)) = Tu8(uint32(*(*Tu8)(unsafe.Pointer(p9))) + (libc.Uint32FromInt32(1) + (*TNameContext)(unsafe.Pointer(pNC2)).FnNestedSelect))
pNC2 = (*TNameContext)(unsafe.Pointer(pNC2)).FpNext
}
if pNC2 != 0 && pDef != 0 {
p10 = pExpr + 2
*(*Tu8)(unsafe.Pointer(p10)) = Tu8(uint32(*(*Tu8)(unsafe.Pointer(p10))) + (*TNameContext)(unsafe.Pointer(pNC2)).FnNestedSelect)
p11 = pNC2 + 40
*(*int32)(unsafe.Pointer(p11)) = int32(uint32(*(*int32)(unsafe.Pointer(p11))) | (libc.Uint32FromInt32(NC_HasAgg) | ((*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags^libc.Uint32FromInt32(SQLITE_FUNC_ANYORDER))&uint32(libc.Int32FromInt32(SQLITE_FUNC_MINMAX)|libc.Int32FromInt32(SQLITE_FUNC_ANYORDER))))
}
}
*(*int32)(unsafe.Pointer(pNC + 40)) |= savedAllowFlags
}
/* FIX ME: Compute pExpr->affinity based on the expected return
** type of the function
*/
return int32(WRC_Prune)
case int32(TK_SELECT):
fallthrough
case int32(TK_EXISTS):
fallthrough
case int32(TK_IN):
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
nRef = (*TNameContext)(unsafe.Pointer(pNC)).FnRef
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_SelfRef) != 0 {
_notValidImpl(tls, pParse, pNC, __ccgo_ts+7257, pExpr, pExpr)
} else {
_sqlite3WalkSelect(tls, pWalker, *(*uintptr)(unsafe.Pointer(pExpr + 32)))
}
if nRef != (*TNameContext)(unsafe.Pointer(pNC)).FnRef {
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_VarSelect))
}
*(*int32)(unsafe.Pointer(pNC + 40)) |= int32(NC_Subquery)
}
case int32(TK_VARIABLE):
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&(libc.Int32FromInt32(NC_IsCheck)|libc.Int32FromInt32(NC_PartIdx)|libc.Int32FromInt32(NC_IdxExpr)|libc.Int32FromInt32(NC_GenCol)) != 0 {
_notValidImpl(tls, pParse, pNC, __ccgo_ts+7268, pExpr, pExpr)
}
case int32(TK_IS):
fallthrough
case int32(TK_ISNOT):
pRight1 = _sqlite3ExprSkipCollateAndLikely(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpRight)
/* Handle special cases of "x IS TRUE", "x IS FALSE", "x IS NOT TRUE",
** and "x IS NOT FALSE". */
if pRight1 != 0 && (int32((*TExpr)(unsafe.Pointer(pRight1)).Fop) == int32(TK_ID) || int32((*TExpr)(unsafe.Pointer(pRight1)).Fop) == int32(TK_TRUEFALSE)) {
rc = _resolveExprStep(tls, pWalker, pRight1)
if rc == int32(WRC_Abort) {
return int32(WRC_Abort)
}
if int32((*TExpr)(unsafe.Pointer(pRight1)).Fop) == int32(TK_TRUEFALSE) {
(*TExpr)(unsafe.Pointer(pExpr)).Fop2 = (*TExpr)(unsafe.Pointer(pExpr)).Fop
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_TRUTH)
return WRC_Continue
}
}
fallthrough
case int32(TK_BETWEEN):
fallthrough
case int32(TK_EQ):
fallthrough
case int32(TK_NE):
fallthrough
case int32(TK_LT):
fallthrough
case int32(TK_LE):
fallthrough
case int32(TK_GT):
fallthrough
case int32(TK_GE):
if (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).FmallocFailed != 0 {
break
}
nLeft = _sqlite3ExprVectorSize(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft)
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_BETWEEN) {
nRight = _sqlite3ExprVectorSize(tls, (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)) + 8))).FpExpr)
if nRight == nLeft {
nRight = _sqlite3ExprVectorSize(tls, (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)) + 8 + 1*32))).FpExpr)
}
} else {
nRight = _sqlite3ExprVectorSize(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpRight)
}
if nLeft != nRight {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6679, 0)
_sqlite3RecordErrorOffsetOfExpr(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pExpr)
}
break
}
if (*TParse)(unsafe.Pointer(pParse)).FnErr != 0 {
v12 = int32(WRC_Abort)
} else {
v12 = WRC_Continue
}
return v12
}
// C documentation
//
// /*
// ** pEList is a list of expressions which are really the result set of
// ** a SELECT statement. pE is a term in an ORDER BY or GROUP BY clause.
// ** This routine checks to see if pE is a simple identifier which corresponds
// ** to the AS-name of one of the terms of the expression list. If it is,
// ** this routine returns an integer between 1 and N where N is the number of
// ** elements in pEList, corresponding to the matching entry. If there is
// ** no match, or if pE is not a simple identifier, then this routine
// ** returns 0.
// **
// ** pEList has been resolved. pE has not.
// */
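// Illustrative example (hypothetical names): for
//
//	SELECT a+b AS total, c FROM t ORDER BY total;
//
// the ORDER BY term "total" matches the AS-name of the first result column,
// so this routine returns 1.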
func _resolveAsName(tls *libc.TLS, pParse uintptr, pEList uintptr, pE uintptr) (r int32) {
var i int32
var zCol uintptr
_, _ = i, zCol /* Loop counter */
_ = pParse
if int32((*TExpr)(unsafe.Pointer(pE)).Fop) == int32(TK_ID) {
zCol = *(*uintptr)(unsafe.Pointer(pE + 8))
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pEList)).FnExpr) {
break
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pEList + 8 + uintptr(i)*32 + 16 + 4))&0x3>>0)) == ENAME_NAME && Xsqlite3_stricmp(tls, (*(*TExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32))).FzEName, zCol) == 0 {
return i + int32(1)
}
goto _1
_1:
;
i++
}
}
return 0
}
// C documentation
//
// /*
// ** pE is a pointer to an expression which is a single term in the
// ** ORDER BY of a compound SELECT. The expression has not been
// ** name resolved.
// **
// ** At the point this routine is called, we already know that the
// ** ORDER BY term is not an integer index into the result set. That
// ** case is handled by the calling routine.
// **
// ** Attempt to match pE against result set columns in the left-most
// ** SELECT statement. Return the index i of the matching column,
// ** as an indication to the caller that it should sort by the i-th column.
// ** The left-most column is 1. In other words, the value returned is the
// ** same integer value that would be used in the SQL statement to indicate
// ** the column.
// **
// ** If there is no match, return 0. Return -1 if an error occurs.
// */
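// Illustrative example (hypothetical names): in the compound query
//
//	SELECT x, y FROM t1 UNION SELECT a, b FROM t2 ORDER BY y;
//
// the term "y" is not an alias, but it compares equal to the second
// result-set expression of the left-most SELECT, so this routine returns 2.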
func _resolveOrderByTermToExprList(tls *libc.TLS, pParse uintptr, pSelect uintptr, pE uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var db, pEList uintptr
var i, rc int32
var savedSuppErr Tu8
var _ /* nc at bp+0 */ TNameContext
_, _, _, _, _ = db, i, pEList, rc, savedSuppErr /* Saved value of db->suppressErr */
pEList = (*TSelect)(unsafe.Pointer(pSelect)).FpEList
/* Resolve all names in the ORDER BY term expression
*/
libc.Xmemset(tls, bp, 0, uint64(56))
(*(*TNameContext)(unsafe.Pointer(bp))).FpParse = pParse
(*(*TNameContext)(unsafe.Pointer(bp))).FpSrcList = (*TSelect)(unsafe.Pointer(pSelect)).FpSrc
*(*uintptr)(unsafe.Pointer(bp + 16)) = pEList
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags = libc.Int32FromInt32(NC_AllowAgg) | libc.Int32FromInt32(NC_UEList) | libc.Int32FromInt32(NC_NoSelect)
(*(*TNameContext)(unsafe.Pointer(bp))).FnNcErr = 0
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
savedSuppErr = (*Tsqlite3)(unsafe.Pointer(db)).FsuppressErr
(*Tsqlite3)(unsafe.Pointer(db)).FsuppressErr = uint8(1)
rc = _sqlite3ResolveExprNames(tls, bp, pE)
(*Tsqlite3)(unsafe.Pointer(db)).FsuppressErr = savedSuppErr
if rc != 0 {
return 0
}
/* Try to match the ORDER BY expression against an expression
** in the result set. Return a 1-based index of the matching
** result-set entry.
*/
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pEList)).FnExpr) {
break
}
if _sqlite3ExprCompare(tls, uintptr(0), (*(*TExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32))).FpExpr, pE, -int32(1)) < int32(2) {
return i + int32(1)
}
goto _1
_1:
;
i++
}
/* If no match, return 0. */
return 0
}
// C documentation
//
// /*
// ** Generate an ORDER BY or GROUP BY term out-of-range error.
// */
func _resolveOutOfRangeError(tls *libc.TLS, pParse uintptr, zType uintptr, i int32, mx int32, pError uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7279, libc.VaList(bp+8, i, zType, mx))
_sqlite3RecordErrorOffsetOfExpr(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pError)
}
// C documentation
//
// /*
// ** Analyze the ORDER BY clause in a compound SELECT statement. Modify
// ** each term of the ORDER BY clause so that it becomes a constant integer
// ** between 1 and N, where N is the number of columns in the compound SELECT.
// **
// ** ORDER BY terms that are already an integer between 1 and N are
// ** unmodified. ORDER BY terms that are integers outside the range of
// ** 1 through N generate an error. ORDER BY terms that are expressions
// ** are matched against result set expressions of compound SELECT
// ** beginning with the left-most SELECT and working toward the right.
// ** At the first match, the ORDER BY expression is transformed into
// ** the integer column number.
// **
// ** Return the number of errors seen.
// */
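// Illustrative example (hypothetical names): the ORDER BY term in
//
//	SELECT a FROM t1 UNION SELECT b FROM t2 ORDER BY a;
//
// is rewritten into the constant 1, whereas ORDER BY 2 would produce an
// out-of-range error because the compound result set has only one column.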
func _resolveCompoundOrderBy(tls *libc.TLS, pParse uintptr, pSelect uintptr) (r int32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var db, pDup, pE, pEList, pItem, pNew, pOrderBy, pParent uintptr
var i, moreToDo int32
var _ /* iCol at bp+0 */ int32
_, _, _, _, _, _, _, _, _, _ = db, i, moreToDo, pDup, pE, pEList, pItem, pNew, pOrderBy, pParent
moreToDo = int32(1)
pOrderBy = (*TSelect)(unsafe.Pointer(pSelect)).FpOrderBy
if pOrderBy == uintptr(0) {
return 0
}
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if (*TExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7335, 0)
return int32(1)
}
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pOrderBy)).FnExpr) {
break
}
libc.SetBitFieldPtr16Uint32(pOrderBy+8+uintptr(i)*32+16+4, libc.Uint32FromInt32(0), 2, 0x4)
goto _1
_1:
;
i++
}
(*TSelect)(unsafe.Pointer(pSelect)).FpNext = uintptr(0)
for (*TSelect)(unsafe.Pointer(pSelect)).FpPrior != 0 {
(*TSelect)(unsafe.Pointer((*TSelect)(unsafe.Pointer(pSelect)).FpPrior)).FpNext = pSelect
pSelect = (*TSelect)(unsafe.Pointer(pSelect)).FpPrior
}
for pSelect != 0 && moreToDo != 0 {
moreToDo = 0
pEList = (*TSelect)(unsafe.Pointer(pSelect)).FpEList
i = 0
pItem = pOrderBy + 8
for {
if !(i < (*TExprList)(unsafe.Pointer(pOrderBy)).FnExpr) {
break
}
*(*int32)(unsafe.Pointer(bp)) = -int32(1)
if int32(uint32(*(*uint16)(unsafe.Pointer(pItem + 16 + 4))&0x4>>2)) != 0 {
goto _2
}
pE = _sqlite3ExprSkipCollateAndLikely(tls, (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr)
if pE == uintptr(0) {
goto _2
}
if _sqlite3ExprIsInteger(tls, pE, bp) != 0 {
if *(*int32)(unsafe.Pointer(bp)) <= 0 || *(*int32)(unsafe.Pointer(bp)) > (*TExprList)(unsafe.Pointer(pEList)).FnExpr {
_resolveOutOfRangeError(tls, pParse, __ccgo_ts+7369, i+int32(1), (*TExprList)(unsafe.Pointer(pEList)).FnExpr, pE)
return int32(1)
}
} else {
*(*int32)(unsafe.Pointer(bp)) = _resolveAsName(tls, pParse, pEList, pE)
if *(*int32)(unsafe.Pointer(bp)) == 0 {
/* Now test if expression pE matches one of the values returned
** by pSelect. In the usual case this is done by duplicating the
** expression, resolving any symbols in it, and then comparing
** it against each expression returned by the SELECT statement.
** Once the comparisons are finished, the duplicate expression
** is deleted.
**
** If this is running as part of an ALTER TABLE operation and
** the symbols resolve successfully, also resolve the symbols in the
** actual expression. This allows the code in alter.c to modify
** column references within the ORDER BY expression as required. */
pDup = _sqlite3ExprDup(tls, db, pE, 0)
if !((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) {
*(*int32)(unsafe.Pointer(bp)) = _resolveOrderByTermToExprList(tls, pParse, pSelect, pDup)
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) && *(*int32)(unsafe.Pointer(bp)) > 0 {
_resolveOrderByTermToExprList(tls, pParse, pSelect, pE)
}
}
_sqlite3ExprDelete(tls, db, pDup)
}
}
if *(*int32)(unsafe.Pointer(bp)) > 0 {
/* Convert the ORDER BY term into an integer column number iCol,
** taking care to preserve the COLLATE clause if it exists. */
if !(int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= libc.Int32FromInt32(PARSE_MODE_RENAME)) {
pNew = _sqlite3Expr(tls, db, int32(TK_INTEGER), uintptr(0))
if pNew == uintptr(0) {
return int32(1)
}
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= uint32(EP_IntValue)
*(*int32)(unsafe.Pointer(&(*TExpr)(unsafe.Pointer(pNew)).Fu)) = *(*int32)(unsafe.Pointer(bp))
if (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr == pE {
(*TExprList_item)(unsafe.Pointer(pItem)).FpExpr = pNew
} else {
pParent = (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr
for int32((*TExpr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(pParent)).FpLeft)).Fop) == int32(TK_COLLATE) {
pParent = (*TExpr)(unsafe.Pointer(pParent)).FpLeft
}
(*TExpr)(unsafe.Pointer(pParent)).FpLeft = pNew
}
_sqlite3ExprDelete(tls, db, pE)
(*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol = uint16(*(*int32)(unsafe.Pointer(bp)))
}
libc.SetBitFieldPtr16Uint32(pItem+16+4, libc.Uint32FromInt32(1), 2, 0x4)
} else {
moreToDo = int32(1)
}
goto _2
_2:
;
i++
pItem += 32
}
pSelect = (*TSelect)(unsafe.Pointer(pSelect)).FpNext
}
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pOrderBy)).FnExpr) {
break
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pOrderBy + 8 + uintptr(i)*32 + 16 + 4))&0x4>>2)) == 0 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7375, libc.VaList(bp+16, i+int32(1)))
return int32(1)
}
goto _3
_3:
;
i++
}
return 0
}
// C documentation
//
// /*
// ** Check every term in the ORDER BY or GROUP BY clause pOrderBy of
// ** the SELECT statement pSelect. If any term is a reference to a
// ** result set expression (as determined by the ExprList.a.u.x.iOrderByCol
// ** field) then convert that term into a copy of the corresponding result set
// ** column.
// **
// ** If any errors are detected, add an error message to pParse and
// ** return non-zero. Return zero if no errors are seen.
// */
func _sqlite3ResolveOrderGroupBy(tls *libc.TLS, pParse uintptr, pSelect uintptr, pOrderBy uintptr, zType uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var db, pEList, pItem uintptr
var i int32
_, _, _, _ = db, i, pEList, pItem
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if pOrderBy == uintptr(0) || (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).FmallocFailed != 0 || int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
return 0
}
if (*TExprList)(unsafe.Pointer(pOrderBy)).FnExpr > *(*int32)(unsafe.Pointer(db + 136 + 2*4)) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7436, libc.VaList(bp+8, zType))
return int32(1)
}
pEList = (*TSelect)(unsafe.Pointer(pSelect)).FpEList
/* sqlite3SelectNew() guarantees this */
i = 0
pItem = pOrderBy + 8
for {
if !(i < (*TExprList)(unsafe.Pointer(pOrderBy)).FnExpr) {
break
}
if (*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol != 0 {
if int32((*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol) > (*TExprList)(unsafe.Pointer(pEList)).FnExpr {
_resolveOutOfRangeError(tls, pParse, zType, i+int32(1), (*TExprList)(unsafe.Pointer(pEList)).FnExpr, uintptr(0))
return int32(1)
}
_resolveAlias(tls, pParse, pEList, int32((*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol)-int32(1), (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr, 0)
}
goto _1
_1:
;
i++
pItem += 32
}
return 0
}
// C documentation
//
// /*
// ** Walker callback for windowRemoveExprFromSelect().
// */
func _resolveRemoveWindowsCb(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
var pWin uintptr
_ = pWin
_ = pWalker
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
pWin = *(*uintptr)(unsafe.Pointer(pExpr + 64))
_sqlite3WindowUnlinkFromSelect(tls, pWin)
}
return WRC_Continue
}
// C documentation
//
// /*
// ** Remove any Window objects owned by the expression pExpr from the
// ** Select.pWin list of Select object pSelect.
// */
func _windowRemoveExprFromSelect(tls *libc.TLS, pSelect uintptr, pExpr uintptr) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* sWalker at bp+0 */ TWalker
if (*TSelect)(unsafe.Pointer(pSelect)).FpWin != 0 {
libc.Xmemset(tls, bp, 0, uint64(48))
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_resolveRemoveWindowsCb)
*(*uintptr)(unsafe.Pointer(bp + 40)) = pSelect
_sqlite3WalkExpr(tls, bp, pExpr)
}
}
// C documentation
//
// /*
// ** pOrderBy is an ORDER BY or GROUP BY clause in SELECT statement pSelect.
// ** The Name context of the SELECT statement is pNC. zType is either
// ** "ORDER" or "GROUP" depending on which type of clause pOrderBy is.
// **
// ** This routine resolves each term of the clause into an expression.
// ** If the order-by term is an integer I between 1 and N (where N is the
// ** number of columns in the result set of the SELECT) then the expression
// ** in the resolution is a copy of the I-th result-set expression. If
// ** the order-by term is an identifier that corresponds to the AS-name of
// ** a result-set expression, then the term resolves to a copy of the
// ** result-set expression. Otherwise, the expression is resolved in
// ** the usual way - using sqlite3ResolveExprNames().
// **
// ** This routine returns the number of errors. If errors occur, then
// ** an appropriate error message might be left in pParse. (OOM errors
// ** excepted.)
// */
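// For illustration (hypothetical schema), all three resolution paths
// described above can occur in a single clause:
//
//     SELECT a+b AS s, c FROM t1 ORDER BY 2, s, a-b;
//
// "2" is an integer between 1 and the number of result columns, "s" matches
// an AS-name in the result set, and "a-b" is resolved as an ordinary
// expression via sqlite3ResolveExprNames().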
func _resolveOrderGroupBy(tls *libc.TLS, pNC uintptr, pSelect uintptr, pOrderBy uintptr, zType uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var i, j, nResult int32
var pE, pE2, pItem, pParse uintptr
var _ /* iCol at bp+0 */ int32
_, _, _, _, _, _, _ = i, j, nResult, pE, pE2, pItem, pParse /* Number of terms in the result set */
nResult = (*TExprList)(unsafe.Pointer((*TSelect)(unsafe.Pointer(pSelect)).FpEList)).FnExpr
pParse = (*TNameContext)(unsafe.Pointer(pNC)).FpParse
i = 0
pItem = pOrderBy + 8
for {
if !(i < (*TExprList)(unsafe.Pointer(pOrderBy)).FnExpr) {
break
}
pE = (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr
pE2 = _sqlite3ExprSkipCollateAndLikely(tls, pE)
if pE2 == uintptr(0) {
goto _1
}
if int32(*(*int8)(unsafe.Pointer(zType))) != int32('G') {
*(*int32)(unsafe.Pointer(bp)) = _resolveAsName(tls, pParse, (*TSelect)(unsafe.Pointer(pSelect)).FpEList, pE2)
if *(*int32)(unsafe.Pointer(bp)) > 0 {
/* If an AS-name match is found, mark this ORDER BY column as being
** a copy of the iCol-th result-set column. The subsequent call to
** sqlite3ResolveOrderGroupBy() will convert the expression to a
** copy of the iCol-th result-set expression. */
(*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol = uint16(*(*int32)(unsafe.Pointer(bp)))
goto _1
}
}
if _sqlite3ExprIsInteger(tls, pE2, bp) != 0 {
/* The ORDER BY term is an integer constant. Again, set the column
** number so that sqlite3ResolveOrderGroupBy() will convert the
** order-by term to a copy of the result-set expression */
if *(*int32)(unsafe.Pointer(bp)) < int32(1) || *(*int32)(unsafe.Pointer(bp)) > int32(0xffff) {
_resolveOutOfRangeError(tls, pParse, zType, i+int32(1), nResult, pE2)
return int32(1)
}
(*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol = uint16(*(*int32)(unsafe.Pointer(bp)))
goto _1
}
/* Otherwise, treat the ORDER BY term as an ordinary expression */
(*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol = uint16(0)
if _sqlite3ResolveExprNames(tls, pNC, pE) != 0 {
return int32(1)
}
j = 0
for {
if !(j < (*TExprList)(unsafe.Pointer((*TSelect)(unsafe.Pointer(pSelect)).FpEList)).FnExpr) {
break
}
if _sqlite3ExprCompare(tls, uintptr(0), pE, (*(*TExprList_item)(unsafe.Pointer((*TSelect)(unsafe.Pointer(pSelect)).FpEList + 8 + uintptr(j)*32))).FpExpr, -int32(1)) == 0 {
/* Since this expression is being changed into a reference
** to an identical expression in the result set, remove all Window
** objects belonging to the expression from the Select.pWin list. */
_windowRemoveExprFromSelect(tls, pSelect, pE)
(*(*struct {
FiOrderByCol Tu16
FiAlias Tu16
})(unsafe.Pointer(pItem + 24))).FiOrderByCol = uint16(j + int32(1))
}
goto _2
_2:
;
j++
}
goto _1
_1:
;
i++
pItem += 32
}
return _sqlite3ResolveOrderGroupBy(tls, pParse, pSelect, pOrderBy, zType)
}
// C documentation
//
// /*
// ** Resolve names in the SELECT statement p and all of its descendants.
// */
func _resolveSelectStep(tls *libc.TLS, pWalker uintptr, p uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var db, pGroupBy, pItem, pItem1, pItem2, pLeftmost, pOuterNC, pParse, pSub, pSub1, pWin, zSavedContext uintptr
var i, isCompound, nCompound, nRef, v1, v3 int32
var _ /* sNC at bp+0 */ TNameContext
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = db, i, isCompound, nCompound, nRef, pGroupBy, pItem, pItem1, pItem2, pLeftmost, pOuterNC, pParse, pSub, pSub1, pWin, zSavedContext, v1, v3 /* Database connection */
if (*TSelect)(unsafe.Pointer(p)).FselFlags&uint32(SF_Resolved) != 0 {
return int32(WRC_Prune)
}
pOuterNC = *(*uintptr)(unsafe.Pointer(pWalker + 40))
pParse = (*TWalker)(unsafe.Pointer(pWalker)).FpParse
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
/* Normally sqlite3SelectExpand() will be called first and will have
** already expanded this SELECT. However, if this is a subquery within
** an expression, sqlite3ResolveExprNames() will be called without a
** prior call to sqlite3SelectExpand(). When that happens, let
** sqlite3SelectPrep() do all of the processing for this SELECT.
** sqlite3SelectPrep() will invoke both sqlite3SelectExpand() and
** this routine in the correct order.
*/
if (*TSelect)(unsafe.Pointer(p)).FselFlags&uint32(SF_Expanded) == uint32(0) {
_sqlite3SelectPrep(tls, pParse, p, pOuterNC)
if (*TParse)(unsafe.Pointer(pParse)).FnErr != 0 {
v1 = int32(WRC_Abort)
} else {
v1 = int32(WRC_Prune)
}
return v1
}
isCompound = libc.BoolInt32((*TSelect)(unsafe.Pointer(p)).FpPrior != uintptr(0))
nCompound = 0
pLeftmost = p
for p != 0 {
*(*Tu32)(unsafe.Pointer(p + 4)) |= uint32(SF_Resolved)
/* Resolve the expressions in the LIMIT and OFFSET clauses. These
** are not allowed to refer to any names, so pass an empty NameContext.
*/
libc.Xmemset(tls, bp, 0, uint64(56))
(*(*TNameContext)(unsafe.Pointer(bp))).FpParse = pParse
(*(*TNameContext)(unsafe.Pointer(bp))).FpWinSelect = p
if _sqlite3ResolveExprNames(tls, bp, (*TSelect)(unsafe.Pointer(p)).FpLimit) != 0 {
return int32(WRC_Abort)
}
/* If the SF_Converted flag is set, then this Select object was
** created by the convertCompoundSelectToSubquery() function.
** In this case the ORDER BY clause (p->pOrderBy) should be resolved
** as if it were part of the sub-query, not the parent. This block
** moves the pOrderBy down to the sub-query. It will be moved back
** after the names have been resolved. */
if (*TSelect)(unsafe.Pointer(p)).FselFlags&uint32(SF_Converted) != 0 {
pSub = (*(*TSrcItem)(unsafe.Pointer((*TSelect)(unsafe.Pointer(p)).FpSrc + 8))).FpSelect
(*TSelect)(unsafe.Pointer(pSub)).FpOrderBy = (*TSelect)(unsafe.Pointer(p)).FpOrderBy
(*TSelect)(unsafe.Pointer(p)).FpOrderBy = uintptr(0)
}
/* Recursively resolve names in all subqueries in the FROM clause
*/
if pOuterNC != 0 {
(*TNameContext)(unsafe.Pointer(pOuterNC)).FnNestedSelect++
}
i = 0
for {
if !(i < (*TSrcList)(unsafe.Pointer((*TSelect)(unsafe.Pointer(p)).FpSrc)).FnSrc) {
break
}
pItem = (*TSelect)(unsafe.Pointer(p)).FpSrc + 8 + uintptr(i)*104
if (*TSrcItem)(unsafe.Pointer(pItem)).FpSelect != 0 && (*TSelect)(unsafe.Pointer((*TSrcItem)(unsafe.Pointer(pItem)).FpSelect)).FselFlags&uint32(SF_Resolved) == uint32(0) {
if pOuterNC != 0 {
v3 = (*TNameContext)(unsafe.Pointer(pOuterNC)).FnRef
} else {
v3 = 0
}
nRef = v3
zSavedContext = (*TParse)(unsafe.Pointer(pParse)).FzAuthContext
if (*TSrcItem)(unsafe.Pointer(pItem)).FzName != 0 {
(*TParse)(unsafe.Pointer(pParse)).FzAuthContext = (*TSrcItem)(unsafe.Pointer(pItem)).FzName
}
_sqlite3ResolveSelectNames(tls, pParse, (*TSrcItem)(unsafe.Pointer(pItem)).FpSelect, pOuterNC)
(*TParse)(unsafe.Pointer(pParse)).FzAuthContext = zSavedContext
if (*TParse)(unsafe.Pointer(pParse)).FnErr != 0 {
return int32(WRC_Abort)
}
/* If the number of references to the outer context changed when
** expressions in the sub-select were resolved, the sub-select
** is correlated. It is not required to check the refcount on any
** but the innermost outer context object, as lookupName() increments
** the refcount on all contexts between the current one and the
** context containing the column when it resolves a name. */
if pOuterNC != 0 {
libc.SetBitFieldPtr16Uint32(pItem+60+4, libc.BoolUint32((*TNameContext)(unsafe.Pointer(pOuterNC)).FnRef > nRef), 3, 0x8)
}
}
goto _2
_2:
;
i++
}
if pOuterNC != 0 && (*TNameContext)(unsafe.Pointer(pOuterNC)).FnNestedSelect > uint32(0) {
(*TNameContext)(unsafe.Pointer(pOuterNC)).FnNestedSelect--
}
/* Set up the local name-context to pass to sqlite3ResolveExprNames() to
** resolve the result-set expression list.
*/
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags = libc.Int32FromInt32(NC_AllowAgg) | libc.Int32FromInt32(NC_AllowWin)
(*(*TNameContext)(unsafe.Pointer(bp))).FpSrcList = (*TSelect)(unsafe.Pointer(p)).FpSrc
(*(*TNameContext)(unsafe.Pointer(bp))).FpNext = pOuterNC
/* Resolve names in the result set. */
if _sqlite3ResolveExprListNames(tls, bp, (*TSelect)(unsafe.Pointer(p)).FpEList) != 0 {
return int32(WRC_Abort)
}
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags &= ^libc.Int32FromInt32(NC_AllowWin)
/* If there are no aggregate functions in the result-set, and no GROUP BY
** expression, do not allow aggregates in any of the other expressions.
*/
pGroupBy = (*TSelect)(unsafe.Pointer(p)).FpGroupBy
if pGroupBy != 0 || (*(*TNameContext)(unsafe.Pointer(bp))).FncFlags&int32(NC_HasAgg) != 0 {
*(*Tu32)(unsafe.Pointer(p + 4)) |= uint32(int32(SF_Aggregate) | (*(*TNameContext)(unsafe.Pointer(bp))).FncFlags&(libc.Int32FromInt32(NC_MinMaxAgg)|libc.Int32FromInt32(NC_OrderAgg)))
} else {
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags &= ^libc.Int32FromInt32(NC_AllowAgg)
}
/* Add the output column list to the name-context before parsing the
** other expressions in the SELECT statement. This is so that
** expressions in the WHERE clause (etc.) can refer to expressions by
** aliases in the result set.
**
** Minor point: If this is the case, then the expression will be
** re-evaluated for each reference to it.
*/
*(*uintptr)(unsafe.Pointer(bp + 16)) = (*TSelect)(unsafe.Pointer(p)).FpEList
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags |= int32(NC_UEList)
if (*TSelect)(unsafe.Pointer(p)).FpHaving != 0 {
if (*TSelect)(unsafe.Pointer(p)).FselFlags&uint32(SF_Aggregate) == uint32(0) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7467, 0)
return int32(WRC_Abort)
}
if _sqlite3ResolveExprNames(tls, bp, (*TSelect)(unsafe.Pointer(p)).FpHaving) != 0 {
return int32(WRC_Abort)
}
}
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags |= int32(NC_Where)
if _sqlite3ResolveExprNames(tls, bp, (*TSelect)(unsafe.Pointer(p)).FpWhere) != 0 {
return int32(WRC_Abort)
}
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags &= ^libc.Int32FromInt32(NC_Where)
/* Resolve names in table-valued-function arguments */
i = 0
for {
if !(i < (*TSrcList)(unsafe.Pointer((*TSelect)(unsafe.Pointer(p)).FpSrc)).FnSrc) {
break
}
pItem1 = (*TSelect)(unsafe.Pointer(p)).FpSrc + 8 + uintptr(i)*104
if int32(uint32(*(*uint16)(unsafe.Pointer(pItem1 + 60 + 4))&0x4>>2)) != 0 && _sqlite3ResolveExprListNames(tls, bp, *(*uintptr)(unsafe.Pointer(pItem1 + 88))) != 0 {
return int32(WRC_Abort)
}
goto _4
_4:
;
i++
}
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
pWin = (*TSelect)(unsafe.Pointer(p)).FpWinDefn
for {
if !(pWin != 0) {
break
}
if _sqlite3ResolveExprListNames(tls, bp, (*TWindow)(unsafe.Pointer(pWin)).FpOrderBy) != 0 || _sqlite3ResolveExprListNames(tls, bp, (*TWindow)(unsafe.Pointer(pWin)).FpPartition) != 0 {
return int32(WRC_Abort)
}
goto _5
_5:
;
pWin = (*TWindow)(unsafe.Pointer(pWin)).FpNextWin
}
}
/* The ORDER BY and GROUP BY clauses may not refer to terms in
** outer queries
*/
(*(*TNameContext)(unsafe.Pointer(bp))).FpNext = uintptr(0)
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags |= libc.Int32FromInt32(NC_AllowAgg) | libc.Int32FromInt32(NC_AllowWin)
/* If this is a converted compound query, move the ORDER BY clause from
** the sub-query back to the parent query. At this point each term
** within the ORDER BY clause has been transformed to an integer value.
** These integers will be replaced by copies of the corresponding result
** set expressions by the call to resolveOrderGroupBy() below. */
if (*TSelect)(unsafe.Pointer(p)).FselFlags&uint32(SF_Converted) != 0 {
pSub1 = (*(*TSrcItem)(unsafe.Pointer((*TSelect)(unsafe.Pointer(p)).FpSrc + 8))).FpSelect
(*TSelect)(unsafe.Pointer(p)).FpOrderBy = (*TSelect)(unsafe.Pointer(pSub1)).FpOrderBy
(*TSelect)(unsafe.Pointer(pSub1)).FpOrderBy = uintptr(0)
}
/* Process the ORDER BY clause for singleton SELECT statements.
** The ORDER BY clause for compound SELECT statements is handled
** below, after all of the result-sets for all of the elements of
** the compound have been resolved.
**
** If there is an ORDER BY clause on a term of a compound-select other
** than the right-most term, then that is a syntax error. But the error
** is not detected until much later, and so we need to go ahead and
** resolve those symbols on the incorrect ORDER BY for consistency.
*/
if (*TSelect)(unsafe.Pointer(p)).FpOrderBy != uintptr(0) && isCompound <= nCompound && _resolveOrderGroupBy(tls, bp, p, (*TSelect)(unsafe.Pointer(p)).FpOrderBy, __ccgo_ts+7369) != 0 {
return int32(WRC_Abort)
}
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
return int32(WRC_Abort)
}
(*(*TNameContext)(unsafe.Pointer(bp))).FncFlags &= ^libc.Int32FromInt32(NC_AllowWin)
/* Resolve the GROUP BY clause. At the same time, make sure
** the GROUP BY clause does not contain aggregate functions.
*/
if pGroupBy != 0 {
if _resolveOrderGroupBy(tls, bp, p, pGroupBy, __ccgo_ts+7506) != 0 || (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
return int32(WRC_Abort)
}
i = 0
pItem2 = pGroupBy + 8
for {
if !(i < (*TExprList)(unsafe.Pointer(pGroupBy)).FnExpr) {
break
}
if (*TExpr)(unsafe.Pointer((*TExprList_item)(unsafe.Pointer(pItem2)).FpExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Agg)) != uint32(0) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7512, 0)
return int32(WRC_Abort)
}
goto _6
_6:
;
i++
pItem2 += 32
}
}
/* If this is part of a compound SELECT, check that it has the right
** number of expressions in the select list. */
if (*TSelect)(unsafe.Pointer(p)).FpNext != 0 && (*TExprList)(unsafe.Pointer((*TSelect)(unsafe.Pointer(p)).FpEList)).FnExpr != (*TExprList)(unsafe.Pointer((*TSelect)(unsafe.Pointer((*TSelect)(unsafe.Pointer(p)).FpNext)).FpEList)).FnExpr {
_sqlite3SelectWrongNumTermsError(tls, pParse, (*TSelect)(unsafe.Pointer(p)).FpNext)
return int32(WRC_Abort)
}
/* Advance to the next term of the compound
*/
p = (*TSelect)(unsafe.Pointer(p)).FpPrior
nCompound++
}
/* Resolve the ORDER BY on a compound SELECT after all terms of
** the compound have been resolved.
*/
if isCompound != 0 && _resolveCompoundOrderBy(tls, pParse, pLeftmost) != 0 {
return int32(WRC_Abort)
}
return int32(WRC_Prune)
}
// C documentation
//
// /*
// ** This routine walks an expression tree and resolves references to
// ** table columns and result-set columns. At the same time, do error
// ** checking on function usage and set a flag if any aggregate functions
// ** are seen.
// **
// ** To resolve table column references we look for nodes (or subtrees) of the
// ** form X.Y.Z or Y.Z or just Z where
// **
// ** X: The name of a database. Ex: "main" or "temp" or
// ** the symbolic name assigned to an ATTACH-ed database.
// **
// ** Y: The name of a table in a FROM clause. Or in a trigger
// ** one of the special names "old" or "new".
// **
// ** Z: The name of a column in table Y.
// **
// ** The node at the root of the subtree is modified as follows:
// **
// ** Expr.op Changed to TK_COLUMN
// ** Expr.pTab Points to the Table object for X.Y
// ** Expr.iColumn The column index in X.Y. -1 for the rowid.
// ** Expr.iTable The VDBE cursor number for X.Y
// **
// **
// ** To resolve result-set references, look for expression nodes of the
// ** form Z (with no X and Y prefix) where the Z matches the right-hand
// ** side of an AS clause in the result-set of a SELECT. The Z expression
// ** is replaced by a copy of the left-hand side of the result-set expression.
// ** Table-name and function resolution occurs on the substituted expression
// ** tree. For example, in:
// **
// ** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY x;
// **
// ** The "x" term of the order by is replaced by "a+b" to render:
// **
// ** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY a+b;
// **
// ** Function calls are checked to make sure that the function is
// ** defined and that the correct number of arguments are specified.
// ** If the function is an aggregate function, then the NC_HasAgg flag is
// ** set and the opcode is changed from TK_FUNCTION to TK_AGG_FUNCTION.
// ** If an expression contains aggregate functions then the EP_Agg
// ** property on the expression is set.
// **
// ** An error message is left in pParse if anything is amiss. The number
// ** of errors is returned.
// */
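// For illustration (hypothetical database, table and column names), the
// reference forms described above look like:
//
//     SELECT main.t1.a, t1.b, c FROM t1;    -- X.Y.Z, Y.Z and plain Z
//
// Each of these is rewritten into a TK_COLUMN node carrying the Table object
// and VDBE cursor number for t1.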
func _sqlite3ResolveExprNames(tls *libc.TLS, pNC uintptr, pExpr uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var savedHasAgg int32
var v1 uintptr
var _ /* w at bp+0 */ TWalker
_, _ = savedHasAgg, v1
if pExpr == uintptr(0) {
return SQLITE_OK
}
savedHasAgg = (*TNameContext)(unsafe.Pointer(pNC)).FncFlags & (libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_MinMaxAgg) | libc.Int32FromInt32(NC_HasWin) | libc.Int32FromInt32(NC_OrderAgg))
*(*int32)(unsafe.Pointer(pNC + 40)) &= ^(libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_MinMaxAgg) | libc.Int32FromInt32(NC_HasWin) | libc.Int32FromInt32(NC_OrderAgg))
(*(*TWalker)(unsafe.Pointer(bp))).FpParse = (*TNameContext)(unsafe.Pointer(pNC)).FpParse
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_resolveExprStep)
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&int32(NC_NoSelect) != 0 {
v1 = uintptr(0)
} else {
v1 = __ccgo_fp(_resolveSelectStep)
}
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback = v1
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback2 = uintptr(0)
*(*uintptr)(unsafe.Pointer(bp + 40)) = pNC
*(*int32)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse + 316)) += (*TExpr)(unsafe.Pointer(pExpr)).FnHeight
if _sqlite3ExprCheckHeight(tls, (*(*TWalker)(unsafe.Pointer(bp))).FpParse, (*TParse)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse)).FnHeight) != 0 {
return int32(SQLITE_ERROR)
}
_sqlite3WalkExprNN(tls, bp, pExpr)
*(*int32)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse + 316)) -= (*TExpr)(unsafe.Pointer(pExpr)).FnHeight
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32((*TNameContext)(unsafe.Pointer(pNC)).FncFlags & (libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_HasWin)))
*(*int32)(unsafe.Pointer(pNC + 40)) |= savedHasAgg
return libc.BoolInt32((*TNameContext)(unsafe.Pointer(pNC)).FnNcErr > 0 || (*TParse)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse)).FnErr > 0)
}
// C documentation
//
// /*
// ** Resolve all names for all expressions in an expression list. This is
// ** just like sqlite3ResolveExprNames() except that it works for an expression
// ** list rather than a single expression.
// */
func _sqlite3ResolveExprListNames(tls *libc.TLS, pNC uintptr, pList uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var i, savedHasAgg int32
var pExpr uintptr
var _ /* w at bp+0 */ TWalker
_, _, _ = i, pExpr, savedHasAgg
savedHasAgg = 0
if pList == uintptr(0) {
return WRC_Continue
}
(*(*TWalker)(unsafe.Pointer(bp))).FpParse = (*TNameContext)(unsafe.Pointer(pNC)).FpParse
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_resolveExprStep)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback = __ccgo_fp(_resolveSelectStep)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback2 = uintptr(0)
*(*uintptr)(unsafe.Pointer(bp + 40)) = pNC
savedHasAgg = (*TNameContext)(unsafe.Pointer(pNC)).FncFlags & (libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_MinMaxAgg) | libc.Int32FromInt32(NC_HasWin) | libc.Int32FromInt32(NC_OrderAgg))
*(*int32)(unsafe.Pointer(pNC + 40)) &= ^(libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_MinMaxAgg) | libc.Int32FromInt32(NC_HasWin) | libc.Int32FromInt32(NC_OrderAgg))
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pList)).FnExpr) {
break
}
pExpr = (*(*TExprList_item)(unsafe.Pointer(pList + 8 + uintptr(i)*32))).FpExpr
if pExpr == uintptr(0) {
goto _1
}
*(*int32)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse + 316)) += (*TExpr)(unsafe.Pointer(pExpr)).FnHeight
if _sqlite3ExprCheckHeight(tls, (*(*TWalker)(unsafe.Pointer(bp))).FpParse, (*TParse)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse)).FnHeight) != 0 {
return int32(WRC_Abort)
}
_sqlite3WalkExprNN(tls, bp, pExpr)
*(*int32)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse + 316)) -= (*TExpr)(unsafe.Pointer(pExpr)).FnHeight
if (*TNameContext)(unsafe.Pointer(pNC)).FncFlags&(libc.Int32FromInt32(NC_HasAgg)|libc.Int32FromInt32(NC_MinMaxAgg)|libc.Int32FromInt32(NC_HasWin)|libc.Int32FromInt32(NC_OrderAgg)) != 0 {
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32((*TNameContext)(unsafe.Pointer(pNC)).FncFlags & (libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_HasWin)))
savedHasAgg |= (*TNameContext)(unsafe.Pointer(pNC)).FncFlags & (libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_MinMaxAgg) | libc.Int32FromInt32(NC_HasWin) | libc.Int32FromInt32(NC_OrderAgg))
*(*int32)(unsafe.Pointer(pNC + 40)) &= ^(libc.Int32FromInt32(NC_HasAgg) | libc.Int32FromInt32(NC_MinMaxAgg) | libc.Int32FromInt32(NC_HasWin) | libc.Int32FromInt32(NC_OrderAgg))
}
if (*TParse)(unsafe.Pointer((*(*TWalker)(unsafe.Pointer(bp))).FpParse)).FnErr > 0 {
return int32(WRC_Abort)
}
goto _1
_1:
;
i++
}
*(*int32)(unsafe.Pointer(pNC + 40)) |= savedHasAgg
return WRC_Continue
}
// C documentation
//
// /*
// ** Resolve all names in all expressions of a SELECT and in all
// ** descendants of the SELECT, including compounds off of p->pPrior,
// ** subqueries in expressions, and subqueries used as FROM clause
// ** terms.
// **
// ** See sqlite3ResolveExprNames() for a description of the kinds of
// ** transformations that occur.
// **
// ** All SELECT statements should have been expanded using
// ** sqlite3SelectExpand() prior to invoking this routine.
// */
func _sqlite3ResolveSelectNames(tls *libc.TLS, pParse uintptr, p uintptr, pOuterNC uintptr) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* w at bp+0 */ TWalker
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_resolveExprStep)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback = __ccgo_fp(_resolveSelectStep)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback2 = uintptr(0)
(*(*TWalker)(unsafe.Pointer(bp))).FpParse = pParse
*(*uintptr)(unsafe.Pointer(bp + 40)) = pOuterNC
_sqlite3WalkSelect(tls, bp, p)
}
// C documentation
//
// /*
// ** Resolve names in expressions that can only reference a single table
// ** or which cannot reference any tables at all. Examples:
// **
// ** "type" flag
// ** ------------
// ** (1) CHECK constraints NC_IsCheck
// ** (2) WHERE clauses on partial indices NC_PartIdx
// ** (3) Expressions in indexes on expressions NC_IdxExpr
// ** (4) Expression arguments to VACUUM INTO. 0
// ** (5) GENERATED ALWAYS as expressions NC_GenCol
// **
// ** In all cases except (4), the Expr.iTable value for Expr.op==TK_COLUMN
// ** nodes of the expression is set to -1 and the Expr.iColumn value is
// ** set to the column number. In case (4), TK_COLUMN nodes cause an error.
// **
// ** Any errors cause an error message to be set in pParse.
// */
func _sqlite3ResolveSelfReference(tls *libc.TLS, pParse uintptr, pTab uintptr, type1 int32, pExpr uintptr, pList uintptr) (r int32) {
bp := tls.Alloc(176)
defer tls.Free(176)
var rc, v1 int32
var _ /* sNC at bp+112 */ TNameContext
var _ /* sSrc at bp+0 */ TSrcList
_, _ = rc, v1
libc.Xmemset(tls, bp+112, 0, uint64(56))
libc.Xmemset(tls, bp, 0, uint64(112))
if pTab != 0 {
(*(*TSrcList)(unsafe.Pointer(bp))).FnSrc = int32(1)
(*(*TSrcItem)(unsafe.Pointer(bp + 8))).FzName = (*TTable)(unsafe.Pointer(pTab)).FzName
(*(*TSrcItem)(unsafe.Pointer(bp + 8))).FpTab = pTab
(*(*TSrcItem)(unsafe.Pointer(bp + 8))).FiCursor = -int32(1)
if (*TTable)(unsafe.Pointer(pTab)).FpSchema != (*(*TDb)(unsafe.Pointer((*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).FaDb + 1*32))).FpSchema {
/* Cause EP_FromDDL to be set on TK_FUNCTION nodes of non-TEMP
** schema elements */
type1 |= int32(NC_FromDDL)
}
}
(*(*TNameContext)(unsafe.Pointer(bp + 112))).FpParse = pParse
(*(*TNameContext)(unsafe.Pointer(bp + 112))).FpSrcList = bp
(*(*TNameContext)(unsafe.Pointer(bp + 112))).FncFlags = type1 | int32(NC_IsDDL)
v1 = _sqlite3ResolveExprNames(tls, bp+112, pExpr)
rc = v1
if v1 != SQLITE_OK {
return rc
}
if pList != 0 {
rc = _sqlite3ResolveExprListNames(tls, bp+112, pList)
}
return rc
}
// C documentation
//
// /*
// ** Return the affinity character for a single column of a table.
// */
func _sqlite3TableColumnAffinity(tls *libc.TLS, pTab uintptr, iCol int32) (r int8) {
if iCol < 0 || iCol >= int32((*TTable)(unsafe.Pointer(pTab)).FnCol) {
return int8(SQLITE_AFF_INTEGER)
}
return (*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer(pTab)).FaCol + uintptr(iCol)*16))).Faffinity
}
// C documentation
//
// /*
// ** Return the 'affinity' of the expression pExpr if any.
// **
// ** If pExpr is a column, a reference to a column via an 'AS' alias,
// ** or a sub-select with a column as the return value, then the
// ** affinity of that column is returned. Otherwise, 0x00 is returned,
// ** indicating no affinity for the expression.
// **
// ** i.e. the WHERE clause expressions in the following statements all
// ** have an affinity:
// **
// ** CREATE TABLE t1(a);
// ** SELECT * FROM t1 WHERE a;
// ** SELECT a AS b FROM t1 WHERE b;
// ** SELECT * FROM t1 WHERE (select a from t1);
// */
func _sqlite3ExprAffinity(tls *libc.TLS, pExpr uintptr) (r int8) {
var op, v1 int32
var v2 bool
_, _, _ = op, v1, v2
op = int32((*TExpr)(unsafe.Pointer(pExpr)).Fop)
for int32(1) != 0 {
if op == int32(TK_COLUMN) || op == int32(TK_AGG_COLUMN) && *(*uintptr)(unsafe.Pointer(pExpr + 64)) != uintptr(0) {
return _sqlite3TableColumnAffinity(tls, *(*uintptr)(unsafe.Pointer(pExpr + 64)), int32((*TExpr)(unsafe.Pointer(pExpr)).FiColumn))
}
if op == int32(TK_SELECT) {
return _sqlite3ExprAffinity(tls, (*(*TExprList_item)(unsafe.Pointer((*TSelect)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FpEList + 8))).FpExpr)
}
if op == int32(TK_CAST) {
return _sqlite3AffinityType(tls, *(*uintptr)(unsafe.Pointer(pExpr + 8)), uintptr(0))
}
if op == int32(TK_SELECT_COLUMN) {
return _sqlite3ExprAffinity(tls, (*(*TExprList_item)(unsafe.Pointer((*TSelect)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(pExpr)).FpLeft + 32)))).FpEList + 8 + uintptr((*TExpr)(unsafe.Pointer(pExpr)).FiColumn)*32))).FpExpr)
}
if op == int32(TK_VECTOR) {
return _sqlite3ExprAffinity(tls, (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)) + 8))).FpExpr)
}
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Skip)|libc.Int32FromInt32(EP_IfNullRow)) != uint32(0) {
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
op = int32((*TExpr)(unsafe.Pointer(pExpr)).Fop)
continue
}
if v2 = op != int32(TK_REGISTER); !v2 {
v1 = int32((*TExpr)(unsafe.Pointer(pExpr)).Fop2)
op = v1
}
if v2 || v1 == int32(TK_REGISTER) {
break
}
}
return (*TExpr)(unsafe.Pointer(pExpr)).FaffExpr
}
// C documentation
//
// /*
// ** Make a guess at all the possible datatypes of the result that could
// ** be returned by an expression. Return a bitmask indicating the answer:
// **
// ** 0x01 Numeric
// ** 0x02 Text
// ** 0x04 Blob
// **
// ** If the expression must return NULL, then 0x00 is returned.
// */
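// For illustration (hypothetical expressions), the bitmask values produced by
// the switch below include:
//
//     'abc'     -> 0x02  (text literal)
//     x'1234'   -> 0x04  (blob literal)
//     a || 'x'  -> 0x06  (TK_CONCAT: text or blob)
//     NULL      -> 0x00
//
// Columns, casts and sub-selects fall through to the affinity-based cases,
// while variables and function calls return 0x07 (any type).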
func _sqlite3ExprDataType(tls *libc.TLS, pExpr uintptr) (r int32) {
var aff, ii, res int32
var pList uintptr
_, _, _, _ = aff, ii, pList, res
for pExpr != 0 {
switch int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) {
case int32(TK_COLLATE):
fallthrough
case int32(TK_IF_NULL_ROW):
fallthrough
case int32(TK_UPLUS):
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
case int32(TK_NULL):
pExpr = uintptr(0)
case int32(TK_STRING):
return int32(0x02)
case int32(TK_BLOB):
return int32(0x04)
case int32(TK_CONCAT):
return int32(0x06)
case int32(TK_VARIABLE):
fallthrough
case int32(TK_AGG_FUNCTION):
fallthrough
case int32(TK_FUNCTION):
return int32(0x07)
case int32(TK_COLUMN):
fallthrough
case int32(TK_AGG_COLUMN):
fallthrough
case int32(TK_SELECT):
fallthrough
case int32(TK_CAST):
fallthrough
case int32(TK_SELECT_COLUMN):
fallthrough
case int32(TK_VECTOR):
aff = int32(_sqlite3ExprAffinity(tls, pExpr))
if aff >= int32(SQLITE_AFF_NUMERIC) {
return int32(0x05)
}
if aff == int32(SQLITE_AFF_TEXT) {
return int32(0x06)
}
return int32(0x07)
case int32(TK_CASE):
res = 0
pList = *(*uintptr)(unsafe.Pointer(pExpr + 32))
ii = int32(1)
for {
if !(ii < (*TExprList)(unsafe.Pointer(pList)).FnExpr) {
break
}
res |= _sqlite3ExprDataType(tls, (*(*TExprList_item)(unsafe.Pointer(pList + 8 + uintptr(ii)*32))).FpExpr)
goto _1
_1:
;
ii += int32(2)
}
if (*TExprList)(unsafe.Pointer(pList)).FnExpr%int32(2) != 0 {
res |= _sqlite3ExprDataType(tls, (*(*TExprList_item)(unsafe.Pointer(pList + 8 + uintptr((*TExprList)(unsafe.Pointer(pList)).FnExpr-int32(1))*32))).FpExpr)
}
return res
default:
return int32(0x01)
} /* End of switch(op) */
} /* End of while(pExpr) */
return 0x00
}
// C documentation
//
// /*
// ** Set the collating sequence for expression pExpr to be the collating
// ** sequence named by pToken. Return a pointer to a new Expr node that
// ** implements the COLLATE operator.
// **
// ** If a memory allocation error occurs, that fact is recorded in pParse->db
// ** and the pExpr parameter is returned unchanged.
// */
func _sqlite3ExprAddCollateToken(tls *libc.TLS, pParse uintptr, pExpr uintptr, pCollName uintptr, dequote int32) (r uintptr) {
var pNew uintptr
_ = pNew
if (*TToken)(unsafe.Pointer(pCollName)).Fn > uint32(0) {
pNew = _sqlite3ExprAlloc(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, int32(TK_COLLATE), pCollName, dequote)
if pNew != 0 {
(*TExpr)(unsafe.Pointer(pNew)).FpLeft = pExpr
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= uint32(libc.Int32FromInt32(EP_Collate) | libc.Int32FromInt32(EP_Skip))
pExpr = pNew
}
}
return pExpr
}
func _sqlite3ExprAddCollateString(tls *libc.TLS, pParse uintptr, pExpr uintptr, zC uintptr) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* s at bp+0 */ TToken
_sqlite3TokenInit(tls, bp, zC)
return _sqlite3ExprAddCollateToken(tls, pParse, pExpr, bp, 0)
}
// C documentation
//
// /*
// ** Skip over any TK_COLLATE operators.
// */
func _sqlite3ExprSkipCollate(tls *libc.TLS, pExpr uintptr) (r uintptr) {
for pExpr != 0 && (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Skip)) != uint32(0) {
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
}
return pExpr
}
// C documentation
//
// /*
// ** Skip over any TK_COLLATE operators and/or any unlikely()
// ** or likelihood() or likely() functions at the root of an
// ** expression.
// */
func _sqlite3ExprSkipCollateAndLikely(tls *libc.TLS, pExpr uintptr) (r uintptr) {
for pExpr != 0 && (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Skip)|libc.Int32FromInt32(EP_Unlikely)) != uint32(0) {
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Unlikely)) != uint32(0) {
pExpr = (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)) + 8))).FpExpr
} else {
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_COLLATE) {
pExpr = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
} else {
break
}
}
}
return pExpr
}
// C documentation
//
// /*
// ** Return the collation sequence for the expression pExpr. If
// ** there is no defined collating sequence, return NULL.
// **
// ** See also: sqlite3ExprNNCollSeq()
// **
// ** The sqlite3ExprNNCollSeq() routine works exactly the same except that it returns the
// ** default collation if pExpr has no defined collation.
// **
// ** The collating sequence might be determined by a COLLATE operator
// ** or by the presence of a column with a defined collating sequence.
// ** COLLATE operators take first precedence. Left operands take
// ** precedence over right operands.
// */
func _sqlite3ExprCollSeq(tls *libc.TLS, pParse uintptr, pExpr uintptr) (r uintptr) {
var db, p, pColl, pNext, zColl uintptr
var i, j, op, v1 int32
_, _, _, _, _, _, _, _, _ = db, i, j, op, p, pColl, pNext, zColl, v1
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
pColl = uintptr(0)
p = pExpr
for p != 0 {
op = int32((*TExpr)(unsafe.Pointer(p)).Fop)
if op == int32(TK_REGISTER) {
op = int32((*TExpr)(unsafe.Pointer(p)).Fop2)
}
if op == int32(TK_AGG_COLUMN) && *(*uintptr)(unsafe.Pointer(p + 64)) != uintptr(0) || op == int32(TK_COLUMN) || op == int32(TK_TRIGGER) {
v1 = int32((*TExpr)(unsafe.Pointer(p)).FiColumn)
j = v1
if v1 >= 0 {
zColl = _sqlite3ColumnColl(tls, (*TTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 64)))).FaCol+uintptr(j)*16)
pColl = _sqlite3FindCollSeq(tls, db, (*Tsqlite3)(unsafe.Pointer(db)).Fenc, zColl, 0)
}
break
}
if op == int32(TK_CAST) || op == int32(TK_UPLUS) {
p = (*TExpr)(unsafe.Pointer(p)).FpLeft
continue
}
if op == int32(TK_VECTOR) {
p = (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 32)) + 8))).FpExpr
continue
}
if op == int32(TK_COLLATE) {
pColl = _sqlite3GetCollSeq(tls, pParse, (*Tsqlite3)(unsafe.Pointer(db)).Fenc, uintptr(0), *(*uintptr)(unsafe.Pointer(p + 8)))
break
}
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(EP_Collate) != 0 {
if (*TExpr)(unsafe.Pointer(p)).FpLeft != 0 && (*TExpr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(p)).FpLeft)).Fflags&uint32(EP_Collate) != uint32(0) {
p = (*TExpr)(unsafe.Pointer(p)).FpLeft
} else {
pNext = (*TExpr)(unsafe.Pointer(p)).FpRight
/* The Expr.x union is never used at the same time as Expr.pRight */
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(EP_xIsSelect) == uint32(0) && *(*uintptr)(unsafe.Pointer(p + 32)) != uintptr(0) && !((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) {
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 32)))).FnExpr) {
break
}
if (*TExpr)(unsafe.Pointer((*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 32)) + 8 + uintptr(i)*32))).FpExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Collate)) != uint32(0) {
pNext = (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 32)) + 8 + uintptr(i)*32))).FpExpr
break
}
goto _2
_2:
;
i++
}
}
p = pNext
}
} else {
break
}
}
if _sqlite3CheckCollSeq(tls, pParse, pColl) != 0 {
pColl = uintptr(0)
}
return pColl
}
// C documentation
//
// /*
// ** Return the collation sequence for the expression pExpr. If
// ** there is no defined collating sequence, return a pointer to the
// ** default collation sequence.
// **
// ** See also: sqlite3ExprCollSeq()
// **
// ** The sqlite3ExprCollSeq() routine works the same except that it
// ** returns NULL if there is no defined collation.
// */
func _sqlite3ExprNNCollSeq(tls *libc.TLS, pParse uintptr, pExpr uintptr) (r uintptr) {
var p uintptr
_ = p
p = _sqlite3ExprCollSeq(tls, pParse, pExpr)
if p == uintptr(0) {
p = (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).FpDfltColl
}
return p
}
// C documentation
//
// /*
// ** Return TRUE if the two expressions have equivalent collating sequences.
// */
func _sqlite3ExprCollSeqMatch(tls *libc.TLS, pParse uintptr, pE1 uintptr, pE2 uintptr) (r int32) {
var pColl1, pColl2 uintptr
_, _ = pColl1, pColl2
pColl1 = _sqlite3ExprNNCollSeq(tls, pParse, pE1)
pColl2 = _sqlite3ExprNNCollSeq(tls, pParse, pE2)
return libc.BoolInt32(_sqlite3StrICmp(tls, (*TCollSeq)(unsafe.Pointer(pColl1)).FzName, (*TCollSeq)(unsafe.Pointer(pColl2)).FzName) == 0)
}
// C documentation
//
// /*
// ** pExpr is an operand of a comparison operator. aff2 is the
// ** type affinity of the other operand. This routine returns the
// ** type affinity that should be used for the comparison operator.
// */
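// For illustration (hypothetical schema): if one operand has numeric affinity,
// the comparison uses numeric affinity; a comparison between a column and a
// plain literal uses the column's affinity:
//
//     CREATE TABLE t1(i INTEGER, t TEXT);
//     ... WHERE i = t;      -- compared with NUMERIC affinity
//     ... WHERE t = 'abc';  -- compared with TEXT affinity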
func _sqlite3CompareAffinity(tls *libc.TLS, pExpr uintptr, aff2 int8) (r int8) {
var aff1 int8
var v1 int32
_, _ = aff1, v1
aff1 = _sqlite3ExprAffinity(tls, pExpr)
if int32(aff1) > int32(SQLITE_AFF_NONE) && int32(aff2) > int32(SQLITE_AFF_NONE) {
/* Both sides of the comparison are columns. If one has numeric
** affinity, use that. Otherwise use no affinity.
*/
if int32(aff1) >= int32(SQLITE_AFF_NUMERIC) || int32(aff2) >= int32(SQLITE_AFF_NUMERIC) {
return int8(SQLITE_AFF_NUMERIC)
} else {
return int8(SQLITE_AFF_BLOB)
}
} else {
/* One side is a column, the other is not. Use the column's affinity. */
if int32(aff1) <= int32(SQLITE_AFF_NONE) {
v1 = int32(aff2)
} else {
v1 = int32(aff1)
}
return int8(v1 | int32(SQLITE_AFF_NONE))
}
return r
}
// C documentation
//
// /*
// ** pExpr is a comparison operator. Return the type affinity that should
// ** be applied to both operands prior to doing the comparison.
// */
func _comparisonAffinity(tls *libc.TLS, pExpr uintptr) (r int8) {
var aff int8
_ = aff
aff = _sqlite3ExprAffinity(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft)
if (*TExpr)(unsafe.Pointer(pExpr)).FpRight != 0 {
aff = _sqlite3CompareAffinity(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpRight, aff)
} else {
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
aff = _sqlite3CompareAffinity(tls, (*(*TExprList_item)(unsafe.Pointer((*TSelect)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FpEList + 8))).FpExpr, aff)
} else {
if int32(aff) == 0 {
aff = int8(SQLITE_AFF_BLOB)
}
}
}
return aff
}
// C documentation
//
// /*
// ** pExpr is a comparison expression, e.g. '=', '<', IN(...) etc.
// ** idx_affinity is the affinity of an indexed column. Return true
// ** if the index with affinity idx_affinity may be used to implement
// ** the comparison in pExpr.
// */
func _sqlite3IndexAffinityOk(tls *libc.TLS, pExpr uintptr, idx_affinity int8) (r int32) {
var aff int8
_ = aff
aff = _comparisonAffinity(tls, pExpr)
if int32(aff) < int32(SQLITE_AFF_TEXT) {
return int32(1)
}
if int32(aff) == int32(SQLITE_AFF_TEXT) {
return libc.BoolInt32(int32(idx_affinity) == int32(SQLITE_AFF_TEXT))
}
return libc.BoolInt32(int32(idx_affinity) >= int32(SQLITE_AFF_NUMERIC))
}
// C documentation
//
// /*
// ** Return the P5 value that should be used for a binary comparison
// ** opcode (OP_Eq, OP_Ge etc.) used to compare pExpr1 and pExpr2.
// */
func _binaryCompareP5(tls *libc.TLS, pExpr1 uintptr, pExpr2 uintptr, jumpIfNull int32) (r Tu8) {
var aff Tu8
_ = aff
aff = uint8(_sqlite3ExprAffinity(tls, pExpr2))
aff = uint8(int32(uint8(_sqlite3CompareAffinity(tls, pExpr1, int8(aff)))) | int32(uint8(jumpIfNull)))
return aff
}
// C documentation
//
// /*
// ** Return a pointer to the collation sequence that should be used by
// ** a binary comparison operator comparing pLeft and pRight.
// **
// ** If the left hand expression has a collating sequence type, then it is
// ** used. Otherwise the collation sequence for the right hand expression
// ** is used, or the default (BINARY) if neither expression has a collating
// ** type.
// **
// ** Argument pRight (but not pLeft) may be a null pointer. In this case,
// ** it is not considered.
// */
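// For illustration (hypothetical schema):
//
//     CREATE TABLE t1(a TEXT COLLATE NOCASE, b TEXT COLLATE RTRIM);
//     ... WHERE a = b;                -- NOCASE (left operand's collation)
//     ... WHERE a = b COLLATE RTRIM;  -- RTRIM (explicit COLLATE operator)
//     ... WHERE 'x' = 'y';            -- default BINARY collation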
func _sqlite3BinaryCompareCollSeq(tls *libc.TLS, pParse uintptr, pLeft uintptr, pRight uintptr) (r uintptr) {
var pColl uintptr
_ = pColl
if (*TExpr)(unsafe.Pointer(pLeft)).Fflags&uint32(EP_Collate) != 0 {
pColl = _sqlite3ExprCollSeq(tls, pParse, pLeft)
} else {
if pRight != 0 && (*TExpr)(unsafe.Pointer(pRight)).Fflags&uint32(EP_Collate) != uint32(0) {
pColl = _sqlite3ExprCollSeq(tls, pParse, pRight)
} else {
pColl = _sqlite3ExprCollSeq(tls, pParse, pLeft)
if !(pColl != 0) {
pColl = _sqlite3ExprCollSeq(tls, pParse, pRight)
}
}
}
return pColl
}
// C documentation
//
// /* Expression p is a comparison operator. Return a collation sequence
// ** appropriate for the comparison operator.
// **
// ** This is normally just a wrapper around sqlite3BinaryCompareCollSeq().
// ** However, if the OP_Commuted flag is set, then the order of the operands
// ** is reversed in the sqlite3BinaryCompareCollSeq() call so that the
// ** correct collating sequence is found.
// */
func _sqlite3ExprCompareCollSeq(tls *libc.TLS, pParse uintptr, p uintptr) (r uintptr) {
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_Commuted)) != uint32(0) {
return _sqlite3BinaryCompareCollSeq(tls, pParse, (*TExpr)(unsafe.Pointer(p)).FpRight, (*TExpr)(unsafe.Pointer(p)).FpLeft)
} else {
return _sqlite3BinaryCompareCollSeq(tls, pParse, (*TExpr)(unsafe.Pointer(p)).FpLeft, (*TExpr)(unsafe.Pointer(p)).FpRight)
}
return r
}
// C documentation
//
// /*
// ** Generate code for a comparison operator.
// */
func _codeCompare(tls *libc.TLS, pParse uintptr, pLeft uintptr, pRight uintptr, opcode int32, in1 int32, in2 int32, dest int32, jumpIfNull int32, isCommuted int32) (r int32) {
var addr, p5 int32
var p4 uintptr
_, _, _ = addr, p4, p5
if (*TParse)(unsafe.Pointer(pParse)).FnErr != 0 {
return 0
}
if isCommuted != 0 {
p4 = _sqlite3BinaryCompareCollSeq(tls, pParse, pRight, pLeft)
} else {
p4 = _sqlite3BinaryCompareCollSeq(tls, pParse, pLeft, pRight)
}
p5 = int32(_binaryCompareP5(tls, pLeft, pRight, jumpIfNull))
addr = _sqlite3VdbeAddOp4(tls, (*TParse)(unsafe.Pointer(pParse)).FpVdbe, opcode, in2, dest, in1, p4, -int32(2))
_sqlite3VdbeChangeP5(tls, (*TParse)(unsafe.Pointer(pParse)).FpVdbe, uint16(uint8(p5)))
return addr
}
// C documentation
//
// /*
// ** Return true if expression pExpr is a vector, or false otherwise.
// **
// ** A vector is defined as any expression that results in two or more
// ** columns of result. Every TK_VECTOR node is a vector because the
// ** parser will not generate a TK_VECTOR with fewer than two entries.
// ** But a TK_SELECT might be either a vector or a scalar. It is only
// ** considered a vector if it has two or more result columns.
// */
func _sqlite3ExprIsVector(tls *libc.TLS, pExpr uintptr) (r int32) {
return libc.BoolInt32(_sqlite3ExprVectorSize(tls, pExpr) > int32(1))
}
// C documentation
//
// /*
// ** If the expression passed as the only argument is of type TK_VECTOR
// ** return the number of expressions in the vector. Or, if the expression
// ** is a sub-select, return the number of columns in the sub-select. For
// ** any other type of expression, return 1.
// */
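// For illustration (hypothetical expressions):
//
//     (a, b, c)               -- TK_VECTOR, size 3
//     (SELECT x, y FROM t1)   -- TK_SELECT, size 2
//     a + 1                   -- scalar, size 1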
func _sqlite3ExprVectorSize(tls *libc.TLS, pExpr uintptr) (r int32) {
var op Tu8
_ = op
op = (*TExpr)(unsafe.Pointer(pExpr)).Fop
if int32(op) == int32(TK_REGISTER) {
op = (*TExpr)(unsafe.Pointer(pExpr)).Fop2
}
if int32(op) == int32(TK_VECTOR) {
return (*TExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FnExpr
} else {
if int32(op) == int32(TK_SELECT) {
return (*TExprList)(unsafe.Pointer((*TSelect)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FpEList)).FnExpr
} else {
return int32(1)
}
}
return r
}
// C documentation
//
// /*
// ** Return a pointer to a subexpression of pVector that is the i-th
// ** column of the vector (numbered starting with 0). The caller must
// ** ensure that i is within range.
// **
// ** If pVector is really a scalar (and "scalar" here includes subqueries
// ** that return a single column!) then return pVector unmodified.
// **
// ** pVector retains ownership of the returned subexpression.
// **
// ** If the vector is a (SELECT ...) then the expression returned is
// ** just the expression for the i-th term of the result set, and may
// ** not be ready for evaluation because the table cursor has not yet
// ** been positioned.
// */
func _sqlite3VectorFieldSubexpr(tls *libc.TLS, pVector uintptr, i int32) (r uintptr) {
if _sqlite3ExprIsVector(tls, pVector) != 0 {
if int32((*TExpr)(unsafe.Pointer(pVector)).Fop) == int32(TK_SELECT) || int32((*TExpr)(unsafe.Pointer(pVector)).Fop2) == int32(TK_SELECT) {
return (*(*TExprList_item)(unsafe.Pointer((*TSelect)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pVector + 32)))).FpEList + 8 + uintptr(i)*32))).FpExpr
} else {
return (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pVector + 32)) + 8 + uintptr(i)*32))).FpExpr
}
}
return pVector
}
// C documentation
//
// /*
// ** Compute and return a new Expr object which when passed to
// ** sqlite3ExprCode() will generate all necessary code to compute
// ** the iField-th column of the vector expression pVector.
// **
// ** It is ok for pVector to be a scalar (as long as iField==0).
// ** In that case, this routine works like sqlite3ExprDup().
// **
// ** The caller owns the returned Expr object and is responsible for
// ** ensuring that the returned value eventually gets freed.
// **
// ** The caller retains ownership of pVector. If pVector is a TK_SELECT,
// ** then the returned object will reference pVector and so pVector must remain
// ** valid for the life of the returned object. If pVector is a TK_VECTOR
// ** or a scalar expression, then it can be deleted as soon as this routine
// ** returns.
// **
// ** A trick to cause a TK_SELECT pVector to be deleted together with
// ** the returned Expr object is to attach the pVector to the pRight field
// ** of the returned TK_SELECT_COLUMN Expr object.
// */
func _sqlite3ExprForVectorField(tls *libc.TLS, pParse uintptr, pVector uintptr, iField int32, nField int32) (r uintptr) {
var pRet, ppVector uintptr
_, _ = pRet, ppVector
if int32((*TExpr)(unsafe.Pointer(pVector)).Fop) == int32(TK_SELECT) {
/* The TK_SELECT_COLUMN Expr node:
**
** pLeft: pVector containing TK_SELECT. Not deleted.
** pRight: not used. But recursively deleted.
** iColumn: Index of a column in pVector
** iTable: 0 or the number of columns on the LHS of an assignment
** pLeft->iTable: First in an array of register holding result, or 0
** if the result is not yet computed.
**
** sqlite3ExprDelete() specifically skips the recursive delete of
** pLeft on TK_SELECT_COLUMN nodes. But pRight is followed, so pVector
** can be attached to pRight to cause this node to take ownership of
** pVector. Typically there will be multiple TK_SELECT_COLUMN nodes
** with the same pLeft pointer to the pVector, but only one of them
** will own the pVector.
*/
pRet = _sqlite3PExpr(tls, pParse, int32(TK_SELECT_COLUMN), uintptr(0), uintptr(0))
if pRet != 0 {
*(*Tu32)(unsafe.Pointer(pRet + 4)) |= uint32(libc.Int32FromInt32(EP_FullSize))
(*TExpr)(unsafe.Pointer(pRet)).FiTable = nField
(*TExpr)(unsafe.Pointer(pRet)).FiColumn = int16(iField)
(*TExpr)(unsafe.Pointer(pRet)).FpLeft = pVector
}
} else {
if int32((*TExpr)(unsafe.Pointer(pVector)).Fop) == int32(TK_VECTOR) {
ppVector = *(*uintptr)(unsafe.Pointer(pVector + 32)) + 8 + uintptr(iField)*32
pVector = *(*uintptr)(unsafe.Pointer(ppVector))
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
/* This must be a vector UPDATE inside a trigger */
*(*uintptr)(unsafe.Pointer(ppVector)) = uintptr(0)
return pVector
}
}
pRet = _sqlite3ExprDup(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pVector, 0)
}
return pRet
}
// C documentation
//
// /*
// ** If expression pExpr is of type TK_SELECT, generate code to evaluate
// ** it. Return the register in which the result is stored (or, if the
// ** sub-select returns more than one column, the first in an array
// ** of registers in which the result is stored).
// **
// ** If pExpr is not a TK_SELECT expression, return 0.
// */
func _exprCodeSubselect(tls *libc.TLS, pParse uintptr, pExpr uintptr) (r int32) {
var reg int32
_ = reg
reg = 0
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_SELECT) {
reg = _sqlite3CodeSubselect(tls, pParse, pExpr)
}
return reg
}
// C documentation
//
// /*
// ** Argument pVector points to a vector expression - either a TK_VECTOR
// ** or TK_SELECT that returns more than one column. This function returns
// ** the register number of a register that contains the value of
// ** element iField of the vector.
// **
// ** If pVector is a TK_SELECT expression, then code for it must have
// ** already been generated using the exprCodeSubselect() routine. In this
// ** case parameter regSelect should be the first in an array of registers
// ** containing the results of the sub-select.
// **
// ** If pVector is of type TK_VECTOR, then code for the requested field
// ** is generated. In this case (*pRegFree) may be set to the number of
// ** a temporary register to be freed by the caller before returning.
// **
// ** Before returning, output parameter (*ppExpr) is set to point to the
// ** Expr object corresponding to element iField of the vector.
// */
func _exprVectorRegister(tls *libc.TLS, pParse uintptr, pVector uintptr, iField int32, regSelect int32, ppExpr uintptr, pRegFree uintptr) (r int32) {
var op Tu8
_ = op
op = (*TExpr)(unsafe.Pointer(pVector)).Fop
if int32(op) == int32(TK_REGISTER) {
*(*uintptr)(unsafe.Pointer(ppExpr)) = _sqlite3VectorFieldSubexpr(tls, pVector, iField)
return (*TExpr)(unsafe.Pointer(pVector)).FiTable + iField
}
if int32(op) == int32(TK_SELECT) {
*(*uintptr)(unsafe.Pointer(ppExpr)) = (*(*TExprList_item)(unsafe.Pointer((*TSelect)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pVector + 32)))).FpEList + 8 + uintptr(iField)*32))).FpExpr
return regSelect + iField
}
if int32(op) == int32(TK_VECTOR) {
*(*uintptr)(unsafe.Pointer(ppExpr)) = (*(*TExprList_item)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pVector + 32)) + 8 + uintptr(iField)*32))).FpExpr
return _sqlite3ExprCodeTemp(tls, pParse, *(*uintptr)(unsafe.Pointer(ppExpr)), pRegFree)
}
return 0
}
// C documentation
//
// /*
// ** Expression pExpr is a comparison between two vector values. Compute
// ** the result of the comparison (1, 0, or NULL) and write that
// ** result into register dest.
// **
// ** The caller must satisfy the following preconditions:
// **
// ** if pExpr->op==TK_IS: op==TK_EQ and p5==SQLITE_NULLEQ
// ** if pExpr->op==TK_ISNOT: op==TK_NE and p5==SQLITE_NULLEQ
// ** otherwise: op==pExpr->op and p5==0
// */
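// For illustration (hypothetical schema), a row-value comparison such as
//
//     SELECT (a, b) = (1, 2) FROM t1;
//
// is coded element by element: a is compared with 1, then b with 2, and the
// jumps emitted below combine the per-element results into a single 1, 0 or
// NULL stored in register dest.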
func _codeVectorCompare(tls *libc.TLS, pParse uintptr, pExpr uintptr, dest int32, op Tu8, p5 Tu8) {
bp := tls.Alloc(32)
defer tls.Free(32)
var addrCmp, addrDone, i, isCommuted, nLeft, r1, r2, regLeft, regRight int32
var opx Tu8
var pLeft, pRight, v uintptr
var _ /* pL at bp+8 */ uintptr
var _ /* pR at bp+16 */ uintptr
var _ /* regFree1 at bp+0 */ int32
var _ /* regFree2 at bp+4 */ int32
_, _, _, _, _, _, _, _, _, _, _, _, _ = addrCmp, addrDone, i, isCommuted, nLeft, opx, pLeft, pRight, r1, r2, regLeft, regRight, v
v = (*TParse)(unsafe.Pointer(pParse)).FpVdbe
pLeft = (*TExpr)(unsafe.Pointer(pExpr)).FpLeft
pRight = (*TExpr)(unsafe.Pointer(pExpr)).FpRight
nLeft = _sqlite3ExprVectorSize(tls, pLeft)
regLeft = 0
regRight = 0
opx = op
addrCmp = 0
addrDone = _sqlite3VdbeMakeLabel(tls, pParse)
isCommuted = libc.BoolInt32((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Commuted)) != uint32(0))
if (*TParse)(unsafe.Pointer(pParse)).FnErr != 0 {
return
}
if nLeft != _sqlite3ExprVectorSize(tls, pRight) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+6679, 0)
return
}
if int32(op) == int32(TK_LE) {
opx = uint8(TK_LT)
}
if int32(op) == int32(TK_GE) {
opx = uint8(TK_GT)
}
if int32(op) == int32(TK_NE) {
opx = uint8(TK_EQ)
}
regLeft = _exprCodeSubselect(tls, pParse, pLeft)
regRight = _exprCodeSubselect(tls, pParse, pRight)
_sqlite3VdbeAddOp2(tls, v, int32(OP_Integer), int32(1), dest)
i = 0
for {
if !(int32(1) != 0) {
break
}
*(*int32)(unsafe.Pointer(bp)) = 0
*(*int32)(unsafe.Pointer(bp + 4)) = 0
*(*uintptr)(unsafe.Pointer(bp + 8)) = uintptr(0)
*(*uintptr)(unsafe.Pointer(bp + 16)) = uintptr(0)
if addrCmp != 0 {
_sqlite3VdbeJumpHere(tls, v, addrCmp)
}
r1 = _exprVectorRegister(tls, pParse, pLeft, i, regLeft, bp+8, bp)
r2 = _exprVectorRegister(tls, pParse, pRight, i, regRight, bp+16, bp+4)
addrCmp = _sqlite3VdbeCurrentAddr(tls, v)
_codeCompare(tls, pParse, *(*uintptr)(unsafe.Pointer(bp + 8)), *(*uintptr)(unsafe.Pointer(bp + 16)), int32(opx), r1, r2, addrDone, int32(p5), isCommuted)
_sqlite3ReleaseTempReg(tls, pParse, *(*int32)(unsafe.Pointer(bp)))
_sqlite3ReleaseTempReg(tls, pParse, *(*int32)(unsafe.Pointer(bp + 4)))
if (int32(opx) == int32(TK_LT) || int32(opx) == int32(TK_GT)) && i < nLeft-int32(1) {
addrCmp = _sqlite3VdbeAddOp0(tls, v, int32(OP_ElseEq))
}
if int32(p5) == int32(SQLITE_NULLEQ) {
_sqlite3VdbeAddOp2(tls, v, int32(OP_Integer), 0, dest)
} else {
_sqlite3VdbeAddOp3(tls, v, int32(OP_ZeroOrNull), r1, dest, r2)
}
if i == nLeft-int32(1) {
break
}
if int32(opx) == int32(TK_EQ) {
_sqlite3VdbeAddOp2(tls, v, int32(OP_NotNull), dest, addrDone)
} else {
_sqlite3VdbeAddOp2(tls, v, int32(OP_Goto), 0, addrDone)
if i == nLeft-int32(2) {
opx = op
}
}
goto _1
_1:
;
i++
}
_sqlite3VdbeJumpHere(tls, v, addrCmp)
_sqlite3VdbeResolveLabel(tls, v, addrDone)
if int32(op) == int32(TK_NE) {
_sqlite3VdbeAddOp2(tls, v, int32(OP_Not), dest, dest)
}
}
// C documentation
//
// /*
// ** Check that argument nHeight is less than or equal to the maximum
// ** expression depth allowed. If it is not, leave an error message in
// ** pParse.
// */
func _sqlite3ExprCheckHeight(tls *libc.TLS, pParse uintptr, nHeight int32) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var mxHeight, rc int32
_, _ = mxHeight, rc
rc = SQLITE_OK
mxHeight = *(*int32)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb + 136 + 3*4))
if nHeight > mxHeight {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7571, libc.VaList(bp+8, mxHeight))
rc = int32(SQLITE_ERROR)
}
return rc
}
// C documentation
//
// /* The following three functions, heightOfExpr(), heightOfExprList()
// ** and heightOfSelect(), are used to determine the maximum height
// ** of any expression tree referenced by the structure passed as the
// ** first argument.
// **
// ** If this maximum height is greater than the current value pointed
// ** to by pnHeight, the second parameter, then set *pnHeight to that
// ** value.
// */
func _heightOfExpr(tls *libc.TLS, p uintptr, pnHeight uintptr) {
if p != 0 {
if (*TExpr)(unsafe.Pointer(p)).FnHeight > *(*int32)(unsafe.Pointer(pnHeight)) {
*(*int32)(unsafe.Pointer(pnHeight)) = (*TExpr)(unsafe.Pointer(p)).FnHeight
}
}
}
func _heightOfExprList(tls *libc.TLS, p uintptr, pnHeight uintptr) {
var i int32
_ = i
if p != 0 {
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(p)).FnExpr) {
break
}
_heightOfExpr(tls, (*(*TExprList_item)(unsafe.Pointer(p + 8 + uintptr(i)*32))).FpExpr, pnHeight)
goto _1
_1:
;
i++
}
}
}
func _heightOfSelect(tls *libc.TLS, pSelect uintptr, pnHeight uintptr) {
var p uintptr
_ = p
p = pSelect
for {
if !(p != 0) {
break
}
_heightOfExpr(tls, (*TSelect)(unsafe.Pointer(p)).FpWhere, pnHeight)
_heightOfExpr(tls, (*TSelect)(unsafe.Pointer(p)).FpHaving, pnHeight)
_heightOfExpr(tls, (*TSelect)(unsafe.Pointer(p)).FpLimit, pnHeight)
_heightOfExprList(tls, (*TSelect)(unsafe.Pointer(p)).FpEList, pnHeight)
_heightOfExprList(tls, (*TSelect)(unsafe.Pointer(p)).FpGroupBy, pnHeight)
_heightOfExprList(tls, (*TSelect)(unsafe.Pointer(p)).FpOrderBy, pnHeight)
goto _1
_1:
;
p = (*TSelect)(unsafe.Pointer(p)).FpPrior
}
}
// C documentation
//
// /*
// ** Set the Expr.nHeight variable in the structure passed as an
// ** argument. An expression with no children and no Expr.pList or
// ** Expr.pSelect member has a height of 1. Any other expression
// ** has a height equal to the maximum height of any other
// ** referenced Expr plus one.
// **
// ** Also propagate EP_Propagate flags up from Expr.x.pList to Expr.flags,
// ** if appropriate.
// */
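// For illustration (hypothetical expressions):
//
//     a            -- height 1 (leaf)
//     a + b        -- height 2
//     (a + b) * c  -- height 3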
func _exprSetHeight(tls *libc.TLS, p uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var v1 int32
var _ /* nHeight at bp+0 */ int32
_ = v1
if (*TExpr)(unsafe.Pointer(p)).FpLeft != 0 {
v1 = (*TExpr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(p)).FpLeft)).FnHeight
} else {
v1 = 0
}
*(*int32)(unsafe.Pointer(bp)) = v1
if (*TExpr)(unsafe.Pointer(p)).FpRight != 0 && (*TExpr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(p)).FpRight)).FnHeight > *(*int32)(unsafe.Pointer(bp)) {
*(*int32)(unsafe.Pointer(bp)) = (*TExpr)(unsafe.Pointer((*TExpr)(unsafe.Pointer(p)).FpRight)).FnHeight
}
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
_heightOfSelect(tls, *(*uintptr)(unsafe.Pointer(p + 32)), bp)
} else {
if *(*uintptr)(unsafe.Pointer(p + 32)) != 0 {
_heightOfExprList(tls, *(*uintptr)(unsafe.Pointer(p + 32)), bp)
*(*Tu32)(unsafe.Pointer(p + 4)) |= uint32(libc.Int32FromInt32(EP_Collate)|libc.Int32FromInt32(EP_Subquery)|libc.Int32FromInt32(EP_HasFunc)) & _sqlite3ExprListFlags(tls, *(*uintptr)(unsafe.Pointer(p + 32)))
}
}
(*TExpr)(unsafe.Pointer(p)).FnHeight = *(*int32)(unsafe.Pointer(bp)) + int32(1)
}
// C documentation
//
// /*
// ** Set the Expr.nHeight variable using the exprSetHeight() function. If
// ** the height is greater than the maximum allowed expression depth,
// ** leave an error in pParse.
// **
// ** Also propagate all EP_Propagate flags from the Expr.x.pList into
// ** Expr.flags.
// */
func _sqlite3ExprSetHeightAndFlags(tls *libc.TLS, pParse uintptr, p uintptr) {
if (*TParse)(unsafe.Pointer(pParse)).FnErr != 0 {
return
}
_exprSetHeight(tls, p)
_sqlite3ExprCheckHeight(tls, pParse, (*TExpr)(unsafe.Pointer(p)).FnHeight)
}
// C documentation
//
// /*
// ** Return the maximum height of any expression tree referenced
// ** by the select statement passed as an argument.
// */
func _sqlite3SelectExprHeight(tls *libc.TLS, p uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* nHeight at bp+0 */ int32
*(*int32)(unsafe.Pointer(bp)) = 0
_heightOfSelect(tls, p, bp)
return *(*int32)(unsafe.Pointer(bp))
}
// C documentation
//
// /*
// ** Set the error offset for an Expr node, if possible.
// */
func _sqlite3ExprSetErrorOffset(tls *libc.TLS, pExpr uintptr, iOfst int32) {
if pExpr == uintptr(0) {
return
}
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_InnerON)|libc.Int32FromInt32(EP_OuterON)) != uint32(0) {
return
}
*(*int32)(unsafe.Pointer(pExpr + 52)) = iOfst
}
// C documentation
//
// /*
// ** This routine is the core allocator for Expr nodes.
// **
// ** Construct a new expression node and return a pointer to it. Memory
// ** for this node and for the pToken argument is a single allocation
// ** obtained from sqlite3DbMalloc(). The calling function
// ** is responsible for making sure the node eventually gets freed.
// **
// ** If dequote is true, then the token (if it exists) is dequoted.
// ** If dequote is false, no dequoting is performed. The dequote
// ** parameter is ignored if pToken is NULL or if the token does not
// ** appear to be quoted. If the quotes were of the form "..." (double-quotes)
// ** then the EP_DblQuoted flag is set on the expression node.
// **
// ** Special case: If op==TK_INTEGER and pToken points to a string that
// ** can be translated into a 32-bit integer, then the token is not
// ** stored in u.zToken. Instead, the integer value is written
// ** into u.iValue and the EP_IntValue flag is set. No extra storage
// ** is allocated to hold the integer text and the dequote flag is ignored.
// */
func _sqlite3ExprAlloc(tls *libc.TLS, db uintptr, op int32, pToken uintptr, dequote int32) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nExtra, v1 int32
var pNew uintptr
var _ /* iValue at bp+0 */ int32
_, _, _ = nExtra, pNew, v1
nExtra = 0
*(*int32)(unsafe.Pointer(bp)) = 0
if pToken != 0 {
if op != int32(TK_INTEGER) || (*TToken)(unsafe.Pointer(pToken)).Fz == uintptr(0) || _sqlite3GetInt32(tls, (*TToken)(unsafe.Pointer(pToken)).Fz, bp) == 0 {
nExtra = int32((*TToken)(unsafe.Pointer(pToken)).Fn + uint32(1))
}
}
pNew = _sqlite3DbMallocRawNN(tls, db, uint64(72)+uint64(nExtra))
if pNew != 0 {
libc.Xmemset(tls, pNew, 0, uint64(72))
(*TExpr)(unsafe.Pointer(pNew)).Fop = uint8(op)
(*TExpr)(unsafe.Pointer(pNew)).FiAgg = int16(-int32(1))
if pToken != 0 {
if nExtra == 0 {
if *(*int32)(unsafe.Pointer(bp)) != 0 {
v1 = int32(EP_IsTrue)
} else {
v1 = int32(EP_IsFalse)
}
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= uint32(libc.Int32FromInt32(EP_IntValue) | libc.Int32FromInt32(EP_Leaf) | v1)
*(*int32)(unsafe.Pointer(&(*TExpr)(unsafe.Pointer(pNew)).Fu)) = *(*int32)(unsafe.Pointer(bp))
} else {
*(*uintptr)(unsafe.Pointer(pNew + 8)) = pNew + 1*72
if (*TToken)(unsafe.Pointer(pToken)).Fn != 0 {
libc.Xmemcpy(tls, *(*uintptr)(unsafe.Pointer(pNew + 8)), (*TToken)(unsafe.Pointer(pToken)).Fz, uint64((*TToken)(unsafe.Pointer(pToken)).Fn))
}
*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pNew + 8)) + uintptr((*TToken)(unsafe.Pointer(pToken)).Fn))) = 0
if dequote != 0 && int32(_sqlite3CtypeMap[uint8(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pNew + 8)))))])&int32(0x80) != 0 {
_sqlite3DequoteExpr(tls, pNew)
}
}
}
(*TExpr)(unsafe.Pointer(pNew)).FnHeight = int32(1)
}
return pNew
}
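// For illustration (not from the original C comments): the TK_INTEGER special
// case above means a token such as "123" is stored directly in u.iValue with
// EP_IntValue|EP_Leaf set and no extra bytes are allocated, whereas a literal
// that does not fit in 32 bits (for example "9999999999") keeps its text in
// u.zToken, copied into the same allocation immediately after the Expr struct.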
// C documentation
//
// /*
// ** Allocate a new expression node from a zero-terminated token that has
// ** already been dequoted.
// */
func _sqlite3Expr(tls *libc.TLS, db uintptr, op int32, zToken uintptr) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var _ /* x at bp+0 */ TToken
(*(*TToken)(unsafe.Pointer(bp))).Fz = zToken
(*(*TToken)(unsafe.Pointer(bp))).Fn = uint32(_sqlite3Strlen30(tls, zToken))
return _sqlite3ExprAlloc(tls, db, op, bp, 0)
}
// C documentation
//
// /*
// ** Attach subtrees pLeft and pRight to the Expr node pRoot.
// **
// ** If pRoot==NULL that means that a memory allocation error has occurred.
// ** In that case, delete the subtrees pLeft and pRight.
// */
func _sqlite3ExprAttachSubtrees(tls *libc.TLS, db uintptr, pRoot uintptr, pLeft uintptr, pRight uintptr) {
if pRoot == uintptr(0) {
_sqlite3ExprDelete(tls, db, pLeft)
_sqlite3ExprDelete(tls, db, pRight)
} else {
if pRight != 0 {
(*TExpr)(unsafe.Pointer(pRoot)).FpRight = pRight
*(*Tu32)(unsafe.Pointer(pRoot + 4)) |= uint32(libc.Int32FromInt32(EP_Collate)|libc.Int32FromInt32(EP_Subquery)|libc.Int32FromInt32(EP_HasFunc)) & (*TExpr)(unsafe.Pointer(pRight)).Fflags
(*TExpr)(unsafe.Pointer(pRoot)).FnHeight = (*TExpr)(unsafe.Pointer(pRight)).FnHeight + int32(1)
} else {
(*TExpr)(unsafe.Pointer(pRoot)).FnHeight = int32(1)
}
if pLeft != 0 {
(*TExpr)(unsafe.Pointer(pRoot)).FpLeft = pLeft
*(*Tu32)(unsafe.Pointer(pRoot + 4)) |= uint32(libc.Int32FromInt32(EP_Collate)|libc.Int32FromInt32(EP_Subquery)|libc.Int32FromInt32(EP_HasFunc)) & (*TExpr)(unsafe.Pointer(pLeft)).Fflags
if (*TExpr)(unsafe.Pointer(pLeft)).FnHeight >= (*TExpr)(unsafe.Pointer(pRoot)).FnHeight {
(*TExpr)(unsafe.Pointer(pRoot)).FnHeight = (*TExpr)(unsafe.Pointer(pLeft)).FnHeight + int32(1)
}
}
}
}
// C documentation
//
// /*
// ** Allocate an Expr node which joins as many as two subtrees.
// **
// ** One or both of the subtrees can be NULL. Return a pointer to the new
// ** Expr node. Or, if an OOM error occurs, set pParse->db->mallocFailed,
// ** free the subtrees and return NULL.
// */
func _sqlite3PExpr(tls *libc.TLS, pParse uintptr, op int32, pLeft uintptr, pRight uintptr) (r uintptr) {
var p uintptr
_ = p
p = _sqlite3DbMallocRawNN(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, uint64(72))
if p != 0 {
libc.Xmemset(tls, p, 0, uint64(72))
(*TExpr)(unsafe.Pointer(p)).Fop = uint8(op & int32(0xff))
(*TExpr)(unsafe.Pointer(p)).FiAgg = int16(-int32(1))
_sqlite3ExprAttachSubtrees(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, p, pLeft, pRight)
_sqlite3ExprCheckHeight(tls, pParse, (*TExpr)(unsafe.Pointer(p)).FnHeight)
} else {
_sqlite3ExprDelete(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pLeft)
_sqlite3ExprDelete(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pRight)
}
return p
}
// C documentation
//
// /*
// ** Add pSelect to the Expr.x.pSelect field. Or, if pExpr is NULL (due
// ** to a memory allocation failure) then delete the pSelect object.
// */
func _sqlite3PExprAddSelect(tls *libc.TLS, pParse uintptr, pExpr uintptr, pSelect uintptr) {
if pExpr != 0 {
*(*uintptr)(unsafe.Pointer(pExpr + 32)) = pSelect
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_xIsSelect) | libc.Int32FromInt32(EP_Subquery))
_sqlite3ExprSetHeightAndFlags(tls, pParse, pExpr)
} else {
_sqlite3SelectDelete(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pSelect)
}
}
// C documentation
//
// /*
// ** Expression list pEList is a list of vector values. This function
// ** converts the contents of pEList to a VALUES(...) Select statement
// ** returning 1 row for each element of the list. For example, the
// ** expression list:
// **
// ** ( (1,2), (3,4), (5,6) )
// **
// ** is translated to the equivalent of:
// **
// ** VALUES(1,2), (3,4), (5,6)
// **
// ** Each of the vector values in pEList must contain exactly nElem terms.
// ** If a list element is not a vector or does not contain nElem terms,
// ** an error message is left in pParse.
// **
// ** This is used as part of processing IN(...) expressions with a list
// ** of vectors on the RHS. e.g. "... IN ((1,2), (3,4), (5,6))".
// */
func _sqlite3ExprListToValues(tls *libc.TLS, pParse uintptr, nElem int32, pEList uintptr) (r uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var ii, nExprElem int32
var pExpr, pRet, pSel, v2 uintptr
_, _, _, _, _, _ = ii, nExprElem, pExpr, pRet, pSel, v2
pRet = uintptr(0)
ii = 0
for {
if !(ii < (*TExprList)(unsafe.Pointer(pEList)).FnExpr) {
break
}
pExpr = (*(*TExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(ii)*32))).FpExpr
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_VECTOR) {
nExprElem = (*TExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FnExpr
} else {
nExprElem = int32(1)
}
if nExprElem != nElem {
if nExprElem > int32(1) {
v2 = __ccgo_ts + 7619
} else {
v2 = __ccgo_ts + 1650
}
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7621, libc.VaList(bp+8, nExprElem, v2, nElem))
break
}
pSel = _sqlite3SelectNew(tls, pParse, *(*uintptr)(unsafe.Pointer(pExpr + 32)), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uintptr(0), uint32(SF_Values), uintptr(0))
*(*uintptr)(unsafe.Pointer(pExpr + 32)) = uintptr(0)
if pSel != 0 {
if pRet != 0 {
(*TSelect)(unsafe.Pointer(pSel)).Fop = uint8(TK_ALL)
(*TSelect)(unsafe.Pointer(pSel)).FpPrior = pRet
}
pRet = pSel
}
goto _1
_1:
;
ii++
}
if pRet != 0 && (*TSelect)(unsafe.Pointer(pRet)).FpPrior != 0 {
*(*Tu32)(unsafe.Pointer(pRet + 4)) |= uint32(SF_MultiValue)
}
_sqlite3ExprListDelete(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pEList)
return pRet
}
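// For illustration (not from the original C comments; the exact error text
// lives in the string table referenced via __ccgo_ts): a width mismatch among
// the RHS vectors is caught here rather than at code generation time, e.g.
//
//	SELECT 1 WHERE (1,2) IN ((1,2), (3,4,5));
//	-- fails with an error roughly of the form
//	-- "IN(...) element has 3 terms - expected 2"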
// C documentation
//
// /*
// ** Join two expressions using an AND operator. If either expression is
// ** NULL, then just return the other expression.
// **
// ** If one side or the other of the AND is known to be false, and neither side
// ** is part of an ON clause, then instead of returning an AND expression,
// ** just return a constant expression with a value of false.
// */
func _sqlite3ExprAnd(tls *libc.TLS, pParse uintptr, pLeft uintptr, pRight uintptr) (r uintptr) {
var db uintptr
var f Tu32
_, _ = db, f
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if pLeft == uintptr(0) {
return pRight
} else {
if pRight == uintptr(0) {
return pLeft
} else {
f = (*TExpr)(unsafe.Pointer(pLeft)).Fflags | (*TExpr)(unsafe.Pointer(pRight)).Fflags
if f&uint32(libc.Int32FromInt32(EP_OuterON)|libc.Int32FromInt32(EP_InnerON)|libc.Int32FromInt32(EP_IsFalse)) == uint32(EP_IsFalse) && !(int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= libc.Int32FromInt32(PARSE_MODE_RENAME)) {
_sqlite3ExprDeferredDelete(tls, pParse, pLeft)
_sqlite3ExprDeferredDelete(tls, pParse, pRight)
return _sqlite3Expr(tls, db, int32(TK_INTEGER), __ccgo_ts+1724)
} else {
return _sqlite3PExpr(tls, pParse, int32(TK_AND), pLeft, pRight)
}
}
}
return r
}
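// For illustration (not from the original C comments): because a constant
// FALSE absorbs the whole conjunction when neither operand comes from an ON
// clause, a clause such as
//
//	WHERE x < 10 AND 0
//
// is reduced here to the single integer literal 0, and both original subtrees
// are handed to _sqlite3ExprDeferredDelete rather than freed immediately.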
// C documentation
//
// /*
// ** Construct a new expression node for a function with multiple
// ** arguments.
// */
func _sqlite3ExprFunction(tls *libc.TLS, pParse uintptr, pList uintptr, pToken uintptr, eDistinct int32) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var db, pNew uintptr
_, _ = db, pNew
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
pNew = _sqlite3ExprAlloc(tls, db, int32(TK_FUNCTION), pToken, int32(1))
if pNew == uintptr(0) {
_sqlite3ExprListDelete(tls, db, pList) /* Avoid memory leak when malloc fails */
return uintptr(0)
}
*(*int32)(unsafe.Pointer(pNew + 52)) = int32(int64((*TToken)(unsafe.Pointer(pToken)).Fz) - int64((*TParse)(unsafe.Pointer(pParse)).FzTail))
if pList != 0 && (*TExprList)(unsafe.Pointer(pList)).FnExpr > *(*int32)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb + 136 + 6*4)) && !((*TParse)(unsafe.Pointer(pParse)).Fnested != 0) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7665, libc.VaList(bp+8, pToken))
}
*(*uintptr)(unsafe.Pointer(pNew + 32)) = pList
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= uint32(libc.Int32FromInt32(EP_HasFunc))
_sqlite3ExprSetHeightAndFlags(tls, pParse, pNew)
if eDistinct == int32(SF_Distinct) {
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= uint32(libc.Int32FromInt32(EP_Distinct))
}
return pNew
}
// C documentation
//
// /*
// ** Report an error when attempting to use an ORDER BY clause within
// ** the arguments of a non-aggregate function.
// */
func _sqlite3ExprOrderByAggregateError(tls *libc.TLS, pParse uintptr, p uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7699, libc.VaList(bp+8, p))
}
// C documentation
//
// /*
// ** Attach an ORDER BY clause to a function call.
// **
// **         functionname( arguments ORDER BY sortlist )
// **         \_____________________/          \______/
// **             pExpr                         pOrderBy
// **
// ** The ORDER BY clause is inserted into a new Expr node of type TK_ORDER
// ** and added to the Expr.pLeft field of the parent TK_FUNCTION node.
// */
func _sqlite3ExprAddFunctionOrderBy(tls *libc.TLS, pParse uintptr, pExpr uintptr, pOrderBy uintptr) {
var db, pOB uintptr
_, _ = db, pOB
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if pOrderBy == uintptr(0) {
return
}
if pExpr == uintptr(0) {
_sqlite3ExprListDelete(tls, db, pOrderBy)
return
}
if *(*uintptr)(unsafe.Pointer(pExpr + 32)) == uintptr(0) || (*TExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FnExpr == 0 {
/* Ignore ORDER BY on zero-argument aggregates */
_sqlite3ParserAddCleanup(tls, pParse, __ccgo_fp(_sqlite3ExprListDeleteGeneric), pOrderBy)
return
}
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) && int32((*TWindow)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 64)))).FeFrmType) != int32(TK_FILTER) {
_sqlite3ExprOrderByAggregateError(tls, pParse, pExpr)
_sqlite3ExprListDelete(tls, db, pOrderBy)
return
}
pOB = _sqlite3ExprAlloc(tls, db, int32(TK_ORDER), uintptr(0), 0)
if pOB == uintptr(0) {
_sqlite3ExprListDelete(tls, db, pOrderBy)
return
}
*(*uintptr)(unsafe.Pointer(pOB + 32)) = pOrderBy
(*TExpr)(unsafe.Pointer(pExpr)).FpLeft = pOB
*(*Tu32)(unsafe.Pointer(pOB + 4)) |= uint32(libc.Int32FromInt32(EP_FullSize))
}
// C documentation
//
// /*
// ** Check to see if a function is usable according to current access
// ** rules:
// **
// **      SQLITE_FUNC_DIRECT    -    Only usable from top-level SQL
// **
// **      SQLITE_FUNC_UNSAFE    -    Usable if TRUSTED_SCHEMA or from
// **                                 top-level SQL
// **
// ** If the function is not usable, create an error.
// */
func _sqlite3ExprFunctionUsable(tls *libc.TLS, pParse uintptr, pExpr uintptr, pDef uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_FromDDL)) != uint32(0) {
if (*TFuncDef)(unsafe.Pointer(pDef)).FfuncFlags&uint32(SQLITE_FUNC_DIRECT) != uint32(0) || (*Tsqlite3)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb)).Fflags&uint64(SQLITE_TrustedSchema) == uint64(0) {
/* Functions prohibited in triggers and views if:
** (1) tagged with SQLITE_DIRECTONLY
** (2) not tagged with SQLITE_INNOCUOUS (which means it
** is tagged with SQLITE_FUNC_UNSAFE) and
** SQLITE_DBCONFIG_TRUSTED_SCHEMA is off (meaning
** that the schema is possibly tainted).
*/
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7749, libc.VaList(bp+8, pExpr))
}
}
}
// C documentation
//
// /*
// ** Assign a variable number to an expression that encodes a wildcard
// ** in the original SQL statement.
// **
// ** Wildcards consisting of a single "?" are assigned the next sequential
// ** variable number.
// **
// ** Wildcards of the form "?nnn" are assigned the number "nnn". We make
// ** sure "nnn" is not too big to avoid a denial of service attack when
// ** the SQL statement comes from an external source.
// **
// ** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number
// ** as the previous instance of the same wildcard. Or if this is the first
// ** instance of the wildcard, the next sequential variable number is
// ** assigned.
// */
func _sqlite3ExprAssignVarNumber(tls *libc.TLS, pParse uintptr, pExpr uintptr, n Tu32) {
bp := tls.Alloc(32)
defer tls.Free(32)
var bOk, doAdd int32
var db, z, v2, v4 uintptr
var x, v1, v3 TynVar
var _ /* i at bp+0 */ Ti64
_, _, _, _, _, _, _, _, _ = bOk, db, doAdd, x, z, v1, v2, v3, v4
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if pExpr == uintptr(0) {
return
}
z = *(*uintptr)(unsafe.Pointer(pExpr + 8))
if int32(*(*int8)(unsafe.Pointer(z + 1))) == 0 {
/* Wildcard of the form "?". Assign the next variable number */
v2 = pParse + 304
*(*TynVar)(unsafe.Pointer(v2))++
v1 = *(*TynVar)(unsafe.Pointer(v2))
x = v1
} else {
doAdd = 0
if int32(*(*int8)(unsafe.Pointer(z))) == int32('?') {
if n == uint32(2) { /*OPTIMIZATION-IF-TRUE*/
*(*Ti64)(unsafe.Pointer(bp)) = int64(int32(*(*int8)(unsafe.Pointer(z + 1))) - int32('0')) /* The common case of ?N for a single digit N */
bOk = int32(1)
} else {
bOk = libc.BoolInt32(0 == _sqlite3Atoi64(tls, z+1, bp, int32(n-uint32(1)), uint8(SQLITE_UTF8)))
}
if bOk == 0 || *(*Ti64)(unsafe.Pointer(bp)) < int64(1) || *(*Ti64)(unsafe.Pointer(bp)) > int64(*(*int32)(unsafe.Pointer(db + 136 + 9*4))) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7769, libc.VaList(bp+16, *(*int32)(unsafe.Pointer(db + 136 + 9*4))))
_sqlite3RecordErrorOffsetOfExpr(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pExpr)
return
}
x = int16(*(*Ti64)(unsafe.Pointer(bp)))
if int32(x) > int32((*TParse)(unsafe.Pointer(pParse)).FnVar) {
(*TParse)(unsafe.Pointer(pParse)).FnVar = int16(int32(x))
doAdd = int32(1)
} else {
if _sqlite3VListNumToName(tls, (*TParse)(unsafe.Pointer(pParse)).FpVList, int32(x)) == uintptr(0) {
doAdd = int32(1)
}
}
} else {
/* Wildcards like ":aaa", "$aaa" or "@aaa". Reuse the same variable
** number as the prior appearance of the same name, or if the name
** has never appeared before, assign the next available variable number
*/
x = int16(_sqlite3VListNameToNum(tls, (*TParse)(unsafe.Pointer(pParse)).FpVList, z, int32(n)))
if int32(x) == 0 {
v4 = pParse + 304
*(*TynVar)(unsafe.Pointer(v4))++
v3 = *(*TynVar)(unsafe.Pointer(v4))
x = v3
doAdd = int32(1)
}
}
if doAdd != 0 {
(*TParse)(unsafe.Pointer(pParse)).FpVList = _sqlite3VListAdd(tls, db, (*TParse)(unsafe.Pointer(pParse)).FpVList, z, int32(n), int32(x))
}
}
(*TExpr)(unsafe.Pointer(pExpr)).FiColumn = x
if int32(x) > *(*int32)(unsafe.Pointer(db + 136 + 9*4)) {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7812, 0)
_sqlite3RecordErrorOffsetOfExpr(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pExpr)
}
}
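// For illustration (not from the original C comments): given the statement
//
//	SELECT ?, ?5, :aaa, ?, :aaa
//
// the parameters are numbered 1, 5, 6, 7 and 6 respectively: a bare "?" takes
// the next sequential number, "?5" forces the counter up to 5, and ":aaa"
// reuses its earlier number on its second appearance.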
// C documentation
//
// /*
// ** Recursively delete an expression tree.
// */
func _sqlite3ExprDeleteNN(tls *libc.TLS, db uintptr, p uintptr) {
if !((*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_TokenOnly)|libc.Int32FromInt32(EP_Leaf)) != libc.Uint32FromInt32(0)) {
/* The Expr.x union is never used at the same time as Expr.pRight */
if (*TExpr)(unsafe.Pointer(p)).FpLeft != 0 && int32((*TExpr)(unsafe.Pointer(p)).Fop) != int32(TK_SELECT_COLUMN) {
_sqlite3ExprDeleteNN(tls, db, (*TExpr)(unsafe.Pointer(p)).FpLeft)
}
if (*TExpr)(unsafe.Pointer(p)).FpRight != 0 {
_sqlite3ExprDeleteNN(tls, db, (*TExpr)(unsafe.Pointer(p)).FpRight)
} else {
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
_sqlite3SelectDelete(tls, db, *(*uintptr)(unsafe.Pointer(p + 32)))
} else {
_sqlite3ExprListDelete(tls, db, *(*uintptr)(unsafe.Pointer(p + 32)))
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
_sqlite3WindowDelete(tls, db, *(*uintptr)(unsafe.Pointer(p + 64)))
}
}
}
}
if !((*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_Static)) != libc.Uint32FromInt32(0)) {
_sqlite3DbNNFreeNN(tls, db, p)
}
}
func _sqlite3ExprDelete(tls *libc.TLS, db uintptr, p uintptr) {
if p != 0 {
_sqlite3ExprDeleteNN(tls, db, p)
}
}
func _sqlite3ExprDeleteGeneric(tls *libc.TLS, db uintptr, p uintptr) {
if p != 0 {
_sqlite3ExprDeleteNN(tls, db, p)
}
}
// C documentation
//
// /*
// ** Clear both elements of an OnOrUsing object
// */
func _sqlite3ClearOnOrUsing(tls *libc.TLS, db uintptr, p uintptr) {
if p == uintptr(0) {
/* Nothing to clear */
} else {
if (*TOnOrUsing)(unsafe.Pointer(p)).FpOn != 0 {
_sqlite3ExprDeleteNN(tls, db, (*TOnOrUsing)(unsafe.Pointer(p)).FpOn)
} else {
if (*TOnOrUsing)(unsafe.Pointer(p)).FpUsing != 0 {
_sqlite3IdListDelete(tls, db, (*TOnOrUsing)(unsafe.Pointer(p)).FpUsing)
}
}
}
}
// C documentation
//
// /*
// ** Arrange to cause pExpr to be deleted when the pParse is deleted.
// ** This is similar to sqlite3ExprDelete() except that the delete is
// ** deferred until the pParse is deleted.
// **
// ** The pExpr might be deleted immediately on an OOM error.
// **
// ** The deferred delete is (currently) implemented by adding the
// ** pExpr to the pParse->pConstExpr list with a register number of 0.
// */
func _sqlite3ExprDeferredDelete(tls *libc.TLS, pParse uintptr, pExpr uintptr) {
_sqlite3ParserAddCleanup(tls, pParse, __ccgo_fp(_sqlite3ExprDeleteGeneric), pExpr)
}
// C documentation
//
// /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the
// ** expression.
// */
func _sqlite3ExprUnmapAndDelete(tls *libc.TLS, pParse uintptr, p uintptr) {
if p != 0 {
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
_sqlite3RenameExprUnmap(tls, pParse, p)
}
_sqlite3ExprDeleteNN(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, p)
}
}
// C documentation
//
// /*
// ** Return the number of bytes allocated for the expression structure
// ** passed as the first argument. This is always one of EXPR_FULLSIZE,
// ** EXPR_REDUCEDSIZE or EXPR_TOKENONLYSIZE.
// */
func _exprStructSize(tls *libc.TLS, p uintptr) (r int32) {
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_TokenOnly)) != uint32(0) {
return int32(uint64(libc.UintptrFromInt32(0) + 16))
}
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_Reduced)) != uint32(0) {
return int32(uint64(libc.UintptrFromInt32(0) + 44))
}
return int32(72)
}
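// Note (not from the original C comments): the literal sizes above are the
// generator's expansion of the C constants EXPR_TOKENONLYSIZE (16),
// EXPR_REDUCEDSIZE (44) and EXPR_FULLSIZE (72, the full sizeof(Expr) for this
// freebsd/amd64 build).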
// C documentation
//
// /*
// ** The dupedExpr*Size() routines each return the number of bytes required
// ** to store a copy of an expression or expression tree. They differ in
// ** how much of the tree is measured.
// **
// **     dupedExprStructSize()     Size of only the Expr structure
// **     dupedExprNodeSize()       Size of Expr + space for token
// **     dupedExprSize()           Expr + token + subtree components
// **
// ***************************************************************************
// **
// ** The dupedExprStructSize() function returns two values OR-ed together:
// ** (1) the space required for a copy of the Expr structure only and
// ** (2) the EP_xxx flags that indicate what the structure size should be.
// ** The return value is always one of:
// **
// ** EXPR_FULLSIZE
// ** EXPR_REDUCEDSIZE | EP_Reduced
// ** EXPR_TOKENONLYSIZE | EP_TokenOnly
// **
// ** The size of the structure can be found by masking the return value
// ** of this routine with 0xfff. The flags can be found by masking the
// ** return value with EP_Reduced|EP_TokenOnly.
// **
// ** Note that with flags==EXPRDUP_REDUCE, this routine works on full-size
// ** (unreduced) Expr objects as they are originally constructed by the parser.
// ** During expression analysis, extra information is computed and moved into
// ** later parts of the Expr object and that extra information might get chopped
// ** off if the expression is reduced. Note also that it does not work to
// ** make an EXPRDUP_REDUCE copy of a reduced expression. It is only legal
// ** to reduce a pristine expression tree from the parser. The implementation
// ** of dupedExprStructSize() contains multiple assert() statements that attempt
// ** to enforce this constraint.
// */
func _dupedExprStructSize(tls *libc.TLS, p uintptr, flags int32) (r int32) {
var nSize int32
_ = nSize
/* Only one flag value allowed */
if 0 == flags || (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_FullSize)) != uint32(0) {
nSize = int32(72)
} else {
if (*TExpr)(unsafe.Pointer(p)).FpLeft != 0 || *(*uintptr)(unsafe.Pointer(p + 32)) != 0 {
nSize = int32(uint64(libc.UintptrFromInt32(0)+44) | libc.Uint64FromInt32(EP_Reduced))
} else {
nSize = int32(uint64(libc.UintptrFromInt32(0)+16) | libc.Uint64FromInt32(EP_TokenOnly))
}
}
return nSize
}
// C documentation
//
// /*
// ** This function returns the space in bytes required to store the copy
// ** of the Expr structure and a copy of the Expr.u.zToken string (if that
// ** string is defined).
// */
func _dupedExprNodeSize(tls *libc.TLS, p uintptr, flags int32) (r int32) {
var nByte int32
_ = nByte
nByte = _dupedExprStructSize(tls, p, flags) & int32(0xfff)
if !((*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_IntValue)) != libc.Uint32FromInt32(0)) && *(*uintptr)(unsafe.Pointer(p + 8)) != 0 {
nByte = int32(uint64(nByte) + (libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(p + 8)))&libc.Uint64FromInt32(0x3fffffff) + libc.Uint64FromInt32(1)))
}
return (nByte + int32(7)) & ^libc.Int32FromInt32(7)
}
// C documentation
//
// /*
// ** Return the number of bytes required to create a duplicate of the
// ** expression passed as the first argument.
// **
// ** The value returned includes space to create a copy of the Expr struct
// ** itself and the buffer referred to by Expr.u.zToken, if any.
// **
// ** The return value includes space to duplicate all Expr nodes in the
// ** tree formed by Expr.pLeft and Expr.pRight, but not any other
// ** substructure such as Expr.x.pList, Expr.x.pSelect, and Expr.y.pWin.
// */
func _dupedExprSize(tls *libc.TLS, p uintptr) (r int32) {
var nByte int32
_ = nByte
nByte = _dupedExprNodeSize(tls, p, int32(EXPRDUP_REDUCE))
if (*TExpr)(unsafe.Pointer(p)).FpLeft != 0 {
nByte += _dupedExprSize(tls, (*TExpr)(unsafe.Pointer(p)).FpLeft)
}
if (*TExpr)(unsafe.Pointer(p)).FpRight != 0 {
nByte += _dupedExprSize(tls, (*TExpr)(unsafe.Pointer(p)).FpRight)
}
return nByte
}
// C documentation
//
// /*
// ** An EdupBuf is a memory allocation used to store multiple Expr objects
// ** together with their Expr.zToken content. This is used to help implement
// ** compression while doing sqlite3ExprDup(). The top-level Expr does the
// ** allocation for itself and many of its descendants, then passes an instance
// ** of the structure down into exprDup() so that those descendants can have
// ** access to that memory.
// */
type TEdupBuf = struct {
FzAlloc uintptr
}
type EdupBuf = TEdupBuf
type TEdupBuf1 = struct {
FzAlloc uintptr
}
type EdupBuf1 = TEdupBuf1
// C documentation
//
// /*
// ** This function is similar to sqlite3ExprDup(), except that if pEdupBuf
// ** is not NULL then it points to memory that can be used to store a copy
// ** of the input Expr p together with its p->u.zToken (if any). pEdupBuf
// ** is updated with the new buffer tail prior to returning.
// */
func _exprDup(tls *libc.TLS, db uintptr, p uintptr, dupFlags int32, pEdupBuf uintptr) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var nAlloc, nNewSize, nToken, v2 int32
var nSize, staticFlag Tu32
var nStructSize uint32
var pNew, zToken, v1, v3, v4 uintptr
var _ /* sEdupBuf at bp+0 */ TEdupBuf
_, _, _, _, _, _, _, _, _, _, _, _ = nAlloc, nNewSize, nSize, nStructSize, nToken, pNew, staticFlag, zToken, v1, v2, v3, v4 /* EP_Static if space not obtained from malloc */
nToken = -int32(1) /* Space needed for p->u.zToken. -1 means unknown */
/* Figure out where to write the new Expr structure. */
if pEdupBuf != 0 {
(*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc = (*TEdupBuf)(unsafe.Pointer(pEdupBuf)).FzAlloc
staticFlag = uint32(EP_Static)
} else {
if dupFlags != 0 {
nAlloc = _dupedExprSize(tls, p)
} else {
if !((*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_IntValue)) != libc.Uint32FromInt32(0)) && *(*uintptr)(unsafe.Pointer(p + 8)) != 0 {
nToken = int32(libc.Xstrlen(tls, *(*uintptr)(unsafe.Pointer(p + 8)))&uint64(0x3fffffff) + uint64(1))
nAlloc = int32((libc.Uint64FromInt64(72) + uint64(nToken) + libc.Uint64FromInt32(7)) & uint64(^libc.Int32FromInt32(7)))
} else {
nToken = 0
nAlloc = int32((libc.Uint64FromInt64(72) + libc.Uint64FromInt32(7)) & uint64(^libc.Int32FromInt32(7)))
}
}
(*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc = _sqlite3DbMallocRawNN(tls, db, uint64(nAlloc))
staticFlag = uint32(0)
}
pNew = (*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc
if pNew != 0 {
/* Set nNewSize to the size allocated for the structure pointed to
** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or
** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed
** by the copy of the p->u.zToken string (if any).
*/
nStructSize = uint32(_dupedExprStructSize(tls, p, dupFlags))
nNewSize = int32(nStructSize & uint32(0xfff))
if nToken < 0 {
if !((*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_IntValue)) != libc.Uint32FromInt32(0)) && *(*uintptr)(unsafe.Pointer(p + 8)) != 0 {
nToken = _sqlite3Strlen30(tls, *(*uintptr)(unsafe.Pointer(p + 8))) + int32(1)
} else {
nToken = 0
}
}
if dupFlags != 0 {
libc.Xmemcpy(tls, (*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc, p, uint64(nNewSize))
} else {
nSize = uint32(_exprStructSize(tls, p))
libc.Xmemcpy(tls, (*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc, p, uint64(nSize))
if uint64(nSize) < uint64(72) {
libc.Xmemset(tls, (*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc+uintptr(nSize), 0, uint64(72)-uint64(nSize))
}
nNewSize = int32(72)
}
/* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */
*(*Tu32)(unsafe.Pointer(pNew + 4)) &= uint32(^(libc.Int32FromInt32(EP_Reduced) | libc.Int32FromInt32(EP_TokenOnly) | libc.Int32FromInt32(EP_Static)))
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= nStructSize & uint32(libc.Int32FromInt32(EP_Reduced)|libc.Int32FromInt32(EP_TokenOnly))
*(*Tu32)(unsafe.Pointer(pNew + 4)) |= staticFlag
if dupFlags != 0 {
}
/* Copy the p->u.zToken string, if any. */
if nToken > 0 {
v1 = (*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc + uintptr(nNewSize)
*(*uintptr)(unsafe.Pointer(pNew + 8)) = v1
zToken = v1
libc.Xmemcpy(tls, zToken, *(*uintptr)(unsafe.Pointer(p + 8)), uint64(nToken))
nNewSize += nToken
}
(*(*TEdupBuf)(unsafe.Pointer(bp))).FzAlloc += uintptr((nNewSize + libc.Int32FromInt32(7)) & ^libc.Int32FromInt32(7))
if ((*TExpr)(unsafe.Pointer(p)).Fflags|(*TExpr)(unsafe.Pointer(pNew)).Fflags)&uint32(libc.Int32FromInt32(EP_TokenOnly)|libc.Int32FromInt32(EP_Leaf)) == uint32(0) {
/* Fill in the pNew->x.pSelect or pNew->x.pList member. */
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
*(*uintptr)(unsafe.Pointer(pNew + 32)) = _sqlite3SelectDup(tls, db, *(*uintptr)(unsafe.Pointer(p + 32)), dupFlags)
} else {
if int32((*TExpr)(unsafe.Pointer(p)).Fop) != int32(TK_ORDER) {
v2 = dupFlags
} else {
v2 = 0
}
*(*uintptr)(unsafe.Pointer(pNew + 32)) = _sqlite3ExprListDup(tls, db, *(*uintptr)(unsafe.Pointer(p + 32)), v2)
}
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
*(*uintptr)(unsafe.Pointer(pNew + 64)) = _sqlite3WindowDup(tls, db, pNew, *(*uintptr)(unsafe.Pointer(p + 64)))
}
/* Fill in pNew->pLeft and pNew->pRight. */
if dupFlags != 0 {
if int32((*TExpr)(unsafe.Pointer(p)).Fop) == int32(TK_SELECT_COLUMN) {
(*TExpr)(unsafe.Pointer(pNew)).FpLeft = (*TExpr)(unsafe.Pointer(p)).FpLeft
} else {
if (*TExpr)(unsafe.Pointer(p)).FpLeft != 0 {
v3 = _exprDup(tls, db, (*TExpr)(unsafe.Pointer(p)).FpLeft, int32(EXPRDUP_REDUCE), bp)
} else {
v3 = uintptr(0)
}
(*TExpr)(unsafe.Pointer(pNew)).FpLeft = v3
}
if (*TExpr)(unsafe.Pointer(p)).FpRight != 0 {
v4 = _exprDup(tls, db, (*TExpr)(unsafe.Pointer(p)).FpRight, int32(EXPRDUP_REDUCE), bp)
} else {
v4 = uintptr(0)
}
(*TExpr)(unsafe.Pointer(pNew)).FpRight = v4
} else {
if int32((*TExpr)(unsafe.Pointer(p)).Fop) == int32(TK_SELECT_COLUMN) {
(*TExpr)(unsafe.Pointer(pNew)).FpLeft = (*TExpr)(unsafe.Pointer(p)).FpLeft
} else {
(*TExpr)(unsafe.Pointer(pNew)).FpLeft = _sqlite3ExprDup(tls, db, (*TExpr)(unsafe.Pointer(p)).FpLeft, 0)
}
(*TExpr)(unsafe.Pointer(pNew)).FpRight = _sqlite3ExprDup(tls, db, (*TExpr)(unsafe.Pointer(p)).FpRight, 0)
}
}
}
if pEdupBuf != 0 {
libc.Xmemcpy(tls, pEdupBuf, bp, uint64(8))
}
return pNew
}
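// For illustration (a sketch of the EXPRDUP_REDUCE path above, not from the
// original C comments): the top-level call measures the whole pLeft/pRight
// tree with _dupedExprSize, obtains a single allocation, and threads the
// remaining space through sEdupBuf into the recursive calls, so copying an
// expression such as
//
//	a = b + 1
//
// packs every operator node and its token text into one contiguous buffer
// instead of one allocation per node. Substructure reached through x.pList,
// x.pSelect or y.pWin is still duplicated with separate allocations.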
// C documentation
//
// /*
// ** Create and return a deep copy of the object passed as the second
// ** argument. If an OOM condition is encountered, NULL is returned
// ** and the db->mallocFailed flag set.
// */
func _sqlite3WithDup(tls *libc.TLS, db uintptr, p uintptr) (r uintptr) {
var i int32
var nByte Tsqlite3_int64
var pRet uintptr
_, _, _ = i, nByte, pRet
pRet = uintptr(0)
if p != 0 {
nByte = int64(uint64(64) + uint64(48)*uint64((*TWith)(unsafe.Pointer(p)).FnCte-libc.Int32FromInt32(1)))
pRet = _sqlite3DbMallocZero(tls, db, uint64(nByte))
if pRet != 0 {
(*TWith)(unsafe.Pointer(pRet)).FnCte = (*TWith)(unsafe.Pointer(p)).FnCte
i = 0
for {
if !(i < (*TWith)(unsafe.Pointer(p)).FnCte) {
break
}
(*(*TCte)(unsafe.Pointer(pRet + 16 + uintptr(i)*48))).FpSelect = _sqlite3SelectDup(tls, db, (*(*TCte)(unsafe.Pointer(p + 16 + uintptr(i)*48))).FpSelect, 0)
(*(*TCte)(unsafe.Pointer(pRet + 16 + uintptr(i)*48))).FpCols = _sqlite3ExprListDup(tls, db, (*(*TCte)(unsafe.Pointer(p + 16 + uintptr(i)*48))).FpCols, 0)
(*(*TCte)(unsafe.Pointer(pRet + 16 + uintptr(i)*48))).FzName = _sqlite3DbStrDup(tls, db, (*(*TCte)(unsafe.Pointer(p + 16 + uintptr(i)*48))).FzName)
(*(*TCte)(unsafe.Pointer(pRet + 16 + uintptr(i)*48))).FeM10d = (*(*TCte)(unsafe.Pointer(p + 16 + uintptr(i)*48))).FeM10d
goto _1
_1:
;
i++
}
}
}
return pRet
}
// C documentation
//
// /*
// ** The gatherSelectWindows() procedure and its helper routine
// ** gatherSelectWindowsCallback() are used to scan all the expressions
// ** in a newly duplicated SELECT statement and gather all of the Window
// ** objects found there, assembling them onto the linked list at Select->pWin.
// */
func _gatherSelectWindowsCallback(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
var pSelect, pWin uintptr
_, _ = pSelect, pWin
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_FUNCTION) && (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != uint32(0) {
pSelect = *(*uintptr)(unsafe.Pointer(pWalker + 40))
pWin = *(*uintptr)(unsafe.Pointer(pExpr + 64))
_sqlite3WindowLink(tls, pSelect, pWin)
}
return WRC_Continue
}
func _gatherSelectWindowsSelectCallback(tls *libc.TLS, pWalker uintptr, p uintptr) (r int32) {
var v1 int32
_ = v1
if p == *(*uintptr)(unsafe.Pointer(pWalker + 40)) {
v1 = WRC_Continue
} else {
v1 = int32(WRC_Prune)
}
return v1
}
func _gatherSelectWindows(tls *libc.TLS, p uintptr) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* w at bp+0 */ TWalker
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_gatherSelectWindowsCallback)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback = __ccgo_fp(_gatherSelectWindowsSelectCallback)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback2 = uintptr(0)
(*(*TWalker)(unsafe.Pointer(bp))).FpParse = uintptr(0)
*(*uintptr)(unsafe.Pointer(bp + 40)) = p
_sqlite3WalkSelect(tls, bp, p)
}
// C documentation
//
// /*
// ** The following group of routines make deep copies of expressions,
// ** expression lists, ID lists, and select statements. The copies can
// ** be deleted (by being passed to their respective ...Delete() routines)
// ** without affecting the originals.
// **
// ** The expression list, ID, and source lists returned by sqlite3ExprListDup(),
// ** sqlite3IdListDup(), and sqlite3SrcListDup() cannot be further expanded
// ** by subsequent calls to sqlite*ListAppend() routines.
// **
// ** Any tables that the SrcList might point to are not duplicated.
// **
// ** The flags parameter contains a combination of the EXPRDUP_XXX flags.
// ** If the EXPRDUP_REDUCE flag is set, then the structure returned is a
// ** truncated version of the usual Expr structure that will be stored as
// ** part of the in-memory representation of the database schema.
// */
func _sqlite3ExprDup(tls *libc.TLS, db uintptr, p uintptr, flags int32) (r uintptr) {
var v1 uintptr
_ = v1
if p != 0 {
v1 = _exprDup(tls, db, p, flags, uintptr(0))
} else {
v1 = uintptr(0)
}
return v1
}
func _sqlite3ExprListDup(tls *libc.TLS, db uintptr, p uintptr, flags int32) (r uintptr) {
var i int32
var pItem, pNew, pNewExpr, pOldExpr, pOldItem, pPriorSelectColNew, pPriorSelectColOld, v2 uintptr
var v3 bool
_, _, _, _, _, _, _, _, _, _ = i, pItem, pNew, pNewExpr, pOldExpr, pOldItem, pPriorSelectColNew, pPriorSelectColOld, v2, v3
pPriorSelectColOld = uintptr(0)
pPriorSelectColNew = uintptr(0)
if p == uintptr(0) {
return uintptr(0)
}
pNew = _sqlite3DbMallocRawNN(tls, db, uint64(_sqlite3DbMallocSize(tls, db, p)))
if pNew == uintptr(0) {
return uintptr(0)
}
(*TExprList)(unsafe.Pointer(pNew)).FnExpr = (*TExprList)(unsafe.Pointer(p)).FnExpr
(*TExprList)(unsafe.Pointer(pNew)).FnAlloc = (*TExprList)(unsafe.Pointer(p)).FnAlloc
pItem = pNew + 8
pOldItem = p + 8
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(p)).FnExpr) {
break
}
pOldExpr = (*TExprList_item)(unsafe.Pointer(pOldItem)).FpExpr
(*TExprList_item)(unsafe.Pointer(pItem)).FpExpr = _sqlite3ExprDup(tls, db, pOldExpr, flags)
if v3 = pOldExpr != 0 && int32((*TExpr)(unsafe.Pointer(pOldExpr)).Fop) == int32(TK_SELECT_COLUMN); v3 {
v2 = (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr
pNewExpr = v2
}
if v3 && v2 != uintptr(0) {
if (*TExpr)(unsafe.Pointer(pNewExpr)).FpRight != 0 {
pPriorSelectColOld = (*TExpr)(unsafe.Pointer(pOldExpr)).FpRight
pPriorSelectColNew = (*TExpr)(unsafe.Pointer(pNewExpr)).FpRight
(*TExpr)(unsafe.Pointer(pNewExpr)).FpLeft = (*TExpr)(unsafe.Pointer(pNewExpr)).FpRight
} else {
if (*TExpr)(unsafe.Pointer(pOldExpr)).FpLeft != pPriorSelectColOld {
pPriorSelectColOld = (*TExpr)(unsafe.Pointer(pOldExpr)).FpLeft
pPriorSelectColNew = _sqlite3ExprDup(tls, db, pPriorSelectColOld, flags)
(*TExpr)(unsafe.Pointer(pNewExpr)).FpRight = pPriorSelectColNew
}
(*TExpr)(unsafe.Pointer(pNewExpr)).FpLeft = pPriorSelectColNew
}
}
(*TExprList_item)(unsafe.Pointer(pItem)).FzEName = _sqlite3DbStrDup(tls, db, (*TExprList_item)(unsafe.Pointer(pOldItem)).FzEName)
(*TExprList_item)(unsafe.Pointer(pItem)).Ffg = (*TExprList_item)(unsafe.Pointer(pOldItem)).Ffg
libc.SetBitFieldPtr16Uint32(pItem+16+4, libc.Uint32FromInt32(0), 2, 0x4)
(*TExprList_item)(unsafe.Pointer(pItem)).Fu = (*TExprList_item)(unsafe.Pointer(pOldItem)).Fu
goto _1
_1:
;
i++
pItem += 32
pOldItem += 32
}
return pNew
}
// C documentation
//
// /*
// ** If cursors, triggers, views and subqueries are all omitted from
// ** the build, then none of the following routines, except for
// ** sqlite3SelectDup(), can be called. sqlite3SelectDup() is sometimes
// ** called with a NULL argument.
// */
func _sqlite3SrcListDup(tls *libc.TLS, db uintptr, p uintptr, flags int32) (r uintptr) {
var i, nByte int32
var pNew, pNewItem, pOldItem, pTab, v4 uintptr
var v1 uint64
var v2 Tu32
_, _, _, _, _, _, _, _, _ = i, nByte, pNew, pNewItem, pOldItem, pTab, v1, v2, v4
if p == uintptr(0) {
return uintptr(0)
}
if (*TSrcList)(unsafe.Pointer(p)).FnSrc > 0 {
v1 = uint64(104) * uint64((*TSrcList)(unsafe.Pointer(p)).FnSrc-libc.Int32FromInt32(1))
} else {
v1 = uint64(0)
}
nByte = int32(uint64(112) + v1)
pNew = _sqlite3DbMallocRawNN(tls, db, uint64(nByte))
if pNew == uintptr(0) {
return uintptr(0)
}
v2 = uint32((*TSrcList)(unsafe.Pointer(p)).FnSrc)
(*TSrcList)(unsafe.Pointer(pNew)).FnAlloc = v2
(*TSrcList)(unsafe.Pointer(pNew)).FnSrc = int32(v2)
i = 0
for {
if !(i < (*TSrcList)(unsafe.Pointer(p)).FnSrc) {
break
}
pNewItem = pNew + 8 + uintptr(i)*104
pOldItem = p + 8 + uintptr(i)*104
(*TSrcItem)(unsafe.Pointer(pNewItem)).FpSchema = (*TSrcItem)(unsafe.Pointer(pOldItem)).FpSchema
(*TSrcItem)(unsafe.Pointer(pNewItem)).FzDatabase = _sqlite3DbStrDup(tls, db, (*TSrcItem)(unsafe.Pointer(pOldItem)).FzDatabase)
(*TSrcItem)(unsafe.Pointer(pNewItem)).FzName = _sqlite3DbStrDup(tls, db, (*TSrcItem)(unsafe.Pointer(pOldItem)).FzName)
(*TSrcItem)(unsafe.Pointer(pNewItem)).FzAlias = _sqlite3DbStrDup(tls, db, (*TSrcItem)(unsafe.Pointer(pOldItem)).FzAlias)
(*TSrcItem)(unsafe.Pointer(pNewItem)).Ffg = (*TSrcItem)(unsafe.Pointer(pOldItem)).Ffg
(*TSrcItem)(unsafe.Pointer(pNewItem)).FiCursor = (*TSrcItem)(unsafe.Pointer(pOldItem)).FiCursor
(*TSrcItem)(unsafe.Pointer(pNewItem)).FaddrFillSub = (*TSrcItem)(unsafe.Pointer(pOldItem)).FaddrFillSub
(*TSrcItem)(unsafe.Pointer(pNewItem)).FregReturn = (*TSrcItem)(unsafe.Pointer(pOldItem)).FregReturn
if int32(uint32(*(*uint16)(unsafe.Pointer(pNewItem + 60 + 4))&0x2>>1)) != 0 {
*(*uintptr)(unsafe.Pointer(pNewItem + 88)) = _sqlite3DbStrDup(tls, db, *(*uintptr)(unsafe.Pointer(pOldItem + 88)))
}
(*TSrcItem)(unsafe.Pointer(pNewItem)).Fu2 = (*TSrcItem)(unsafe.Pointer(pOldItem)).Fu2
if int32(uint32(*(*uint16)(unsafe.Pointer(pNewItem + 60 + 4))&0x100>>8)) != 0 {
(*TCteUse)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pNewItem + 96)))).FnUse++
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pNewItem + 60 + 4))&0x4>>2)) != 0 {
*(*uintptr)(unsafe.Pointer(pNewItem + 88)) = _sqlite3ExprListDup(tls, db, *(*uintptr)(unsafe.Pointer(pOldItem + 88)), flags)
}
v4 = (*TSrcItem)(unsafe.Pointer(pOldItem)).FpTab
(*TSrcItem)(unsafe.Pointer(pNewItem)).FpTab = v4
pTab = v4
if pTab != 0 {
(*TTable)(unsafe.Pointer(pTab)).FnTabRef++
}
(*TSrcItem)(unsafe.Pointer(pNewItem)).FpSelect = _sqlite3SelectDup(tls, db, (*TSrcItem)(unsafe.Pointer(pOldItem)).FpSelect, flags)
if int32(uint32(*(*uint16)(unsafe.Pointer(pOldItem + 60 + 4))&0x400>>10)) != 0 {
*(*uintptr)(unsafe.Pointer(pNewItem + 72)) = _sqlite3IdListDup(tls, db, *(*uintptr)(unsafe.Pointer(pOldItem + 72)))
} else {
*(*uintptr)(unsafe.Pointer(pNewItem + 72)) = _sqlite3ExprDup(tls, db, *(*uintptr)(unsafe.Pointer(pOldItem + 72)), flags)
}
(*TSrcItem)(unsafe.Pointer(pNewItem)).FcolUsed = (*TSrcItem)(unsafe.Pointer(pOldItem)).FcolUsed
goto _3
_3:
;
i++
}
return pNew
}
func _sqlite3IdListDup(tls *libc.TLS, db uintptr, p uintptr) (r uintptr) {
var i int32
var pNew, pNewItem, pOldItem uintptr
_, _, _, _ = i, pNew, pNewItem, pOldItem
if p == uintptr(0) {
return uintptr(0)
}
pNew = _sqlite3DbMallocRawNN(tls, db, uint64(24)+uint64((*TIdList)(unsafe.Pointer(p)).FnId-libc.Int32FromInt32(1))*uint64(16))
if pNew == uintptr(0) {
return uintptr(0)
}
(*TIdList)(unsafe.Pointer(pNew)).FnId = (*TIdList)(unsafe.Pointer(p)).FnId
(*TIdList)(unsafe.Pointer(pNew)).FeU4 = (*TIdList)(unsafe.Pointer(p)).FeU4
i = 0
for {
if !(i < (*TIdList)(unsafe.Pointer(p)).FnId) {
break
}
pNewItem = pNew + 8 + uintptr(i)*16
pOldItem = p + 8 + uintptr(i)*16
(*TIdList_item)(unsafe.Pointer(pNewItem)).FzName = _sqlite3DbStrDup(tls, db, (*TIdList_item)(unsafe.Pointer(pOldItem)).FzName)
(*TIdList_item)(unsafe.Pointer(pNewItem)).Fu4 = (*TIdList_item)(unsafe.Pointer(pOldItem)).Fu4
goto _1
_1:
;
i++
}
return pNew
}
func _sqlite3SelectDup(tls *libc.TLS, db uintptr, pDup uintptr, flags int32) (r uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var p, pNew, pNext, pp uintptr
var _ /* pRet at bp+0 */ uintptr
_, _, _, _ = p, pNew, pNext, pp
*(*uintptr)(unsafe.Pointer(bp)) = uintptr(0)
pNext = uintptr(0)
pp = bp
p = pDup
for {
if !(p != 0) {
break
}
pNew = _sqlite3DbMallocRawNN(tls, db, uint64(128))
if pNew == uintptr(0) {
break
}
(*TSelect)(unsafe.Pointer(pNew)).FpEList = _sqlite3ExprListDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpEList, flags)
(*TSelect)(unsafe.Pointer(pNew)).FpSrc = _sqlite3SrcListDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpSrc, flags)
(*TSelect)(unsafe.Pointer(pNew)).FpWhere = _sqlite3ExprDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpWhere, flags)
(*TSelect)(unsafe.Pointer(pNew)).FpGroupBy = _sqlite3ExprListDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpGroupBy, flags)
(*TSelect)(unsafe.Pointer(pNew)).FpHaving = _sqlite3ExprDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpHaving, flags)
(*TSelect)(unsafe.Pointer(pNew)).FpOrderBy = _sqlite3ExprListDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpOrderBy, flags)
(*TSelect)(unsafe.Pointer(pNew)).Fop = (*TSelect)(unsafe.Pointer(p)).Fop
(*TSelect)(unsafe.Pointer(pNew)).FpNext = pNext
(*TSelect)(unsafe.Pointer(pNew)).FpPrior = uintptr(0)
(*TSelect)(unsafe.Pointer(pNew)).FpLimit = _sqlite3ExprDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpLimit, flags)
(*TSelect)(unsafe.Pointer(pNew)).FiLimit = 0
(*TSelect)(unsafe.Pointer(pNew)).FiOffset = 0
(*TSelect)(unsafe.Pointer(pNew)).FselFlags = (*TSelect)(unsafe.Pointer(p)).FselFlags & uint32(^libc.Int32FromInt32(SF_UsesEphemeral))
*(*int32)(unsafe.Pointer(pNew + 20)) = -int32(1)
*(*int32)(unsafe.Pointer(pNew + 20 + 1*4)) = -int32(1)
(*TSelect)(unsafe.Pointer(pNew)).FnSelectRow = (*TSelect)(unsafe.Pointer(p)).FnSelectRow
(*TSelect)(unsafe.Pointer(pNew)).FpWith = _sqlite3WithDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpWith)
(*TSelect)(unsafe.Pointer(pNew)).FpWin = uintptr(0)
(*TSelect)(unsafe.Pointer(pNew)).FpWinDefn = _sqlite3WindowListDup(tls, db, (*TSelect)(unsafe.Pointer(p)).FpWinDefn)
if (*TSelect)(unsafe.Pointer(p)).FpWin != 0 && int32((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed) == 0 {
_gatherSelectWindows(tls, pNew)
}
(*TSelect)(unsafe.Pointer(pNew)).FselId = (*TSelect)(unsafe.Pointer(p)).FselId
if (*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0 {
/* Any prior OOM might have left the Select object incomplete.
** Delete the whole thing rather than allow an incomplete Select
** to be used by the code generator. */
(*TSelect)(unsafe.Pointer(pNew)).FpNext = uintptr(0)
_sqlite3SelectDelete(tls, db, pNew)
break
}
*(*uintptr)(unsafe.Pointer(pp)) = pNew
pp = pNew + 80
pNext = pNew
goto _1
_1:
;
p = (*TSelect)(unsafe.Pointer(p)).FpPrior
}
return *(*uintptr)(unsafe.Pointer(bp))
}
type TExprList_item = struct {
FpExpr uintptr
FzEName uintptr
Ffg struct {
F__ccgo_align [0]uint32
FsortFlags Tu8
F__ccgo_align1 [2]byte
F__ccgo4 uint16
}
Fu struct {
FiConstExprReg [0]int32
Fx struct {
FiOrderByCol Tu16
FiAlias Tu16
}
}
}
type ExprList_item = TExprList_item
// C documentation
//
// /*
// ** Add a new element to the end of an expression list. If pList is
// ** initially NULL, then create a new expression list.
// **
// ** The pList argument must be either NULL or a pointer to an ExprList
// ** obtained from a prior call to sqlite3ExprListAppend().
// **
// ** If a memory allocation error occurs, the entire list is freed and
// ** NULL is returned. If non-NULL is returned, then it is guaranteed
// ** that the new entry was successfully appended.
// */
var _zeroItem = TExprList_item{}
func _sqlite3ExprListAppendNew(tls *libc.TLS, db uintptr, pExpr uintptr) (r uintptr) {
var pItem, pList uintptr
_, _ = pItem, pList
pList = _sqlite3DbMallocRawNN(tls, db, libc.Uint64FromInt64(40)+libc.Uint64FromInt64(32)*libc.Uint64FromInt32(4))
if pList == uintptr(0) {
_sqlite3ExprDelete(tls, db, pExpr)
return uintptr(0)
}
(*TExprList)(unsafe.Pointer(pList)).FnAlloc = int32(4)
(*TExprList)(unsafe.Pointer(pList)).FnExpr = int32(1)
pItem = pList + 8
*(*TExprList_item)(unsafe.Pointer(pItem)) = _zeroItem
(*TExprList_item)(unsafe.Pointer(pItem)).FpExpr = pExpr
return pList
}
func _sqlite3ExprListAppendGrow(tls *libc.TLS, db uintptr, pList uintptr, pExpr uintptr) (r uintptr) {
var pItem, pNew, v2 uintptr
var v1 int32
_, _, _, _ = pItem, pNew, v1, v2
*(*int32)(unsafe.Pointer(pList + 4)) *= int32(2)
pNew = _sqlite3DbRealloc(tls, db, pList, uint64(40)+uint64((*TExprList)(unsafe.Pointer(pList)).FnAlloc-libc.Int32FromInt32(1))*uint64(32))
if pNew == uintptr(0) {
_sqlite3ExprListDelete(tls, db, pList)
_sqlite3ExprDelete(tls, db, pExpr)
return uintptr(0)
} else {
pList = pNew
}
v2 = pList
v1 = *(*int32)(unsafe.Pointer(v2))
*(*int32)(unsafe.Pointer(v2))++
pItem = pList + 8 + uintptr(v1)*32
*(*TExprList_item)(unsafe.Pointer(pItem)) = _zeroItem
(*TExprList_item)(unsafe.Pointer(pItem)).FpExpr = pExpr
return pList
}
func _sqlite3ExprListAppend(tls *libc.TLS, pParse uintptr, pList uintptr, pExpr uintptr) (r uintptr) {
var pItem, v2 uintptr
var v1 int32
_, _, _ = pItem, v1, v2
if pList == uintptr(0) {
return _sqlite3ExprListAppendNew(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pExpr)
}
if (*TExprList)(unsafe.Pointer(pList)).FnAlloc < (*TExprList)(unsafe.Pointer(pList)).FnExpr+int32(1) {
return _sqlite3ExprListAppendGrow(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pList, pExpr)
}
v2 = pList
v1 = *(*int32)(unsafe.Pointer(v2))
*(*int32)(unsafe.Pointer(v2))++
pItem = pList + 8 + uintptr(v1)*32
*(*TExprList_item)(unsafe.Pointer(pItem)) = _zeroItem
(*TExprList_item)(unsafe.Pointer(pItem)).FpExpr = pExpr
return pList
}
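// For illustration (not from the original C comments): the list grows
// geometrically, so repeated appends stay amortized O(1):
//
//	1st append  ->  _sqlite3ExprListAppendNew allocates room for 4 items
//	5th append  ->  _sqlite3ExprListAppendGrow doubles nAlloc to 8
//	9th append  ->  nAlloc doubles again to 16, and so on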
// C documentation
//
// /*
// ** pColumns and pExpr form a vector assignment which is part of the SET
// ** clause of an UPDATE statement. Like this:
// **
// ** (a,b,c) = (expr1,expr2,expr3)
// ** Or: (a,b,c) = (SELECT x,y,z FROM ....)
// **
// ** For each term of the vector assignment, append new entries to the
// ** expression list pList. In the case of a subquery on the RHS, append
// ** TK_SELECT_COLUMN expressions.
// */
func _sqlite3ExprListAppendVector(tls *libc.TLS, pParse uintptr, pList uintptr, pColumns uintptr, pExpr uintptr) (r uintptr) {
bp := tls.Alloc(32)
defer tls.Free(32)
var db, pFirst, pSubExpr uintptr
var i, iFirst, n, v1, v2 int32
var v3 bool
_, _, _, _, _, _, _, _, _ = db, i, iFirst, n, pFirst, pSubExpr, v1, v2, v3
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if pList != 0 {
v1 = (*TExprList)(unsafe.Pointer(pList)).FnExpr
} else {
v1 = 0
}
iFirst = v1
/* pColumns can only be NULL due to an OOM but an OOM will cause an
** exit prior to this routine being invoked */
if pColumns == uintptr(0) {
goto vector_append_error
}
if pExpr == uintptr(0) {
goto vector_append_error
}
/* If the RHS is a vector, then we can immediately check to see that
** the size of the RHS and LHS match. But if the RHS is a SELECT,
** wildcards ("*") in the result set of the SELECT must be expanded before
** we can do the size check, so defer the size check until code generation.
*/
if v3 = int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) != int32(TK_SELECT); v3 {
v2 = _sqlite3ExprVectorSize(tls, pExpr)
n = v2
}
if v3 && (*TIdList)(unsafe.Pointer(pColumns)).FnId != v2 {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7835, libc.VaList(bp+8, (*TIdList)(unsafe.Pointer(pColumns)).FnId, n))
goto vector_append_error
}
i = 0
for {
if !(i < (*TIdList)(unsafe.Pointer(pColumns)).FnId) {
break
}
pSubExpr = _sqlite3ExprForVectorField(tls, pParse, pExpr, i, (*TIdList)(unsafe.Pointer(pColumns)).FnId)
if pSubExpr == uintptr(0) {
goto _4
}
pList = _sqlite3ExprListAppend(tls, pParse, pList, pSubExpr)
if pList != 0 {
(*(*TExprList_item)(unsafe.Pointer(pList + 8 + uintptr((*TExprList)(unsafe.Pointer(pList)).FnExpr-int32(1))*32))).FzEName = (*(*TIdList_item)(unsafe.Pointer(pColumns + 8 + uintptr(i)*16))).FzName
(*(*TIdList_item)(unsafe.Pointer(pColumns + 8 + uintptr(i)*16))).FzName = uintptr(0)
}
goto _4
_4:
;
i++
}
if !((*Tsqlite3)(unsafe.Pointer(db)).FmallocFailed != 0) && int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_SELECT) && pList != uintptr(0) {
pFirst = (*(*TExprList_item)(unsafe.Pointer(pList + 8 + uintptr(iFirst)*32))).FpExpr
/* Store the SELECT statement in pRight so it will be deleted when
** sqlite3ExprListDelete() is called */
(*TExpr)(unsafe.Pointer(pFirst)).FpRight = pExpr
pExpr = uintptr(0)
/* Remember the size of the LHS in iTable so that we can check that
** the RHS and LHS sizes match during code generation. */
(*TExpr)(unsafe.Pointer(pFirst)).FiTable = (*TIdList)(unsafe.Pointer(pColumns)).FnId
}
goto vector_append_error
vector_append_error:
;
_sqlite3ExprUnmapAndDelete(tls, pParse, pExpr)
_sqlite3IdListDelete(tls, db, pColumns)
return pList
}
// C documentation
//
// /*
// ** Set the sort order for the last element on the given ExprList.
// */
func _sqlite3ExprListSetSortOrder(tls *libc.TLS, p uintptr, iSortOrder int32, eNulls int32) {
var pItem, p1 uintptr
_, _ = pItem, p1
if p == uintptr(0) {
return
}
pItem = p + 8 + uintptr((*TExprList)(unsafe.Pointer(p)).FnExpr-int32(1))*32
if iSortOrder == -int32(1) {
iSortOrder = SQLITE_SO_ASC
}
(*TExprList_item)(unsafe.Pointer(pItem)).Ffg.FsortFlags = uint8(iSortOrder)
if eNulls != -int32(1) {
libc.SetBitFieldPtr16Uint32(pItem+16+4, libc.Uint32FromInt32(1), 5, 0x20)
if iSortOrder != eNulls {
p1 = pItem + 16
*(*Tu8)(unsafe.Pointer(p1)) = Tu8(int32(*(*Tu8)(unsafe.Pointer(p1))) | libc.Int32FromInt32(KEYINFO_ORDER_BIGNULL))
}
}
}
// C documentation
//
// /*
// ** Set the ExprList.a[].zEName element of the most recently added item
// ** on the expression list.
// **
// ** pList might be NULL following an OOM error. But pName should never be
// ** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag
// ** is set.
// */
func _sqlite3ExprListSetName(tls *libc.TLS, pParse uintptr, pList uintptr, pName uintptr, dequote int32) {
var pItem uintptr
_ = pItem
if pList != 0 {
pItem = pList + 8 + uintptr((*TExprList)(unsafe.Pointer(pList)).FnExpr-int32(1))*32
(*TExprList_item)(unsafe.Pointer(pItem)).FzEName = _sqlite3DbStrNDup(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, (*TToken)(unsafe.Pointer(pName)).Fz, uint64((*TToken)(unsafe.Pointer(pName)).Fn))
if dequote != 0 {
/* If dequote==0, then pName->z does not point to part of a DDL
** statement handled by the parser. And so no token need be added
** to the token-map. */
_sqlite3Dequote(tls, (*TExprList_item)(unsafe.Pointer(pItem)).FzEName)
if int32((*TParse)(unsafe.Pointer(pParse)).FeParseMode) >= int32(PARSE_MODE_RENAME) {
_sqlite3RenameTokenMap(tls, pParse, (*TExprList_item)(unsafe.Pointer(pItem)).FzEName, pName)
}
}
}
}
// C documentation
//
// /*
// ** Set the ExprList.a[].zSpan element of the most recently added item
// ** on the expression list.
// **
// ** pList might be NULL following an OOM error. But pSpan should never be
// ** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag
// ** is set.
// */
func _sqlite3ExprListSetSpan(tls *libc.TLS, pParse uintptr, pList uintptr, zStart uintptr, zEnd uintptr) {
var db, pItem uintptr
_, _ = db, pItem
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
if pList != 0 {
pItem = pList + 8 + uintptr((*TExprList)(unsafe.Pointer(pList)).FnExpr-int32(1))*32
if (*TExprList_item)(unsafe.Pointer(pItem)).FzEName == uintptr(0) {
(*TExprList_item)(unsafe.Pointer(pItem)).FzEName = _sqlite3DbSpanDup(tls, db, zStart, zEnd)
libc.SetBitFieldPtr16Uint32(pItem+16+4, libc.Uint32FromInt32(ENAME_SPAN), 0, 0x3)
}
}
}
// C documentation
//
// /*
// ** If the expression list pEList contains more than iLimit elements,
// ** leave an error message in pParse.
// */
func _sqlite3ExprListCheckLength(tls *libc.TLS, pParse uintptr, pEList uintptr, zObject uintptr) {
bp := tls.Alloc(16)
defer tls.Free(16)
var mx int32
_ = mx
mx = *(*int32)(unsafe.Pointer((*TParse)(unsafe.Pointer(pParse)).Fdb + 136 + 2*4))
if pEList != 0 && (*TExprList)(unsafe.Pointer(pEList)).FnExpr > mx {
_sqlite3ErrorMsg(tls, pParse, __ccgo_ts+7865, libc.VaList(bp+8, zObject))
}
}
// C documentation
//
// /*
// ** Delete an entire expression list.
// */
func _exprListDeleteNN(tls *libc.TLS, db uintptr, pList uintptr) {
var i, v1 int32
var pItem uintptr
_, _, _ = i, pItem, v1
i = (*TExprList)(unsafe.Pointer(pList)).FnExpr
pItem = pList + 8
for {
_sqlite3ExprDelete(tls, db, (*TExprList_item)(unsafe.Pointer(pItem)).FpExpr)
if (*TExprList_item)(unsafe.Pointer(pItem)).FzEName != 0 {
_sqlite3DbNNFreeNN(tls, db, (*TExprList_item)(unsafe.Pointer(pItem)).FzEName)
}
pItem += 32
goto _2
_2:
;
i--
v1 = i
if !(v1 > 0) {
break
}
}
_sqlite3DbNNFreeNN(tls, db, pList)
}
func _sqlite3ExprListDelete(tls *libc.TLS, db uintptr, pList uintptr) {
if pList != 0 {
_exprListDeleteNN(tls, db, pList)
}
}
func _sqlite3ExprListDeleteGeneric(tls *libc.TLS, db uintptr, pList uintptr) {
if pList != 0 {
_exprListDeleteNN(tls, db, pList)
}
}
// C documentation
//
// /*
// ** Return the bitwise-OR of all Expr.flags fields in the given
// ** ExprList.
// */
func _sqlite3ExprListFlags(tls *libc.TLS, pList uintptr) (r Tu32) {
var i int32
var m Tu32
var pExpr uintptr
_, _, _ = i, m, pExpr
m = uint32(0)
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pList)).FnExpr) {
break
}
pExpr = (*(*TExprList_item)(unsafe.Pointer(pList + 8 + uintptr(i)*32))).FpExpr
m |= (*TExpr)(unsafe.Pointer(pExpr)).Fflags
goto _1
_1:
;
i++
}
return m
}
// C documentation
//
// /*
// ** This is a SELECT-node callback for the expression walker that
// ** always "fails". By "fail" in this case, we mean set
// ** pWalker->eCode to zero and abort.
// **
// ** This callback is used by multiple expression walkers.
// */
func _sqlite3SelectWalkFail(tls *libc.TLS, pWalker uintptr, NotUsed uintptr) (r int32) {
_ = NotUsed
(*TWalker)(unsafe.Pointer(pWalker)).FeCode = uint16(0)
return int32(WRC_Abort)
}
// C documentation
//
// /*
// ** Check the input string to see if it is "true" or "false" (in any case).
// **
// **       If the string is....           Return
// **         "true"                       EP_IsTrue
// **         "false"                      EP_IsFalse
// **         anything else                0
// */
func _sqlite3IsTrueOrFalse(tls *libc.TLS, zIn uintptr) (r Tu32) {
if _sqlite3StrICmp(tls, zIn, __ccgo_ts+7888) == 0 {
return uint32(EP_IsTrue)
}
if _sqlite3StrICmp(tls, zIn, __ccgo_ts+7893) == 0 {
return uint32(EP_IsFalse)
}
return uint32(0)
}
// C documentation
//
// /*
// ** If the input expression is an ID with the name "true" or "false"
// ** then convert it into a TK_TRUEFALSE term. Return non-zero if
// ** the conversion happened, and zero if the expression is unaltered.
// */
func _sqlite3ExprIdToTrueFalse(tls *libc.TLS, pExpr uintptr) (r int32) {
var v, v1 Tu32
var v2 bool
_, _, _ = v, v1, v2
if v2 = !((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_Quoted)|libc.Int32FromInt32(EP_IntValue)) != libc.Uint32FromInt32(0)); v2 {
v1 = _sqlite3IsTrueOrFalse(tls, *(*uintptr)(unsafe.Pointer(pExpr + 8)))
v = v1
}
if v2 && v1 != uint32(0) {
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_TRUEFALSE)
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= v
return int32(1)
}
return 0
}
// C documentation
//
// /*
// ** The argument must be a TK_TRUEFALSE Expr node. Return 1 if it is TRUE
// ** and 0 if it is FALSE.
// */
func _sqlite3ExprTruthValue(tls *libc.TLS, pExpr uintptr) (r int32) {
pExpr = _sqlite3ExprSkipCollateAndLikely(tls, pExpr)
return libc.BoolInt32(int32(*(*int8)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 8)) + 4))) == 0)
}
// C documentation
//
// /*
// ** If pExpr is an AND or OR expression, try to simplify it by eliminating
// ** terms that are always true or false. Return the simplified expression.
// ** Or return the original expression if no simplification is possible.
// **
// ** Examples:
// **
// ** (x<10) AND true => (x<10)
// ** (x<10) AND false => false
// ** (x<10) AND (y=22 OR false) => (x<10) AND (y=22)
// ** (x<10) AND (y=22 OR true) => (x<10)
// ** (y=22) OR true => true
// */
func _sqlite3ExprSimplifiedAndOr(tls *libc.TLS, pExpr uintptr) (r uintptr) {
var pLeft, pRight, v1, v2 uintptr
_, _, _, _ = pLeft, pRight, v1, v2
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_AND) || int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_OR) {
pRight = _sqlite3ExprSimplifiedAndOr(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpRight)
pLeft = _sqlite3ExprSimplifiedAndOr(tls, (*TExpr)(unsafe.Pointer(pExpr)).FpLeft)
if (*TExpr)(unsafe.Pointer(pLeft)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)|libc.Int32FromInt32(EP_IsTrue)) == uint32(EP_IsTrue) || (*TExpr)(unsafe.Pointer(pRight)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)|libc.Int32FromInt32(EP_IsFalse)) == uint32(EP_IsFalse) {
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_AND) {
v1 = pRight
} else {
v1 = pLeft
}
pExpr = v1
} else {
if (*TExpr)(unsafe.Pointer(pRight)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)|libc.Int32FromInt32(EP_IsTrue)) == uint32(EP_IsTrue) || (*TExpr)(unsafe.Pointer(pLeft)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)|libc.Int32FromInt32(EP_IsFalse)) == uint32(EP_IsFalse) {
if int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_AND) {
v2 = pLeft
} else {
v2 = pRight
}
pExpr = v2
}
}
}
return pExpr
}
// C documentation
//
// /*
// ** These routines are Walker callbacks used to check expressions to
// ** see if they are "constant" for some definition of constant. The
// ** Walker.eCode value determines the type of "constant" we are looking
// ** for.
// **
// ** These callback routines are used to implement the following:
// **
// ** sqlite3ExprIsConstant() pWalker->eCode==1
// ** sqlite3ExprIsConstantNotJoin() pWalker->eCode==2
// ** sqlite3ExprIsTableConstant() pWalker->eCode==3
// ** sqlite3ExprIsConstantOrFunction() pWalker->eCode==4 or 5
// **
// ** In all cases, the callbacks set Walker.eCode=0 and abort if the expression
// ** is found to not be a constant.
// **
// ** The sqlite3ExprIsConstantOrFunction() is used for evaluating DEFAULT
// ** expressions in a CREATE TABLE statement. The Walker.eCode value is 5
// ** when parsing an existing schema out of the sqlite_schema table and 4
// ** when processing a new CREATE TABLE statement. A bound parameter raises
// ** an error for new statements, but is silently converted
// ** to NULL for existing schemas. This allows sqlite_schema tables that
// ** contain a bound parameter because they were generated by older versions
// ** of SQLite to be parsed by newer versions of SQLite without raising a
// ** malformed schema error.
// */
func _exprNodeIsConstant(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
/* If pWalker->eCode is 2 then any term of the expression that comes from
** the ON or USING clauses of an outer join disqualifies the expression
** from being considered constant. */
if int32((*TWalker)(unsafe.Pointer(pWalker)).FeCode) == int32(2) && (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)) != uint32(0) {
(*TWalker)(unsafe.Pointer(pWalker)).FeCode = uint16(0)
return int32(WRC_Abort)
}
switch int32((*TExpr)(unsafe.Pointer(pExpr)).Fop) {
/* Consider functions to be constant if all their arguments are constant
// ** and either pWalker->eCode==4 or 5, or the function has the
** SQLITE_FUNC_CONST flag. */
case int32(TK_FUNCTION):
if (int32((*TWalker)(unsafe.Pointer(pWalker)).FeCode) >= int32(4) || (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_ConstFunc)) != uint32(0)) && !((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_WinFunc)) != libc.Uint32FromInt32(0)) {
if int32((*TWalker)(unsafe.Pointer(pWalker)).FeCode) == int32(5) {
*(*Tu32)(unsafe.Pointer(pExpr + 4)) |= uint32(libc.Int32FromInt32(EP_FromDDL))
}
return WRC_Continue
} else {
(*TWalker)(unsafe.Pointer(pWalker)).FeCode = uint16(0)
return int32(WRC_Abort)
}
fallthrough
case int32(TK_ID):
/* Convert "true" or "false" in a DEFAULT clause into the
** appropriate TK_TRUEFALSE operator */
if _sqlite3ExprIdToTrueFalse(tls, pExpr) != 0 {
return int32(WRC_Prune)
}
fallthrough
case int32(TK_COLUMN):
fallthrough
case int32(TK_AGG_FUNCTION):
fallthrough
case int32(TK_AGG_COLUMN):
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_FixedCol)) != uint32(0) && int32((*TWalker)(unsafe.Pointer(pWalker)).FeCode) != int32(2) {
return WRC_Continue
}
if int32((*TWalker)(unsafe.Pointer(pWalker)).FeCode) == int32(3) && (*TExpr)(unsafe.Pointer(pExpr)).FiTable == *(*int32)(unsafe.Pointer(&(*TWalker)(unsafe.Pointer(pWalker)).Fu)) {
return WRC_Continue
}
fallthrough
case int32(TK_IF_NULL_ROW):
fallthrough
case int32(TK_REGISTER):
fallthrough
case int32(TK_DOT):
(*TWalker)(unsafe.Pointer(pWalker)).FeCode = uint16(0)
return int32(WRC_Abort)
case int32(TK_VARIABLE):
if int32((*TWalker)(unsafe.Pointer(pWalker)).FeCode) == int32(5) {
/* Silently convert bound parameters that appear inside of CREATE
** statements into a NULL when parsing the CREATE statement text out
** of the sqlite_schema table */
(*TExpr)(unsafe.Pointer(pExpr)).Fop = uint8(TK_NULL)
} else {
if int32((*TWalker)(unsafe.Pointer(pWalker)).FeCode) == int32(4) {
/* A bound parameter in a CREATE statement that originates from
** sqlite3_prepare() causes an error */
(*TWalker)(unsafe.Pointer(pWalker)).FeCode = uint16(0)
return int32(WRC_Abort)
}
}
fallthrough
default:
/* TK_SELECT and TK_EXISTS also land here, but any sub-select they carry is
** rejected separately by the sqlite3SelectWalkFail() select callback. */
return WRC_Continue
}
return r
}
func _exprIsConst(tls *libc.TLS, p uintptr, initFlag int32, iCur int32) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* w at bp+0 */ TWalker
(*(*TWalker)(unsafe.Pointer(bp))).FeCode = uint16(initFlag)
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_exprNodeIsConstant)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback = __ccgo_fp(_sqlite3SelectWalkFail)
*(*int32)(unsafe.Pointer(bp + 40)) = iCur
_sqlite3WalkExpr(tls, bp, p)
return int32((*(*TWalker)(unsafe.Pointer(bp))).FeCode)
}
// C documentation
//
// /*
// ** Walk an expression tree. Return non-zero if the expression is constant
// ** and 0 if it involves variables or function calls.
// **
// ** For the purposes of this function, a double-quoted string (ex: "abc")
// ** is considered a variable but a single-quoted string (ex: 'abc') is
// ** a constant.
// */
func _sqlite3ExprIsConstant(tls *libc.TLS, p uintptr) (r int32) {
return _exprIsConst(tls, p, int32(1), 0)
}
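// Illustrative examples (SQL level, not generated code) of the distinction drawn
// above, assuming a table t(x):
//
//	1+2, 'abc', abs(-5)    -- constant: _sqlite3ExprIsConstant() returns non-zero
//	x+1, "abc", random()   -- not constant: returns 0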
// C documentation
//
// /*
// ** Walk an expression tree. Return non-zero if
// **
// ** (1) the expression is constant, and
// ** (2) the expression does not originate in the ON or USING clause
// ** of a LEFT JOIN, and
// ** (3) the expression does not contain any EP_FixedCol TK_COLUMN
// ** operands created by the constant propagation optimization.
// **
// ** When this routine returns true, it indicates that the expression
// ** can be added to the pParse->pConstExpr list and evaluated once when
// ** the prepared statement starts up. See sqlite3ExprCodeRunJustOnce().
// */
func _sqlite3ExprIsConstantNotJoin(tls *libc.TLS, p uintptr) (r int32) {
return _exprIsConst(tls, p, int32(2), 0)
}
// C documentation
//
// /*
// ** Walk an expression tree. Return non-zero if the expression is constant
// ** for any single row of the table with cursor iCur. In other words, the
// ** expression must not refer to any non-deterministic function nor any
// ** table other than iCur.
// */
func _sqlite3ExprIsTableConstant(tls *libc.TLS, p uintptr, iCur int32) (r int32) {
return _exprIsConst(tls, p, int32(3), iCur)
}
// C documentation
//
// /*
// ** Check pExpr to see if it is a constraint on the single data source
// ** pSrc = &pSrcList->a[iSrc]. In other words, check to see if pExpr
// ** constrains pSrc but does not depend on any other tables or data
// ** sources anywhere else in the query. Return true (non-zero) if pExpr
// ** is a constraint on pSrc only.
// **
// ** This is an optimization. False negatives will perhaps cause slower
// ** queries, but false positives will yield incorrect answers. So when in
// ** doubt, return 0.
// **
// ** To be a single-source constraint, the following must be true:
// **
// ** (1) pExpr cannot refer to any table other than pSrc->iCursor.
// **
// ** (2) pExpr cannot use subqueries or non-deterministic functions.
// **
// ** (3) pSrc cannot be part of the left operand for a RIGHT JOIN.
// ** (Is there some way to relax this constraint?)
// **
// ** (4) If pSrc is the right operand of a LEFT JOIN, then...
// ** (4a) pExpr must come from an ON clause,
// ** (4b) and specifically the ON clause associated with the LEFT JOIN.
// **
// ** (5) If pSrc is not the right operand of a LEFT JOIN or the left
// ** operand of a RIGHT JOIN, then pExpr must be from the WHERE
// ** clause, not an ON clause.
// **
// ** (6) Either:
// **
// ** (6a) pExpr does not originate in an ON or USING clause, or
// **
// ** (6b) The ON or USING clause from which pExpr is derived is
// ** not to the left of a RIGHT JOIN (or FULL JOIN).
// **
// ** Without this restriction, accepting pExpr as a single-table
// ** constraint might move the ON/USING filter expression
// ** from the left side of a RIGHT JOIN over to the right side,
// ** which leads to incorrect answers. See also restriction (9)
// ** on push-down.
// */
func _sqlite3ExprIsSingleTableConstraint(tls *libc.TLS, pExpr uintptr, pSrcList uintptr, iSrc int32) (r int32) {
var jj int32
var pSrc uintptr
_, _ = jj, pSrc
pSrc = pSrcList + 8 + uintptr(iSrc)*104
if int32((*TSrcItem)(unsafe.Pointer(pSrc)).Ffg.Fjointype)&int32(JT_LTORJ) != 0 {
return 0 /* rule (3) */
}
if int32((*TSrcItem)(unsafe.Pointer(pSrc)).Ffg.Fjointype)&int32(JT_LEFT) != 0 {
if !((*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)) != libc.Uint32FromInt32(0)) {
return 0
} /* rule (4a) */
if *(*int32)(unsafe.Pointer(pExpr + 52)) != (*TSrcItem)(unsafe.Pointer(pSrc)).FiCursor {
return 0
} /* rule (4b) */
} else {
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)) != uint32(0) {
return 0
} /* rule (5) */
}
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(libc.Int32FromInt32(EP_OuterON)|libc.Int32FromInt32(EP_InnerON)) != uint32(0) && int32((*(*TSrcItem)(unsafe.Pointer(pSrcList + 8))).Ffg.Fjointype)&int32(JT_LTORJ) != 0 {
jj = 0
for {
if !(jj < iSrc) {
break
}
if *(*int32)(unsafe.Pointer(pExpr + 52)) == (*(*TSrcItem)(unsafe.Pointer(pSrcList + 8 + uintptr(jj)*104))).FiCursor {
if int32((*(*TSrcItem)(unsafe.Pointer(pSrcList + 8 + uintptr(jj)*104))).Ffg.Fjointype)&int32(JT_LTORJ) != 0 {
return 0 /* restriction (6) */
}
break
}
goto _1
_1:
;
jj++
}
}
return _sqlite3ExprIsTableConstant(tls, pExpr, (*TSrcItem)(unsafe.Pointer(pSrc)).FiCursor) /* rules (1), (2) */
}
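// An illustrative reading of the rules above (SQL level, not generated code). For
// "SELECT * FROM t1 LEFT JOIN t2 ON t2.a=5 WHERE t1.b=7", the term t2.a=5 can be
// treated as a constraint on t2 only because it comes from the ON clause of the
// LEFT JOIN that introduces t2 (rule 4), while t1.b=7 is a constraint on t1 only
// because it comes from the WHERE clause (rule 5).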
// C documentation
//
// /*
// ** sqlite3WalkExpr() callback used by sqlite3ExprIsConstantOrGroupBy().
// */
func _exprNodeIsConstantOrGroupBy(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
var i int32
var p, pColl, pGroupBy uintptr
_, _, _, _ = i, p, pColl, pGroupBy
pGroupBy = *(*uintptr)(unsafe.Pointer(pWalker + 40))
/* Check if pExpr is identical to any GROUP BY term. If so, consider
** it constant. */
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pGroupBy)).FnExpr) {
break
}
p = (*(*TExprList_item)(unsafe.Pointer(pGroupBy + 8 + uintptr(i)*32))).FpExpr
if _sqlite3ExprCompare(tls, uintptr(0), pExpr, p, -int32(1)) < int32(2) {
pColl = _sqlite3ExprNNCollSeq(tls, (*TWalker)(unsafe.Pointer(pWalker)).FpParse, p)
if _sqlite3IsBinary(tls, pColl) != 0 {
return int32(WRC_Prune)
}
}
goto _1
_1:
;
i++
}
/* Check if pExpr is a sub-select. If so, consider it variable. */
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
(*TWalker)(unsafe.Pointer(pWalker)).FeCode = uint16(0)
return int32(WRC_Abort)
}
return _exprNodeIsConstant(tls, pWalker, pExpr)
}
// C documentation
//
// /*
// ** Walk the expression tree passed as the first argument. Return non-zero
// ** if the expression consists entirely of constants or copies of terms
// ** in pGroupBy that sort with the BINARY collation sequence.
// **
// ** This routine is used to determine if a term of the HAVING clause can
// ** be promoted into the WHERE clause. In order for such a promotion to work,
// ** the value of the HAVING clause term must be the same for all members of
// ** a "group". The requirement that the GROUP BY term must be BINARY
// ** assumes that no other collating sequence will have a finer-grained
// ** grouping than binary. In other words (A=B COLLATE binary) implies
// ** A=B in every other collating sequence. The requirement that the
// ** GROUP BY be BINARY is stricter than necessary. It would also work
// ** to promote HAVING clauses that use the same alternative collating
// ** sequence as the GROUP BY term, but that is much harder to check,
// ** alternative collating sequences are uncommon, and this is only an
// ** optimization, so we take the easy way out and simply require the
// ** GROUP BY to use the BINARY collating sequence.
// */
func _sqlite3ExprIsConstantOrGroupBy(tls *libc.TLS, pParse uintptr, p uintptr, pGroupBy uintptr) (r int32) {
bp := tls.Alloc(48)
defer tls.Free(48)
var _ /* w at bp+0 */ TWalker
(*(*TWalker)(unsafe.Pointer(bp))).FeCode = uint16(1)
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_exprNodeIsConstantOrGroupBy)
(*(*TWalker)(unsafe.Pointer(bp))).FxSelectCallback = uintptr(0)
*(*uintptr)(unsafe.Pointer(bp + 40)) = pGroupBy
(*(*TWalker)(unsafe.Pointer(bp))).FpParse = pParse
_sqlite3WalkExpr(tls, bp, p)
return int32((*(*TWalker)(unsafe.Pointer(bp))).FeCode)
}
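// An illustrative example (SQL level, not generated code) of the HAVING-to-WHERE
// promotion this check enables, assuming the GROUP BY uses the default BINARY
// collation. In "SELECT a, sum(b) FROM t1 GROUP BY a HAVING a=5 AND sum(b)>10",
// the term a=5 consists only of a GROUP BY term and a constant and so may be
// evaluated in the WHERE clause, while sum(b)>10 must remain in the HAVING clause.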
// C documentation
//
// /*
// ** Walk an expression tree for the DEFAULT field of a column definition
// ** in a CREATE TABLE statement. Return non-zero if the expression is
// ** acceptable for use as a DEFAULT. That is to say, return non-zero if
// ** the expression is constant or a function call with constant arguments.
// ** Return 0 if there are any variables.
// **
// ** isInit is true when parsing from sqlite_schema. isInit is false when
// ** processing a new CREATE TABLE statement. When isInit is true, parameters
// ** (such as ? or $abc) in the expression are converted into NULL. When
// ** isInit is false, parameters raise an error. Parameters should not be
// ** allowed in a CREATE TABLE statement, but some legacy versions of SQLite
// ** allowed it, so we need to support it when reading sqlite_schema for
// ** backwards compatibility.
// **
// ** If isInit is true, set EP_FromDDL on every TK_FUNCTION node.
// **
// ** For the purposes of this function, a double-quoted string (ex: "abc")
// ** is considered a variable but a single-quoted string (ex: 'abc') is
// ** a constant.
// */
func _sqlite3ExprIsConstantOrFunction(tls *libc.TLS, p uintptr, isInit Tu8) (r int32) {
return _exprIsConst(tls, p, int32(4)+int32(isInit), 0)
}
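// Illustrative examples (SQL level, not generated code) of the policy described above:
//
//	CREATE TABLE t(x DEFAULT (abs(-1)));  -- accepted: function call with constant arguments
//	CREATE TABLE t(x DEFAULT (?1));       -- rejected for a new statement (isInit==0);
//	                                      -- when read back from sqlite_schema (isInit==1)
//	                                      -- the parameter would instead become NULL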
// C documentation
//
// /*
// ** If the expression p codes a constant integer that is small enough
// ** to fit in a 32-bit integer, return 1 and put the value of the integer
// ** in *pValue. If the expression is not an integer or if it is too big
// ** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged.
// */
func _sqlite3ExprIsInteger(tls *libc.TLS, p uintptr, pValue uintptr) (r int32) {
bp := tls.Alloc(16)
defer tls.Free(16)
var rc int32
var _ /* v at bp+0 */ int32
_ = rc
rc = 0
if p == uintptr(0) {
return 0
} /* Used to only happen following an OOM */
/* If an expression is an integer literal that fits in a signed 32-bit
** integer, then the EP_IntValue flag will have already been set */
if (*TExpr)(unsafe.Pointer(p)).Fflags&uint32(EP_IntValue) != 0 {
*(*int32)(unsafe.Pointer(pValue)) = *(*int32)(unsafe.Pointer(&(*TExpr)(unsafe.Pointer(p)).Fu))
return int32(1)
}
switch int32((*TExpr)(unsafe.Pointer(p)).Fop) {
case int32(TK_UPLUS):
rc = _sqlite3ExprIsInteger(tls, (*TExpr)(unsafe.Pointer(p)).FpLeft, pValue)
case int32(TK_UMINUS):
*(*int32)(unsafe.Pointer(bp)) = 0
if _sqlite3ExprIsInteger(tls, (*TExpr)(unsafe.Pointer(p)).FpLeft, bp) != 0 {
*(*int32)(unsafe.Pointer(pValue)) = -*(*int32)(unsafe.Pointer(bp))
rc = int32(1)
}
default:
break
}
return rc
}
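// Illustrative helper (hand-written, not part of the generated translation) showing
// the signed 32-bit range check the comment above refers to: a literal is usable by
// _sqlite3ExprIsInteger-style callers only when its value round-trips through int32.
func _exampleFitsInInt32(v int64) (int32, bool) {
	if v < -2147483648 || v > 2147483647 {
		return 0, false // too big to fit in a signed 32-bit integer
	}
	return int32(v), true
}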
// C documentation
//
// /*
// ** Return FALSE if there is no chance that the expression can be NULL.
// **
// ** If the expression might be NULL or if the expression is too complex
// ** to tell, return TRUE.
// **
// ** This routine is used as an optimization, to skip OP_IsNull opcodes
// ** when we know that a value cannot be NULL. Hence, a false positive
// ** (returning TRUE when in fact the expression can never be NULL) might
// ** be a small performance hit but is otherwise harmless. On the other
// ** hand, a false negative (returning FALSE when the result could be NULL)
// ** will likely result in an incorrect answer. So when in doubt, return
// ** TRUE.
// */
func _sqlite3ExprCanBeNull(tls *libc.TLS, p uintptr) (r int32) {
var op Tu8
_ = op
for int32((*TExpr)(unsafe.Pointer(p)).Fop) == int32(TK_UPLUS) || int32((*TExpr)(unsafe.Pointer(p)).Fop) == int32(TK_UMINUS) {
p = (*TExpr)(unsafe.Pointer(p)).FpLeft
}
op = (*TExpr)(unsafe.Pointer(p)).Fop
if int32(op) == int32(TK_REGISTER) {
op = (*TExpr)(unsafe.Pointer(p)).Fop2
}
switch int32(op) {
case int32(TK_INTEGER):
fallthrough
case int32(TK_STRING):
fallthrough
case int32(TK_FLOAT):
fallthrough
case int32(TK_BLOB):
return 0
case int32(TK_COLUMN):
return libc.BoolInt32((*TExpr)(unsafe.Pointer(p)).Fflags&uint32(libc.Int32FromInt32(EP_CanBeNull)) != uint32(0) || *(*uintptr)(unsafe.Pointer(p + 64)) == uintptr(0) || int32((*TExpr)(unsafe.Pointer(p)).FiColumn) >= 0 && (*TTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 64)))).FaCol != uintptr(0) && int32((*TExpr)(unsafe.Pointer(p)).FiColumn) < int32((*TTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 64)))).FnCol) && int32(uint32(*(*uint8)(unsafe.Pointer((*TTable)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(p + 64)))).FaCol + uintptr((*TExpr)(unsafe.Pointer(p)).FiColumn)*16 + 8))&0xf>>0)) == 0)
default:
return int32(1)
}
return r
}
// C documentation
//
// /*
// ** Return TRUE if the given expression is a constant which would be
// ** unchanged by OP_Affinity with the affinity given in the second
// ** argument.
// **
// ** This routine is used to determine if the OP_Affinity operation
// ** can be omitted. When in doubt return FALSE. A false negative
// ** is harmless. A false positive, however, can result in the wrong
// ** answer.
// */
func _sqlite3ExprNeedsNoAffinityChange(tls *libc.TLS, p uintptr, aff int8) (r int32) {
var op Tu8
var unaryMinus int32
_, _ = op, unaryMinus
unaryMinus = 0
if int32(aff) == int32(SQLITE_AFF_BLOB) {
return int32(1)
}
for int32((*TExpr)(unsafe.Pointer(p)).Fop) == int32(TK_UPLUS) || int32((*TExpr)(unsafe.Pointer(p)).Fop) == int32(TK_UMINUS) {
if int32((*TExpr)(unsafe.Pointer(p)).Fop) == int32(TK_UMINUS) {
unaryMinus = int32(1)
}
p = (*TExpr)(unsafe.Pointer(p)).FpLeft
}
op = (*TExpr)(unsafe.Pointer(p)).Fop
if int32(op) == int32(TK_REGISTER) {
op = (*TExpr)(unsafe.Pointer(p)).Fop2
}
switch int32(op) {
case int32(TK_INTEGER):
return libc.BoolInt32(int32(aff) >= int32(SQLITE_AFF_NUMERIC))
case int32(TK_FLOAT):
return libc.BoolInt32(int32(aff) >= int32(SQLITE_AFF_NUMERIC))
case int32(TK_STRING):
return libc.BoolInt32(!(unaryMinus != 0) && int32(aff) == int32(SQLITE_AFF_TEXT))
case int32(TK_BLOB):
return libc.BoolInt32(!(unaryMinus != 0))
case int32(TK_COLUMN):
/* p cannot be part of a CHECK constraint */
return libc.BoolInt32(int32(aff) >= int32(SQLITE_AFF_NUMERIC) && int32((*TExpr)(unsafe.Pointer(p)).FiColumn) < 0)
default:
return 0
}
return r
}
// C documentation
//
// /*
// ** Return TRUE if the given string is a row-id column name.
// */
func _sqlite3IsRowid(tls *libc.TLS, z uintptr) (r int32) {
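	/* The three string constants compared against here are the usual SQLite row-id
	** aliases "_ROWID_", "ROWID" and "OID", stored in the __ccgo_ts string table,
	** so only their offsets appear in the generated code. */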
if _sqlite3StrICmp(tls, z, __ccgo_ts+7899) == 0 {
return int32(1)
}
if _sqlite3StrICmp(tls, z, __ccgo_ts+7907) == 0 {
return int32(1)
}
if _sqlite3StrICmp(tls, z, __ccgo_ts+7913) == 0 {
return int32(1)
}
return 0
}
// C documentation
//
// /*
// ** Return a pointer to a buffer containing a usable rowid alias for table
// ** pTab. An alias is usable if there is not an explicit user-defined column
// ** of the same name.
// */
func _sqlite3RowidAlias(tls *libc.TLS, pTab uintptr) (r uintptr) {
var azOpt [3]uintptr
var iCol, ii int32
_, _, _ = azOpt, iCol, ii
azOpt = [3]uintptr{
0: __ccgo_ts + 7899,
1: __ccgo_ts + 7907,
2: __ccgo_ts + 7913,
}
ii = 0
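	/* The loop bound 24/8 below is the generated form of the C array-length
	** computation sizeof(azOpt)/sizeof(azOpt[0]): three candidate names, each
	** occupying one 8-byte pointer slot. */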
for {
if !(ii < int32(libc.Uint64FromInt64(24)/libc.Uint64FromInt64(8))) {
break
}
iCol = 0
for {
if !(iCol < int32((*TTable)(unsafe.Pointer(pTab)).FnCol)) {
break
}
if Xsqlite3_stricmp(tls, azOpt[ii], (*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer(pTab)).FaCol + uintptr(iCol)*16))).FzCnName) == 0 {
break
}
goto _2
_2:
;
iCol++
}
if iCol == int32((*TTable)(unsafe.Pointer(pTab)).FnCol) {
return azOpt[ii]
}
goto _1
_1:
;
ii++
}
return uintptr(0)
}
// C documentation
//
// /*
// ** pX is the RHS of an IN operator. If pX is a SELECT statement
// ** that can be simplified to a direct table access, then return
// ** a pointer to the SELECT statement. If pX is not a SELECT statement,
// ** or if the SELECT statement needs to be materialized into a transient
// ** table, then return NULL.
// */
func _isCandidateForInOpt(tls *libc.TLS, pX uintptr) (r uintptr) {
var i int32
var p, pEList, pRes, pSrc, pTab uintptr
_, _, _, _, _, _ = i, p, pEList, pRes, pSrc, pTab
if !((*TExpr)(unsafe.Pointer(pX)).Fflags&libc.Uint32FromInt32(EP_xIsSelect) != libc.Uint32FromInt32(0)) {
return uintptr(0)
} /* Not a subquery */
if (*TExpr)(unsafe.Pointer(pX)).Fflags&uint32(libc.Int32FromInt32(EP_VarSelect)) != uint32(0) {
return uintptr(0)
} /* Correlated subq */
p = *(*uintptr)(unsafe.Pointer(pX + 32))
if (*TSelect)(unsafe.Pointer(p)).FpPrior != 0 {
return uintptr(0)
} /* Not a compound SELECT */
if (*TSelect)(unsafe.Pointer(p)).FselFlags&uint32(libc.Int32FromInt32(SF_Distinct)|libc.Int32FromInt32(SF_Aggregate)) != 0 {
return uintptr(0) /* No DISTINCT keyword and no aggregate functions */
}
/* Has no GROUP BY clause */
if (*TSelect)(unsafe.Pointer(p)).FpLimit != 0 {
return uintptr(0)
} /* Has no LIMIT clause */
if (*TSelect)(unsafe.Pointer(p)).FpWhere != 0 {
return uintptr(0)
} /* Has no WHERE clause */
pSrc = (*TSelect)(unsafe.Pointer(p)).FpSrc
if (*TSrcList)(unsafe.Pointer(pSrc)).FnSrc != int32(1) {
return uintptr(0)
} /* Single term in FROM clause */
if (*(*TSrcItem)(unsafe.Pointer(pSrc + 8))).FpSelect != 0 {
return uintptr(0)
} /* FROM is not a subquery or view */
pTab = (*(*TSrcItem)(unsafe.Pointer(pSrc + 8))).FpTab
/* FROM clause is not a view */
if int32((*TTable)(unsafe.Pointer(pTab)).FeTabType) == int32(TABTYP_VTAB) {
return uintptr(0)
} /* FROM clause not a virtual table */
pEList = (*TSelect)(unsafe.Pointer(p)).FpEList
/* All SELECT results must be columns. */
i = 0
for {
if !(i < (*TExprList)(unsafe.Pointer(pEList)).FnExpr) {
break
}
pRes = (*(*TExprList_item)(unsafe.Pointer(pEList + 8 + uintptr(i)*32))).FpExpr
if int32((*TExpr)(unsafe.Pointer(pRes)).Fop) != int32(TK_COLUMN) {
return uintptr(0)
}
/* Not a correlated subquery */
goto _1
_1:
;
i++
}
return p
}
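// Illustrative examples (SQL level, not generated code) of the tests above:
//
//	x IN (SELECT a FROM t1)            -- candidate: plain single-table SELECT of columns
//	x IN (SELECT a FROM t1 WHERE b>0)  -- not a candidate: has a WHERE clause
//	x IN (SELECT a+1 FROM t1)          -- not a candidate: result is not a bare column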
// C documentation
//
// /*
// ** Generate code that checks the left-most column of index table iCur to see if
// ** it contains any NULL entries. Cause the register at regHasNull to be set
// ** to a non-NULL value if iCur contains no NULLs. Cause register regHasNull
// ** to be set to NULL if iCur contains one or more NULL values.
// */
func _sqlite3SetHasNullFlag(tls *libc.TLS, v uintptr, iCur int32, regHasNull int32) {
var addr1 int32
_ = addr1
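	/* regHasNull starts out as the integer 0 (not NULL). If the b-tree at iCur is
	** non-empty, the left-most column of its first entry is then fetched into
	** regHasNull, so the register ends up NULL exactly when that value is NULL;
	** the OP_Rewind jump resolved by _sqlite3VdbeJumpHere skips the fetch when
	** the b-tree is empty. */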
_sqlite3VdbeAddOp2(tls, v, int32(OP_Integer), 0, regHasNull)
addr1 = _sqlite3VdbeAddOp1(tls, v, int32(OP_Rewind), iCur)
_sqlite3VdbeAddOp3(tls, v, int32(OP_Column), iCur, 0, regHasNull)
_sqlite3VdbeChangeP5(tls, v, uint16(OPFLAG_TYPEOFARG))
_sqlite3VdbeJumpHere(tls, v, addr1)
}
// C documentation
//
// /*
// ** The argument is an IN operator with a list (not a subquery) on the
// ** right-hand side. Return TRUE if that list is constant.
// */
func _sqlite3InRhsIsConstant(tls *libc.TLS, pIn uintptr) (r int32) {
var pLHS uintptr
var res int32
_, _ = pLHS, res
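	/* Temporarily detach the LHS so that the constant-ness test below examines only
	** the RHS list hanging off pIn, then restore the LHS before returning. */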
pLHS = (*TExpr)(unsafe.Pointer(pIn)).FpLeft
(*TExpr)(unsafe.Pointer(pIn)).FpLeft = uintptr(0)
res = _sqlite3ExprIsConstant(tls, pIn)
(*TExpr)(unsafe.Pointer(pIn)).FpLeft = pLHS
return res
}
// C documentation
//
// /*
// ** This function is used by the implementation of the IN (...) operator.
// ** The pX parameter is the expression on the RHS of the IN operator, which
// ** might be either a list of expressions or a subquery.
// **
// ** The job of this routine is to find or create a b-tree object that can
// ** be used either to test for membership in the RHS set or to iterate through
// ** all members of the RHS set, skipping duplicates.
// **
// ** A cursor is opened on the b-tree object that is the RHS of the IN operator
// ** and the *piTab parameter is set to the index of that cursor.
// **
// ** The returned value of this function indicates the b-tree type, as follows:
// **
// ** IN_INDEX_ROWID - The cursor was opened on a database table.
// ** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index.
// ** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index.
// ** IN_INDEX_EPH - The cursor was opened on a specially created and
// ** populated ephemeral table.
// ** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be
// ** implemented as a sequence of comparisons.
// **
// ** An existing b-tree might be used if the RHS expression pX is a simple
// ** subquery such as:
// **
// **     SELECT <column1>, <column2>... FROM <table>